//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86tti"

#include "X86TargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

// Declare the pass initialization routine locally as target-specific passes
// don't have a target-wide initialization entry point, and so we rely on the
// pass constructor initialization.
namespace llvm {
void initializeX86TTIPass(PassRegistry &);
}

namespace {

class X86TTI LLVM_FINAL : public ImmutablePass, public TargetTransformInfo {
  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

public:
  X86TTI() : ImmutablePass(ID), ST(0), TLI(0) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  X86TTI(const X86TargetMachine *TM)
      : ImmutablePass(ID), ST(TM->getSubtargetImpl()),
        TLI(TM->getTargetLowering()) {
    initializeX86TTIPass(*PassRegistry::getPassRegistry());
  }

  virtual void initializePass() LLVM_OVERRIDE {
    pushTTIStack(this);
  }

  virtual void finalizePass() {
    popTTIStack();
  }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const LLVM_OVERRIDE {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
  virtual void *getAdjustedAnalysisPointer(const void *ID) LLVM_OVERRIDE {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo*)this;
    return this;
  }

  /// \name Scalar TTI Implementations
  /// @{
  virtual PopcntSupportKind
  getPopcntSupport(unsigned TyWidth) const LLVM_OVERRIDE;

  /// @}

  /// \name Vector TTI Implementations
  /// @{
  virtual unsigned getNumberOfRegisters(bool Vector) const LLVM_OVERRIDE;
  virtual unsigned getRegisterBitWidth(bool Vector) const LLVM_OVERRIDE;
  virtual unsigned getMaximumUnrollFactor() const LLVM_OVERRIDE;
  virtual unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                          OperandValueKind,
                                          OperandValueKind) const LLVM_OVERRIDE;
  virtual unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                                  int Index, Type *SubTp) const LLVM_OVERRIDE;
  virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                                    Type *Src) const LLVM_OVERRIDE;
  virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                      Type *CondTy) const LLVM_OVERRIDE;
  virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                                      unsigned Index) const LLVM_OVERRIDE;
  virtual unsigned getMemoryOpCost(unsigned Opcode, Type *Src,
                                   unsigned Alignment,
                                   unsigned AddressSpace) const LLVM_OVERRIDE;

  virtual unsigned
  getAddressComputationCost(Type *PtrTy, bool IsComplex) const LLVM_OVERRIDE;

  virtual unsigned getReductionCost(unsigned Opcode, Type *Ty,
                                    bool IsPairwiseForm) const LLVM_OVERRIDE;

  virtual unsigned getIntImmCost(const APInt &Imm,
                                 Type *Ty) const LLVM_OVERRIDE;

  virtual unsigned getIntImmCost(unsigned Opcode, const APInt &Imm,
                                 Type *Ty) const LLVM_OVERRIDE;
  virtual unsigned getIntImmCost(Intrinsic::ID IID, const APInt &Imm,
                                 Type *Ty) const LLVM_OVERRIDE;

  /// @}
};

} // end anonymous namespace

INITIALIZE_AG_PASS(X86TTI, TargetTransformInfo, "x86tti",
                   "X86 Target Transform Info", true, true, false)
char X86TTI::ID = 0;

ImmutablePass *
llvm::createX86TargetTransformInfoPass(const X86TargetMachine *TM) {
  return new X86TTI(TM);
}
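
// Illustrative usage sketch (not part of this file): the backend registers
// this analysis when populating its pass pipeline, typically from
// X86TargetMachine::addAnalysisPasses, after the target-independent BasicTTI
// pass so that X86TTI can delegate to it when appropriate:
//   PM.add(createBasicTargetTransformInfoPass(this));
//   PM.add(createX86TargetTransformInfoPass(this));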

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

X86TTI::PopcntSupportKind X86TTI::getPopcntSupport(unsigned TyWidth) const {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  // instructions is inefficient. Once the problem is fixed, we should
  // call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? PSK_FastHardware : PSK_Software;
}
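
// Illustrative note on how the answer is consumed (an assumption about
// callers, not something this file enforces): LoopIdiomRecognize, for
// example, only rewrites a population-count loop into @llvm.ctpop when this
// query answers PSK_FastHardware, which is why the TODO above matters for
// pre-POPCNT subtargets.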

unsigned X86TTI::getNumberOfRegisters(bool Vector) const {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit())
    return 16;
  return 8;
}

unsigned X86TTI::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;
  return 32;
}

unsigned X86TTI::getMaximumUnrollFactor() const {
  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                        OperandValueKind Op1Info,
                                        OperandValueKind Op2Info) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2 even though we declare them as
    // custom in order to detect the cases where the shift amount is a scalar.
    { ISD::SHL,  MVT::v4i32,  1 },
    { ISD::SRL,  MVT::v4i32,  1 },
    { ISD::SRA,  MVT::v4i32,  1 },
    { ISD::SHL,  MVT::v8i32,  1 },
    { ISD::SRL,  MVT::v8i32,  1 },
    { ISD::SRA,  MVT::v8i32,  1 },
    { ISD::SHL,  MVT::v2i64,  1 },
    { ISD::SRL,  MVT::v2i64,  1 },
    { ISD::SHL,  MVT::v4i64,  1 },
    { ISD::SRL,  MVT::v4i64,  1 },

    { ISD::SHL,  MVT::v32i8,  42 },    // cmpeqb sequence.
    { ISD::SHL,  MVT::v16i16, 16*10 }, // Scalarized.

    { ISD::SRL,  MVT::v32i8,  32*10 }, // Scalarized.
    { ISD::SRL,  MVT::v16i16, 8*10 },  // Scalarized.

    { ISD::SRA,  MVT::v32i8,  32*10 }, // Scalarized.
    { ISD::SRA,  MVT::v16i16, 16*10 }, // Scalarized.
    { ISD::SRA,  MVT::v4i64,  4*10 },  // Scalarized.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v32i8,  32*20 },
    { ISD::SDIV, MVT::v16i16, 16*20 },
    { ISD::SDIV, MVT::v8i32,  8*20 },
    { ISD::SDIV, MVT::v4i64,  4*20 },
    { ISD::UDIV, MVT::v32i8,  32*20 },
    { ISD::UDIV, MVT::v16i16, 16*20 },
    { ISD::UDIV, MVT::v8i32,  8*20 },
    { ISD::UDIV, MVT::v4i64,  4*20 },
  };
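
  // Illustrative example (IR sketch, not from this file): on AVX2 a shift
  // such as
  //   %s = shl <8 x i32> %v, %amt
  // maps to a single variable-shift instruction (vpsllvd), matching the
  // cost of 1 in the table above, while the per-lane entries (e.g. 32*10
  // for SRL on v32i8) model full scalarization.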

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType>
  SSE2UniformConstCostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // Constant splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i8,  1 }, // psllw.
    { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
    { ISD::SHL,  MVT::v4i32,  1 }, // pslld.
    { ISD::SHL,  MVT::v2i64,  1 }, // psllq.

    { ISD::SRL,  MVT::v16i8,  1 }, // psrlw.
    { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
    { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.

    { ISD::SRA,  MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
    { ISD::SRA,  MVT::v4i32,  1 }, // psrad.
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    int Idx = CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2UniformConstCostTable[Idx].Cost;
  }
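
  // Illustrative example (IR sketch, not from this file): a splat-constant
  // shift such as
  //   %s = shl <4 x i32> %v, <i32 3, i32 3, i32 3, i32 3>
  // lowers to a single pslld with an immediate, so the table above reports
  // cost 1 rather than the scalarized cost used for variable shift amounts.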

  static const CostTblEntry<MVT::SimpleValueType> SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // For some cases, where the shift amount is a scalar we would be able
    // to generate better code. Unfortunately, when this is the case the value
    // (the splat) will get hoisted out of the loop, thereby making it invisible
    // to ISel. The cost model must return worst case assumptions because it is
    // used for vectorization and we don't want to make vectorized code worse
    // than scalar code.
    { ISD::SHL,  MVT::v16i8,  30 },    // cmpeqb sequence.
    { ISD::SHL,  MVT::v8i16,  8*10 },  // Scalarized.
    { ISD::SHL,  MVT::v4i32,  2*5 },   // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,  2*10 },  // Scalarized.

    { ISD::SRL,  MVT::v16i8,  16*10 }, // Scalarized.
    { ISD::SRL,  MVT::v8i16,  8*10 },  // Scalarized.
    { ISD::SRL,  MVT::v4i32,  4*10 },  // Scalarized.
    { ISD::SRL,  MVT::v2i64,  2*10 },  // Scalarized.

    { ISD::SRA,  MVT::v16i8,  16*10 }, // Scalarized.
    { ISD::SRA,  MVT::v8i16,  8*10 },  // Scalarized.
    { ISD::SRA,  MVT::v4i32,  4*10 },  // Scalarized.
    { ISD::SRA,  MVT::v2i64,  2*10 },  // Scalarized.

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular registers.
    // The overhead of division is going to dominate most kernels anyways, so
    // try hard to prevent vectorization of division - it is generally a bad
    // idea. Assume somewhat arbitrarily that we have to be able to hide "20
    // cycles" for each lane.
    { ISD::SDIV, MVT::v16i8,  16*20 },
    { ISD::SDIV, MVT::v8i16,  8*20 },
    { ISD::SDIV, MVT::v4i32,  4*20 },
    { ISD::SDIV, MVT::v2i64,  2*20 },
    { ISD::UDIV, MVT::v16i8,  16*20 },
    { ISD::UDIV, MVT::v8i16,  8*20 },
    { ISD::UDIV, MVT::v4i32,  4*20 },
    { ISD::UDIV, MVT::v2i64,  2*20 },
  };

  if (ST->hasSSE2()) {
    int Idx = CostTableLookup(SSE2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v8i32,  4 },
    { ISD::SUB,  MVT::v8i32,  4 },
    { ISD::ADD,  MVT::v8i32,  4 },
    { ISD::SUB,  MVT::v4i64,  4 },
    { ISD::ADD,  MVT::v4i64,  4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(4) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 18
    // instead of 9.
    { ISD::MUL,  MVT::v4i64,  18 },
  };

  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX1CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX1CostTable[Idx].Cost;
  }

  // Custom lowering of vectors.
  static const CostTblEntry<MVT::SimpleValueType> CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies(3), shifts(4) and adds(2).
    { ISD::MUL,  MVT::v2i64,  9 },
    { ISD::MUL,  MVT::v4i64,  9 },
  };
  int Idx = CostTableLookup(CustomLowered, ISD, LT.second);
  if (Idx != -1)
    return LT.first * CustomLowered[Idx].Cost;

  // Special lowering of v4i32 mul on sse2, sse3: Lower v4i32 mul as 2x shuffle,
  // 2x pmuludq, 2x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return LT.first * 6;

  // Fallback to the default implementation.
  return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Op1Info,
                                                     Op2Info);
}

unsigned X86TTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                                Type *SubTp) const {
  // We only estimate the cost of reverse shuffles.
  if (Kind != SK_Reverse)
    return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
  unsigned Cost = 1;
  if (LT.second.getSizeInBits() > 128)
    Cost = 3; // Extract + insert + copy.

  // Multiply by the number of parts.
  return Cost * LT.first;
}
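
// Worked example (illustrative): reversing <8 x float> with AVX legalizes to
// a single v8f32 part (LT.first == 1) whose width exceeds 128 bits, so the
// estimate is 3 * 1 = 3; on an SSE-only subtarget the same shuffle splits
// into two 128-bit v4f32 parts, giving 1 * 2 = 2.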

unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> LTSrc = TLI->getTypeLegalizationCost(Src);
  std::pair<unsigned, MVT> LTDest = TLI->getTypeLegalizationCost(Dst);

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  SSE2ConvTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    // There are faster sequences for float conversions.
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
  };

  if (ST->hasSSE2() && !ST->hasAVX()) {
    int Idx =
        ConvertCostTableLookup(SSE2ConvTbl, ISD, LTDest.second, LTSrc.second);
    if (Idx != -1)
      return LTSrc.first * SSE2ConvTbl[Idx].Cost;
  }

  EVT SrcTy = TLI->getValueType(Src);
  EVT DstTy = TLI->getValueType(Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  1 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  1 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 2 },

    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   6 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  9 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  6 },

    { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   6 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   9 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   8 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   6 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  6 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  3 },
  };

  if (ST->hasAVX()) {
    int Idx = ConvertCostTableLookup(AVXConversionTbl, ISD, DstTy.getSimpleVT(),
                                     SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVXConversionTbl[Idx].Cost;
  }

  return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
}
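
// Worked example (illustrative): with AVX, a cast such as
//   %w = sext <8 x i16> %v to <8 x i32>
// hits the { ISD::SIGN_EXTEND, v8i32, v8i16, 1 } entry above and is costed
// at 1, while conversions not covered by either table fall through to the
// target-independent estimate.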

unsigned X86TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                    Type *CondTy) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTbl[] = {
    { ISD::SETCC,  MVT::v2f64,  1 },
    { ISD::SETCC,  MVT::v4f32,  1 },
    { ISD::SETCC,  MVT::v2i64,  1 },
    { ISD::SETCC,  MVT::v4i32,  1 },
    { ISD::SETCC,  MVT::v8i16,  1 },
    { ISD::SETCC,  MVT::v16i8,  1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTbl[] = {
    { ISD::SETCC,  MVT::v4f64,  1 },
    { ISD::SETCC,  MVT::v8f32,  1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC,  MVT::v4i64,  4 },
    { ISD::SETCC,  MVT::v8i32,  4 },
    { ISD::SETCC,  MVT::v16i16, 4 },
    { ISD::SETCC,  MVT::v32i8,  4 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTbl[] = {
    { ISD::SETCC,  MVT::v4i64,  1 },
    { ISD::SETCC,  MVT::v8i32,  1 },
    { ISD::SETCC,  MVT::v16i16, 1 },
    { ISD::SETCC,  MVT::v32i8,  1 },
  };

  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX2CostTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = CostTableLookup(AVX1CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX1CostTbl[Idx].Cost;
  }

  if (ST->hasSSE42()) {
    int Idx = CostTableLookup(SSE42CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * SSE42CostTbl[Idx].Cost;
  }

  return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

unsigned X86TTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                    unsigned Index) const {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
      return 0;
  }

  return TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);
}
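
// Illustrative example: extracting element 0 of a <4 x float> is free here
// because the scalar already lives in the low lane of the vector register,
// so no shuffle or move is needed; extracting a higher lane falls through
// to the target-independent estimate.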

unsigned X86TTI::getScalarizationOverhead(Type *Ty, bool Insert,
                                          bool Extract) const {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}

unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                 unsigned AddressSpace) const {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      unsigned Cost = TargetTransformInfo::getMemoryOpCost(Opcode,
                                                           VTy->getScalarType(),
                                                           Alignment,
                                                           AddressSpace);
      unsigned SplitCost = getScalarizationOverhead(Src,
                                                    Opcode == Instruction::Load,
                                                    Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  unsigned Cost = LT.first * 1;

  // On Sandybridge 256bit load/stores are double pumped
  // (but not on Haswell).
  if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
    Cost *= 2;

  return Cost;
}
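
// Worked example (illustrative): a load of <8 x float> legalizes to one
// 256-bit part on AVX1. Since Sandybridge issues 256-bit memory ops as two
// 128-bit halves, the estimate is 1 * 2 = 2; on an AVX2 (Haswell-class)
// subtarget the same load is costed at 1.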

unsigned X86TTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
  // Address computations in vectorized code with non-consecutive addresses will
  // likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return TargetTransformInfo::getAddressComputationCost(Ty, IsComplex);
}

unsigned X86TTI::getReductionCost(unsigned Opcode, Type *ValTy,
                                  bool IsPairwise) const {
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and take it as the cost.

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblPairWise[] = {
    { ISD::FADD,  MVT::v2f64,  2 },
    { ISD::FADD,  MVT::v4f32,  4 },
    { ISD::ADD,   MVT::v2i64,  2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,  3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v8i16,  5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblPairWise[] = {
    { ISD::FADD,  MVT::v4f32,  4 },
    { ISD::FADD,  MVT::v4f64,  5 },
    { ISD::FADD,  MVT::v8f32,  7 },
    { ISD::ADD,   MVT::v2i64,  1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,  3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v4i64,  5 }, // The data reported by the IACA tool is "4.8".
    { ISD::ADD,   MVT::v8i16,  5 },
    { ISD::ADD,   MVT::v8i32,  5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v2f64,  2 },
    { ISD::FADD,  MVT::v4f32,  4 },
    { ISD::ADD,   MVT::v2i64,  2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,  3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD,   MVT::v8i16,  4 }, // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v4f32,  3 },
    { ISD::FADD,  MVT::v4f64,  3 },
    { ISD::FADD,  MVT::v8f32,  4 },
    { ISD::ADD,   MVT::v2i64,  1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,  3 }, // The data reported by the IACA tool is "2.8".
    { ISD::ADD,   MVT::v4i64,  3 },
    { ISD::ADD,   MVT::v8i16,  4 },
    { ISD::ADD,   MVT::v8i32,  5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblPairWise[Idx].Cost;
    }
  } else {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblNoPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblNoPairWise[Idx].Cost;
    }
  }

  return TargetTransformInfo::getReductionCost(Opcode, ValTy, IsPairwise);
}

unsigned X86TTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm.getBitWidth() <= 64 &&
      (isInt<32>(Imm.getSExtValue()) || isUInt<32>(Imm.getZExtValue())))
    return TCC_Basic;
  else
    return 2 * TCC_Basic;
}
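
// Illustrative example: an i64 constant such as 0x100000000 fits neither a
// sign- nor a zero-extended 32-bit immediate field, so it is costed at
// 2 * TCC_Basic (it typically needs a separate movabsq to materialize
// instead of folding into the instruction), whereas a constant like 42
// costs a single TCC_Basic.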

unsigned X86TTI::getIntImmCost(unsigned Opcode, const APInt &Imm,
                               Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (Opcode) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    if (Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TCC_Free;
    else
      return X86TTI::getIntImmCost(Imm, Ty);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    return X86TTI::getIntImmCost(Imm, Ty);
  default:
    return TargetTransformInfo::getIntImmCost(Opcode, Imm, Ty);
  }
}

unsigned X86TTI::getIntImmCost(Intrinsic::ID IID, const APInt &Imm,
                               Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default: return TargetTransformInfo::getIntImmCost(IID, Imm, Ty);
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if (Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TCC_Free;
    else
      return X86TTI::getIntImmCost(Imm, Ty);
  case Intrinsic::experimental_stackmap:
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue()))
      return TCC_Free;
    else
      return X86TTI::getIntImmCost(Imm, Ty);
  }
}