//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "X86TargetMachine.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

// Declare the pass initialization routine locally as target-specific passes
// don't have a target-wide initialization entry point, and so we rely on the
// pass constructor initialization.
namespace llvm {
void initializeX86TTIPass(PassRegistry &);
}

static cl::opt<bool>
UsePartialUnrolling("x86-use-partial-unrolling", cl::init(true),
  cl::desc("Use partial unrolling for some X86 targets"), cl::Hidden);
static cl::opt<unsigned>
PartialUnrollingThreshold("x86-partial-unrolling-threshold", cl::init(0),
  cl::desc("Threshold for X86 partial unrolling"), cl::Hidden);
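
// Both switches are ordinary cl::opt flags, so they can be toggled from a
// tool's command line for experiments, e.g.:
//   llc -x86-use-partial-unrolling=0 -x86-partial-unrolling-threshold=20 foo.ll
// (or via -mllvm when the backend is driven through clang).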

namespace {

class X86TTI final : public ImmutablePass, public TargetTransformInfo {
  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

public:
  X86TTI() : ImmutablePass(ID), ST(nullptr), TLI(nullptr) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  X86TTI(const X86TargetMachine *TM)
      : ImmutablePass(ID), ST(TM->getSubtargetImpl()),
        TLI(TM->getTargetLowering()) {
    initializeX86TTIPass(*PassRegistry::getPassRegistry());
  }

  void initializePass() override {
    pushTTIStack(this);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
  void *getAdjustedAnalysisPointer(const void *ID) override {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo*)this;
    return this;
  }

  /// \name Scalar TTI Implementations
  /// @{
  PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override;
  void getUnrollingPreferences(Loop *L,
                               UnrollingPreferences &UP) const override;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(bool Vector) const override;
  unsigned getRegisterBitWidth(bool Vector) const override;
  unsigned getMaximumUnrollFactor() const override;
  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind,
                                  OperandValueKind) const override;
  unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                          int Index, Type *SubTp) const override;
  unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                            Type *Src) const override;
  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                              Type *CondTy) const override;
  unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                              unsigned Index) const override;
  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) const override;

  unsigned getAddressComputationCost(Type *PtrTy,
                                     bool IsComplex) const override;

  unsigned getReductionCost(unsigned Opcode, Type *Ty,
                            bool IsPairwiseForm) const override;

  unsigned getIntImmCost(const APInt &Imm, Type *Ty) const override;

  unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                         Type *Ty) const override;
  unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                         Type *Ty) const override;

  /// @}
};

} // end anonymous namespace

INITIALIZE_AG_PASS(X86TTI, TargetTransformInfo, "x86tti",
                   "X86 Target Transform Info", true, true, false)
char X86TTI::ID = 0;

ImmutablePass *
llvm::createX86TargetTransformInfoPass(const X86TargetMachine *TM) {
  return new X86TTI(TM);
}

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//
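
// Most of the hooks below follow the same pattern: legalize the IR type,
// look up the (ISD opcode, legalized MVT) pair in a cost table for the best
// feature level the subtarget supports, and scale the entry by the number of
// legalized pieces (LT.first). Anything not covered falls back to the
// target-independent TargetTransformInfo implementation.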

X86TTI::PopcntSupportKind X86TTI::getPopcntSupport(unsigned TyWidth) const {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? PSK_FastHardware : PSK_Software;
}

void X86TTI::getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) const {
  if (!UsePartialUnrolling)
    return;
  // According to the Intel 64 and IA-32 Architectures Optimization Reference
  // Manual, Intel Core models and later have a loop stream detector
  // (and associated uop queue) that can benefit from partial unrolling.
  // The relevant requirements are:
  //  - The loop must have no more than 4 (8 for Nehalem and later) branches
  //    taken, and none of them may be calls.
  //  - The loop can have no more than 18 (28 for Nehalem and later) uops.

  // According to the Software Optimization Guide for AMD Family 15h Processors,
  // models 30h-4fh (Steamroller and later) have a loop predictor and loop
  // buffer which can benefit from partial unrolling.
  // The relevant requirements are:
  //  - The loop must have fewer than 16 branches
  //  - The loop must have less than 40 uops in all executed loop branches

  // The number of taken branches in a loop is hard to estimate here, and
  // benchmarking has revealed that it is better not to be conservative when
  // estimating the branch count. As a result, we'll ignore the branch limits
  // until someone finds a case where it matters in practice.

  unsigned MaxOps;
  if (PartialUnrollingThreshold.getNumOccurrences() > 0) {
    MaxOps = PartialUnrollingThreshold;
  } else if (ST->isAtom()) {
    // On the Atom, the throughput for taken branches is 2 cycles. For small
    // simple loops, expand by a small factor to hide the backedge cost.
    MaxOps = 10;
  } else if (ST->hasFSGSBase() && ST->hasXOP() /* Steamroller and later */) {
    MaxOps = 40;
  } else if (ST->hasFMA4() /* Any other recent AMD */) {
    return;
  } else if (ST->hasAVX() || ST->hasSSE42() /* Nehalem and later */) {
    MaxOps = 28;
  } else if (ST->hasSSSE3() /* Intel Core */) {
    MaxOps = 18;
  } else {
    return;
  }

  // Scan the loop: don't unroll loops with calls.
  for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
       I != E; ++I) {
    BasicBlock *BB = *I;

    for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
      if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
        ImmutableCallSite CS(J);
        if (const Function *F = CS.getCalledFunction()) {
          if (!isLoweredToCall(F))
            continue;
        }

        return;
      }
  }

  // Enable runtime and partial unrolling up to the specified size.
  UP.Partial = UP.Runtime = true;
  UP.PartialThreshold = UP.PartialOptSizeThreshold = MaxOps;
}

unsigned X86TTI::getNumberOfRegisters(bool Vector) const {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit())
    return 16;
  return 8;
}

unsigned X86TTI::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;
  return 32;
}

unsigned X86TTI::getMaximumUnrollFactor() const {
  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}
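
// Cost of a vector arithmetic instruction. The operand-kind arguments let
// callers (e.g. the vectorizers) say when an operand is a uniform or
// non-uniform constant, which on X86 frequently selects a much cheaper
// lowering than the general case.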
unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                        OperandValueKind Op1Info,
                                        OperandValueKind Op2Info) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);
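  // LT.first is the number of legalized pieces and LT.second the legalized
  // type; for example, <8 x i32> on an SSE2-only target splits into two
  // v4i32 halves, so LT.first == 2 and the per-piece table costs below are
  // scaled by it.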

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType>
  AVX2UniformConstCostTable[] = {
    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2UniformConstCostTable[Idx].Cost;
  }
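
  // Division by a uniform (splat) constant is not lowered to a real divide:
  // the DAG combiner rewrites it into a multiply-by-magic-number sequence
  // (vpmulhw/vpmuludq plus shifts), which is why the table above is an order
  // of magnitude cheaper than the generic per-lane division costs below.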

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 on AVX2 are legal even though we declare them as
    // custom so we can detect the cases where the shift amount is a scalar.
    { ISD::SHL,  MVT::v4i32,      1 },
    { ISD::SRL,  MVT::v4i32,      1 },
    { ISD::SRA,  MVT::v4i32,      1 },
    { ISD::SHL,  MVT::v8i32,      1 },
    { ISD::SRL,  MVT::v8i32,      1 },
    { ISD::SRA,  MVT::v8i32,      1 },
    { ISD::SHL,  MVT::v2i64,      1 },
    { ISD::SRL,  MVT::v2i64,      1 },
    { ISD::SHL,  MVT::v4i64,      1 },
    { ISD::SRL,  MVT::v4i64,      1 },

    { ISD::SHL,  MVT::v32i8,     42 }, // cmpeqb sequence.
    { ISD::SHL,  MVT::v16i16, 16*10 }, // Scalarized.

    { ISD::SRL,  MVT::v32i8,  32*10 }, // Scalarized.
    { ISD::SRL,  MVT::v16i16,  8*10 }, // Scalarized.

    { ISD::SRA,  MVT::v32i8,  32*10 }, // Scalarized.
    { ISD::SRA,  MVT::v16i16, 16*10 }, // Scalarized.
    { ISD::SRA,  MVT::v4i64,   4*10 }, // Scalarized.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v32i8,  32*20 },
    { ISD::SDIV, MVT::v16i16, 16*20 },
    { ISD::SDIV, MVT::v8i32,   8*20 },
    { ISD::SDIV, MVT::v4i64,   4*20 },
    { ISD::UDIV, MVT::v32i8,  32*20 },
    { ISD::UDIV, MVT::v16i16, 16*20 },
    { ISD::UDIV, MVT::v8i32,   8*20 },
    { ISD::UDIV, MVT::v4i64,   4*20 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType>
  SSE2UniformConstCostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // Constant splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i8,  1 }, // psllw.
    { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
    { ISD::SHL,  MVT::v4i32,  1 }, // pslld
    { ISD::SHL,  MVT::v2i64,  1 }, // psllq.

    { ISD::SRL,  MVT::v16i8,  1 }, // psrlw.
    { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
    { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.

    { ISD::SRA,  MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
    { ISD::SRA,  MVT::v4i32,  1 }, // psrad.

    { ISD::SDIV, MVT::v8i16,  6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v8i16,  6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    int Idx = CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2UniformConstCostTable[Idx].Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
        (VT == MVT::v4i32 && ST->hasSSE41()))
      // Vector shift left by non-uniform constant can be lowered
      // into vector multiply (pmullw/pmulld).
      return LT.first;

    if (VT == MVT::v4i32 && ST->hasSSE2())
      // A vector shift left by non-uniform constant is converted
      // into a vector multiply; the new multiply is eventually
      // lowered into a sequence of shuffles and 2 x pmuludq.
      ISD = ISD::MUL;
  }

  static const CostTblEntry<MVT::SimpleValueType> SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // For some cases, where the shift amount is a scalar we would be able
    // to generate better code. Unfortunately, when this is the case the value
    // (the splat) will get hoisted out of the loop, thereby making it invisible
    // to ISel. The cost model must return worst case assumptions because it is
    // used for vectorization and we don't want to make vectorized code worse
    // than scalar code.
    { ISD::SHL,  MVT::v16i8,    30 }, // cmpeqb sequence.
    { ISD::SHL,  MVT::v8i16,  8*10 }, // Scalarized.
    { ISD::SHL,  MVT::v4i32,   2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,  2*10 }, // Scalarized.
    { ISD::SHL,  MVT::v4i64,  4*10 }, // Scalarized.

    { ISD::SRL,  MVT::v16i8, 16*10 }, // Scalarized.
    { ISD::SRL,  MVT::v8i16,  8*10 }, // Scalarized.
    { ISD::SRL,  MVT::v4i32,  4*10 }, // Scalarized.
    { ISD::SRL,  MVT::v2i64,  2*10 }, // Scalarized.

    { ISD::SRA,  MVT::v16i8, 16*10 }, // Scalarized.
    { ISD::SRA,  MVT::v8i16,  8*10 }, // Scalarized.
    { ISD::SRA,  MVT::v4i32,  4*10 }, // Scalarized.
    { ISD::SRA,  MVT::v2i64,  2*10 }, // Scalarized.

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular registers.
    // The overhead of division is going to dominate most kernels anyways, so
    // try hard to prevent vectorization of division - it is generally a bad
    // idea. Assume somewhat arbitrarily that we have to be able to hide "20
    // cycles" for each lane.
    { ISD::SDIV, MVT::v16i8, 16*20 },
    { ISD::SDIV, MVT::v8i16,  8*20 },
    { ISD::SDIV, MVT::v4i32,  4*20 },
    { ISD::SDIV, MVT::v2i64,  2*20 },
    { ISD::UDIV, MVT::v16i8, 16*20 },
    { ISD::UDIV, MVT::v8i16,  8*20 },
    { ISD::UDIV, MVT::v4i32,  4*20 },
    { ISD::UDIV, MVT::v2i64,  2*20 },
  };

  if (ST->hasSSE2()) {
    int Idx = CostTableLookup(SSE2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL, MVT::v16i16,  4 },
    { ISD::MUL, MVT::v8i32,   4 },
    { ISD::SUB, MVT::v8i32,   4 },
    { ISD::ADD, MVT::v8i32,   4 },
    { ISD::SUB, MVT::v4i64,   4 },
    { ISD::ADD, MVT::v4i64,   4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(4) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 18
    // instead of 9.
    { ISD::MUL, MVT::v4i64,  18 },
  };

  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    MVT VT = LT.second;

    // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
    // sequence of extract + two vector multiplies + insert.
    if (ISD == ISD::SHL && (VT == MVT::v8i32 || VT == MVT::v16i16) &&
        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)
      ISD = ISD::MUL;

    int Idx = CostTableLookup(AVX1CostTable, ISD, VT);
    if (Idx != -1)
      return LT.first * AVX1CostTable[Idx].Cost;
  }

  // Custom lowering of vectors.
  static const CostTblEntry<MVT::SimpleValueType> CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies(3), shifts(4) and adds(2).
    { ISD::MUL, MVT::v2i64, 9 },
    { ISD::MUL, MVT::v4i64, 9 },
  };
  int Idx = CostTableLookup(CustomLowered, ISD, LT.second);
  if (Idx != -1)
    return LT.first * CustomLowered[Idx].Cost;

  // Special lowering of v4i32 mul on sse2, sse3: Lower v4i32 mul as 2x shuffle,
  // 2x pmuludq, 2x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return LT.first * 6;

  // Fallback to the default implementation.
  return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Op1Info,
                                                     Op2Info);
}

unsigned X86TTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                                Type *SubTp) const {
  // We only estimate the cost of reverse shuffles.
  if (Kind != SK_Reverse)
    return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
  unsigned Cost = 1;
  if (LT.second.getSizeInBits() > 128)
    Cost = 3; // Extract + insert + copy.

  // Multiply by the number of parts.
  return Cost * LT.first;
}

unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> LTSrc = TLI->getTypeLegalizationCost(Src);
  std::pair<unsigned, MVT> LTDest = TLI->getTypeLegalizationCost(Dst);

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  SSE2ConvTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    // There are faster sequences for float conversions.
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8,  8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8,  8 },
  };

  if (ST->hasSSE2() && !ST->hasAVX()) {
    int Idx =
        ConvertCostTableLookup(SSE2ConvTbl, ISD, LTDest.second, LTSrc.second);
    if (Idx != -1)
      return LTSrc.first * SSE2ConvTbl[Idx].Cost;
  }

  EVT SrcTy = TLI->getValueType(Src);
  EVT DstTy = TLI->getValueType(Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  4 },
  };

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  9 },

    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   6 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  9 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  6 },
    // The generic code to compute the scalar overhead is currently broken.
    // Workaround this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  2*10 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  4*10 },

    { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32,  7 },
    { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32,  1 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflating the cost per element by 1.
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  4*4 },
  };

  if (ST->hasAVX2()) {
    int Idx = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVX2ConversionTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = ConvertCostTableLookup(AVXConversionTbl, ISD, DstTy.getSimpleVT(),
                                     SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVXConversionTbl[Idx].Cost;
  }

  return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
}
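
// Cost of vector compare/select. SSE4.2 and AVX2 handle the listed types
// with a single instruction; AVX1 has no 256-bit integer compares, so those
// are modeled as a split into two 128-bit halves plus recombination.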
unsigned X86TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                    Type *CondTy) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2f64,  1 },
    { ISD::SETCC, MVT::v4f32,  1 },
    { ISD::SETCC, MVT::v2i64,  1 },
    { ISD::SETCC, MVT::v4i32,  1 },
    { ISD::SETCC, MVT::v8i16,  1 },
    { ISD::SETCC, MVT::v16i8,  1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64,  1 },
    { ISD::SETCC, MVT::v8f32,  1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC, MVT::v4i64,  4 },
    { ISD::SETCC, MVT::v8i32,  4 },
    { ISD::SETCC, MVT::v16i16, 4 },
    { ISD::SETCC, MVT::v32i8,  4 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4i64,  1 },
    { ISD::SETCC, MVT::v8i32,  1 },
    { ISD::SETCC, MVT::v16i16, 1 },
    { ISD::SETCC, MVT::v32i8,  1 },
  };

  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX2CostTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = CostTableLookup(AVX1CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX1CostTbl[Idx].Cost;
  }

  if (ST->hasSSE42()) {
    int Idx = CostTableLookup(SSE42CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * SSE42CostTbl[Idx].Cost;
  }

  return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

unsigned X86TTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                    unsigned Index) const {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
      return 0;
  }

  return TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);
}
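
// Estimate the cost of taking a vector apart and/or rebuilding it: one
// extractelement per lane when the operands must be scalarized and one
// insertelement per lane when the result must be reassembled.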
unsigned X86TTI::getScalarizationOverhead(Type *Ty, bool Insert,
                                          bool Extract) const {
  assert (Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}
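
// Cost of a load or store. Odd-sized vectors such as <3 x float> get small
// fixed costs, other non-power-of-two vectors are priced as fully scalarized
// accesses, and legal types cost one unit per legalized piece (doubled for
// 256-bit accesses on AVX targets without AVX2, where they are double
// pumped).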
unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                 unsigned AddressSpace) const {
  // Handle non-power-of-two vectors such as <3 x float>
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      unsigned Cost = TargetTransformInfo::getMemoryOpCost(Opcode,
                                                           VTy->getScalarType(),
                                                           Alignment,
                                                           AddressSpace);
      unsigned SplitCost = getScalarizationOverhead(Src,
                                                    Opcode == Instruction::Load,
                                                    Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  unsigned Cost = LT.first * 1;

  // On Sandybridge 256-bit load/stores are double pumped
  // (but not on Haswell).
  if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
    Cost *= 2;

  return Cost;
}

unsigned X86TTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
  // Address computations in vectorized code with non-consecutive addresses will
  // likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return TargetTransformInfo::getAddressComputationCost(Ty, IsComplex);
}

unsigned X86TTI::getReductionCost(unsigned Opcode, Type *ValTy,
                                  bool IsPairwise) const {

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v8i16, 5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblPairWise[] = {
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::FADD, MVT::v4f64, 5 },
    { ISD::FADD, MVT::v8f32, 7 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
    { ISD::ADD,  MVT::v8i16, 5 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD,  MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v4f32, 3 },
    { ISD::FADD, MVT::v4f64, 3 },
    { ISD::FADD, MVT::v8f32, 4 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
    { ISD::ADD,  MVT::v4i64, 3 },
    { ISD::ADD,  MVT::v8i16, 4 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblPairWise[Idx].Cost;
    }
  } else {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblNoPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblNoPairWise[Idx].Cost;
    }
  }

  return TargetTransformInfo::getReductionCost(Opcode, ValTy, IsPairwise);
}
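
// Cost of materializing an arbitrary integer immediate into a register: a
// value that fits a sign- or zero-extended 32-bit encoding costs one basic
// instruction, while anything wider needs a full 64-bit immediate move
// (movabs) and is modeled as two.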
unsigned X86TTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TCC_Free;

  if (Imm.getBitWidth() <= 64 &&
      (isInt<32>(Imm.getSExtValue()) || isUInt<32>(Imm.getZExtValue())))
    return TCC_Basic;
  else
    return 2 * TCC_Basic;
}
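
// Operand-aware immediate cost: returns TCC_Free whenever the immediate can
// be folded directly into the instruction encoding, e.g. a sign-extended
// 32-bit operand of add/and/icmp, the shift amount of a shift, or a GEP
// offset that becomes an addressing-mode displacement.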
unsigned X86TTI::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                               Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default: return TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TCC_Basic;
    return TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if ((Idx == ImmIdx) &&
      Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
    return TCC_Free;

  return X86TTI::getIntImmCost(Imm, Ty);
}

unsigned X86TTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                               const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default: return TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TCC_Free;
    break;
  }
  return X86TTI::getIntImmCost(Imm, Ty);
}