//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file provides a helper that implements much of the TTI interface in
/// terms of the target-independent code generator and TargetLowering
/// interfaces.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
#define LLVM_CODEGEN_BASICTTIIMPL_H

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"

namespace llvm {

extern cl::opt<unsigned> PartialUnrollingThreshold;

template <typename T>
class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
private:
  typedef TargetTransformInfoImplCRTPBase<T> BaseT;
  typedef TargetTransformInfo TTI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
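  /// For example, with both flags set for a <4 x i32> vector, this sums the
  /// target's cost of four insertelement and four extractelement operations.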
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
    assert(Ty->isVectorTy() && "Can only scalarize vectors");
    unsigned Cost = 0;

    for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
      if (Insert)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
      if (Extract)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
    }

    return Cost;
  }

  /// Estimate the overhead of an SK_Alternate shuffle.
  unsigned getAltShuffleOverhead(Type *Ty) {
    assert(Ty->isVectorTy() && "Can only shuffle vectors");
    unsigned Cost = 0;
    // Shuffle cost is equal to the cost of extracting elements from the
    // argument vectors plus the cost of inserting them into the result vector.

    // e.g. a <4 x float> shuffle with a mask of <0,5,2,7> extracts from
    // index 0 of the first vector, index 1 of the second vector, index 2 of
    // the first vector, and finally index 3 of the second vector, and inserts
    // them at indices <0,1,2,3> of the result vector.
    for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
      Cost += static_cast<T *>(this)
                  ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
      Cost += static_cast<T *>(this)
                  ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
    }
    return Cost;
  }
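
  /// Helper to access the TargetLowering implementation of the current target.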
  const TargetLoweringBase *getTLI() const {
    return TM->getSubtargetImpl()->getTargetLowering();
  }

protected:
  const TargetMachine *TM;

  explicit BasicTTIImplBase(const TargetMachine *TM = nullptr)
      : BaseT(TM ? TM->getDataLayout() : nullptr), TM(TM) {}

public:
  // Provide value semantics. MSVC requires that we spell all of these out.
  BasicTTIImplBase(const BasicTTIImplBase &Arg)
      : BaseT(static_cast<const BaseT &>(Arg)), TM(Arg.TM) {}
  BasicTTIImplBase(BasicTTIImplBase &&Arg)
      : BaseT(std::move(static_cast<BaseT &>(Arg))), TM(std::move(Arg.TM)) {}
  BasicTTIImplBase &operator=(const BasicTTIImplBase &RHS) {
    BaseT::operator=(static_cast<const BaseT &>(RHS));
    TM = RHS.TM;
    return *this;
  }
  BasicTTIImplBase &operator=(BasicTTIImplBase &&RHS) {
    BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
    TM = std::move(RHS.TM);
    return *this;
  }

  /// \name Scalar TTI Implementations
  /// @{

  bool hasBranchDivergence() { return false; }

  bool isLegalAddImmediate(int64_t imm) {
    return getTLI()->isLegalAddImmediate(imm);
  }

  bool isLegalICmpImmediate(int64_t imm) {
    return getTLI()->isLegalICmpImmediate(imm);
  }
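
  // The two queries below package an addressing expression of the form
  // BaseGV + BaseOffset + (HasBaseReg ? <base reg> : 0) + Scale * <index reg>
  // into a TargetLoweringBase::AddrMode and let the target judge it.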
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale) {
    TargetLoweringBase::AddrMode AM;
    AM.BaseGV = BaseGV;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    AM.Scale = Scale;
    return getTLI()->isLegalAddressingMode(AM, Ty);
  }

  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale) {
    TargetLoweringBase::AddrMode AM;
    AM.BaseGV = BaseGV;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    AM.Scale = Scale;
    return getTLI()->getScalingFactorCost(AM, Ty);
  }

  bool isTruncateFree(Type *Ty1, Type *Ty2) {
    return getTLI()->isTruncateFree(Ty1, Ty2);
  }

  bool isTypeLegal(Type *Ty) {
    EVT VT = getTLI()->getValueType(Ty);
    return getTLI()->isTypeLegal(VT);
  }

  unsigned getJumpBufAlignment() { return getTLI()->getJumpBufAlignment(); }

  unsigned getJumpBufSize() { return getTLI()->getJumpBufSize(); }
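
  // Building a lookup table for a switch only pays off if the target can
  // lower the resulting jump table (BR_JT) or indirect branch (BRIND).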
  bool shouldBuildLookupTables() {
    const TargetLoweringBase *TLI = getTLI();
    return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }

  bool haveFastSqrt(Type *Ty) {
    const TargetLoweringBase *TLI = getTLI();
    EVT VT = TLI->getValueType(Ty);
    return TLI->isTypeLegal(VT) &&
           TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
  }

  void getUnrollingPreferences(const Function *F, Loop *L,
                               TTI::UnrollingPreferences &UP) {
    // This unrolling functionality is target independent, but to provide some
    // motivation for its intended use, for x86:

    // According to the Intel 64 and IA-32 Architectures Optimization Reference
    // Manual, Intel Core models and later have a loop stream detector (and
    // associated uop queue) that can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have no more than 4 (8 for Nehalem and later) branches
    //    taken, and none of them may be calls.
    //  - The loop can have no more than 18 (28 for Nehalem and later) uops.

    // According to the Software Optimization Guide for AMD Family 15h
    // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
    // and loop buffer which can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have fewer than 16 branches
    //  - The loop must have less than 40 uops in all executed loop branches

    // The number of taken branches in a loop is hard to estimate here, and
    // benchmarking has revealed that it is better not to be conservative when
    // estimating the branch count. As a result, we'll ignore the branch limits
    // until someone finds a case where it matters in practice.
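
    // In effect, the unroll limit is the command-line override when given,
    // otherwise the scheduler's loop micro-op buffer size; if neither is
    // available, the unrolling preferences are left untouched.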
    unsigned MaxOps;
    const TargetSubtargetInfo *ST = TM->getSubtargetImpl(*F);
    if (PartialUnrollingThreshold.getNumOccurrences() > 0)
      MaxOps = PartialUnrollingThreshold;
    else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
      MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
    else
      return;

    // Scan the loop: don't unroll loops with calls.
    for (Loop::block_iterator I = L->block_begin(), E = L->block_end(); I != E;
         ++I) {
      BasicBlock *BB = *I;

      for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
        if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
          ImmutableCallSite CS(J);
          if (const Function *F = CS.getCalledFunction()) {
            if (!static_cast<T *>(this)->isLoweredToCall(F))
              continue;
          }

          return;
        }
    }

    // Enable runtime and partial unrolling up to the specified size.
    UP.Partial = UP.Runtime = true;
    UP.PartialThreshold = UP.PartialOptSizeThreshold = MaxOps;
  }

  /// @}

  /// \name Vector TTI Implementations
  /// @{
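
  // Conservative defaults for targets that do not describe their register
  // file: one register, 32 bits wide, no interleaving.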
  unsigned getNumberOfRegisters(bool Vector) { return 1; }

  unsigned getRegisterBitWidth(bool Vector) { return 32; }

  unsigned getMaxInterleaveFactor() { return 1; }
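
  /// The generic arithmetic cost model: legal or promoted operations cost one
  /// unit per legalized register (two if the value is split across registers),
  /// custom-lowered operations are assumed to be twice as expensive, and
  /// operations that must be expanded are priced as full scalarization.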
  unsigned getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None) {
    // Check if any of the operands are vector operands.
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

    bool IsFloat = Ty->getScalarType()->isFloatingPointTy();
    // Assume that floating point arithmetic operations cost twice as much as
    // integer operations.
    unsigned OpCost = (IsFloat ? 2 : 1);

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1.
      // If the type is split to multiple registers, assume that there is some
      // overhead to this.
      // TODO: Once we have extract/insert subvector cost we need to use them.
      if (LT.first > 1)
        return LT.first * 2 * OpCost;
      return LT.first * 1 * OpCost;
    }

    if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered, then assume that the code is twice
      // as expensive.
      return LT.first * 2 * OpCost;
    }

    // Else, assume that we need to scalarize this op.
    if (Ty->isVectorTy()) {
      unsigned Num = Ty->getVectorNumElements();
      unsigned Cost = static_cast<T *>(this)
                          ->getArithmeticInstrCost(Opcode, Ty->getScalarType());
      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(Ty, true, true) + Num * Cost;
    }

    // We don't know anything about this scalar instruction.
    return OpCost;
  }

  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                          Type *SubTp) {
    if (Kind == TTI::SK_Alternate) {
      return getAltShuffleOverhead(Tp);
    }
    return 1;
  }
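
  /// The generic cast cost model: no-op conversions are free, casts that the
  /// target lowers directly are cheap, and everything else is priced as a
  /// scalarized conversion with insert/extract overhead.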
  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(Src);
    std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(Dst);

    // Check for NOOP conversions.
    if (SrcLT.first == DstLT.first &&
        SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

      // Bitcasts between types that are legalized to the same type are free.
      if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)
        return 0;
    }

    if (Opcode == Instruction::Trunc &&
        TLI->isTruncateFree(SrcLT.second, DstLT.second))
      return 0;

    if (Opcode == Instruction::ZExt &&
        TLI->isZExtFree(SrcLT.second, DstLT.second))
      return 0;

    // If the cast is marked as legal (or promote) then assume low cost.
    if (SrcLT.first == DstLT.first &&
        TLI->isOperationLegalOrPromote(ISD, DstLT.second))
      return 1;

    // Handle scalar conversions.
    if (!Src->isVectorTy() && !Dst->isVectorTy()) {

      // Scalar bitcasts are usually free.
      if (Opcode == Instruction::BitCast)
        return 0;

      // Just check the op cost. If the operation is legal then assume it
      // costs 1.
      if (!TLI->isOperationExpand(ISD, DstLT.second))
        return 1;

      // Assume that illegal scalar instructions are expensive.
      return 4;
    }

    // Check vector-to-vector casts.
    if (Dst->isVectorTy() && Src->isVectorTy()) {

      // If the cast is between same-sized registers, then the check is simple.
      if (SrcLT.first == DstLT.first &&
          SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

        // Assume that Zext is done using AND.
        if (Opcode == Instruction::ZExt)
          return 1;

        // Assume that sext is done using SHL and SRA.
        if (Opcode == Instruction::SExt)
          return 2;

        // Just check the op cost. If the operation is legal then assume it
        // costs 1 and multiply by the type-legalization overhead.
        if (!TLI->isOperationExpand(ISD, DstLT.second))
          return SrcLT.first * 1;
      }

      // If we are converting vectors and the operation is illegal, or
      // if the vectors are legalized to different types, estimate the
      // scalarization costs.
      unsigned Num = Dst->getVectorNumElements();
      unsigned Cost = static_cast<T *>(this)->getCastInstrCost(
          Opcode, Dst->getScalarType(), Src->getScalarType());

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(Dst, true, true) + Num * Cost;
    }

    // We already handled vector-to-vector and scalar-to-scalar conversions.
    // This is where we handle bitcasts between vectors and scalars. We need to
    // assume that the conversion is scalarized in one way or another.
    if (Opcode == Instruction::BitCast)
      // Illegal bitcasts are done by storing and loading from a stack slot.
      return (Src->isVectorTy() ? getScalarizationOverhead(Src, false, true)
                                : 0) +
             (Dst->isVectorTy() ? getScalarizationOverhead(Dst, true, false)
                                : 0);

    llvm_unreachable("Unhandled cast");
  }

  unsigned getCFInstrCost(unsigned Opcode) {
    // Branches are assumed to be predicted.
    return 0;
  }

  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    // Selects on vectors are actually vector selects.
    if (ISD == ISD::SELECT) {
      assert(CondTy && "CondTy must exist");
      if (CondTy->isVectorTy())
        ISD = ISD::VSELECT;
    }

    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

    if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
        !TLI->isOperationExpand(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1. Multiply
      // by the type-legalization overhead.
      return LT.first * 1;
    }

    // Otherwise, assume that the instruction is scalarized.
    if (ValTy->isVectorTy()) {
      unsigned Num = ValTy->getVectorNumElements();
      if (CondTy)
        CondTy = CondTy->getScalarType();
      unsigned Cost = static_cast<T *>(this)->getCmpSelInstrCost(
          Opcode, ValTy->getScalarType(), CondTy);

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(ValTy, true, false) + Num * Cost;
    }

    // Unknown scalar opcode.
    return 1;
  }

  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
    std::pair<unsigned, MVT> LT =
        getTLI()->getTypeLegalizationCost(Val->getScalarType());

    return LT.first;
  }
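
  // A load or store of a legal type costs one unit per legalized register;
  // vector types that legalize to a wider type are assumed to scalarize
  // unless the target supports the matching extending load or truncating
  // store.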
  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) {
    assert(!Src->isVoidTy() && "Invalid type");
    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Src);

    // Assume that all loads of legal types cost 1.
    unsigned Cost = LT.first;

    if (Src->isVectorTy() &&
        Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) {
      // This is a vector load that legalizes to a larger type than the vector
      // itself. Unless the corresponding extending load or truncating store is
      // legal, this will scalarize.
      TargetLowering::LegalizeAction LA = TargetLowering::Expand;
      EVT MemVT = getTLI()->getValueType(Src, true);
      if (MemVT.isSimple() && MemVT != MVT::Other) {
        if (Opcode == Instruction::Store)
          LA = getTLI()->getTruncStoreAction(LT.second, MemVT.getSimpleVT());
        else
          LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);
      }

      if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
        // This is a vector load/store for some illegal type that is scalarized.
        // We must account for the cost of building or decomposing the vector.
        Cost += getScalarizationOverhead(Src, Opcode != Instruction::Store,
                                         Opcode == Instruction::Store);
      }
    }

    return Cost;
  }
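
  // Intrinsics that map to an ISD opcode reuse the legal/custom/expand cost
  // tiers used for arithmetic above; anything else is priced as a scalarized
  // sequence of scalar calls.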
  unsigned getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                 ArrayRef<Type *> Tys) {
    unsigned ISD = 0;
    switch (IID) {
    default: {
      // Assume that we need to scalarize this intrinsic.
      unsigned ScalarizationCost = 0;
      unsigned ScalarCalls = 1;
      if (RetTy->isVectorTy()) {
        ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
        ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
      }
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        if (Tys[i]->isVectorTy()) {
          ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
          ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
        }
      }

      return ScalarCalls + ScalarizationCost;
    }
    // Look for intrinsics that can be lowered directly or turned into a scalar
    // intrinsic instruction.
    case Intrinsic::sqrt:
      ISD = ISD::FSQRT;
      break;
    case Intrinsic::exp2:
      ISD = ISD::FEXP2;
      break;
    case Intrinsic::log10:
      ISD = ISD::FLOG10;
      break;
    case Intrinsic::log2:
      ISD = ISD::FLOG2;
      break;
    case Intrinsic::fabs:
      ISD = ISD::FABS;
      break;
    case Intrinsic::minnum:
      ISD = ISD::FMINNUM;
      break;
    case Intrinsic::maxnum:
      ISD = ISD::FMAXNUM;
      break;
    case Intrinsic::copysign:
      ISD = ISD::FCOPYSIGN;
      break;
    case Intrinsic::floor:
      ISD = ISD::FFLOOR;
      break;
    case Intrinsic::ceil:
      ISD = ISD::FCEIL;
      break;
    case Intrinsic::trunc:
      ISD = ISD::FTRUNC;
      break;
    case Intrinsic::nearbyint:
      ISD = ISD::FNEARBYINT;
      break;
    case Intrinsic::rint:
      ISD = ISD::FRINT;
      break;
    case Intrinsic::round:
      ISD = ISD::FROUND;
      break;
    case Intrinsic::fmuladd:
      ISD = ISD::FMA;
      break;
    // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      return 0;
    case Intrinsic::masked_store:
      return static_cast<T *>(this)
          ->getMaskedMemoryOpCost(Instruction::Store, Tys[0], 0, 0);
    case Intrinsic::masked_load:
      return static_cast<T *>(this)
          ->getMaskedMemoryOpCost(Instruction::Load, RetTy, 0, 0);
    }

    const TargetLoweringBase *TLI = getTLI();
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(RetTy);

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1.
      // If the type is split to multiple registers, assume that there is some
      // overhead to this.
      // TODO: Once we have extract/insert subvector cost we need to use them.
      if (LT.first > 1)
        return LT.first * 2;
      return LT.first * 1;
    }

    if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered, then assume that the code is twice
      // as expensive.
      return LT.first * 2;
    }

    // If we can't lower fmuladd into an FMA, estimate the cost as a floating
    // point mul followed by an add.
    if (IID == Intrinsic::fmuladd)
      return static_cast<T *>(this)
                 ->getArithmeticInstrCost(BinaryOperator::FMul, RetTy) +
             static_cast<T *>(this)
                 ->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy);

    // Else, assume that we need to scalarize this intrinsic. For math builtins
    // this will emit a costly libcall, adding call overhead and spills. Make it
    // very expensive.
    if (RetTy->isVectorTy()) {
      unsigned Num = RetTy->getVectorNumElements();
      unsigned Cost = static_cast<T *>(this)->getIntrinsicInstrCost(
          IID, RetTy->getScalarType(), Tys);
      return 10 * Cost * Num;
    }

    // This is going to be turned into a library call, make it expensive.
    return 10;
  }

  unsigned getNumberOfParts(Type *Tp) {
    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Tp);
    return LT.first;
  }

  unsigned getAddressComputationCost(Type *Ty, bool IsComplex) { return 0; }
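
  // A reduction of <N x Ty> is modeled as Log2(N) shuffle-and-operate levels,
  // each shuffling half of the remaining elements into position before
  // applying the operation; pairwise reductions need an extra shuffle per
  // level, hence the (IsPairwise + 1) factor below, and the final result must
  // still be read back out of the vector (the trailing extract overhead).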
  unsigned getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwise) {
    assert(Ty->isVectorTy() && "Expect a vector type");
    unsigned NumVecElts = Ty->getVectorNumElements();
    unsigned NumReduxLevels = Log2_32(NumVecElts);
    unsigned ArithCost =
        NumReduxLevels *
        static_cast<T *>(this)->getArithmeticInstrCost(Opcode, Ty);
    // Assume the pairwise shuffles add a cost.
    unsigned ShuffleCost =
        NumReduxLevels * (IsPairwise + 1) *
        static_cast<T *>(this)
            ->getShuffleCost(TTI::SK_ExtractSubvector, Ty, NumVecElts / 2, Ty);
    return ShuffleCost + ArithCost + getScalarizationOverhead(Ty, false, true);
  }