//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides a helper that implements much of the TTI interface in
/// terms of the target-independent code generator and TargetLowering
/// interfaces.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
#define LLVM_CODEGEN_BASICTTIIMPL_H

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"

namespace llvm {

extern cl::opt<unsigned> PartialUnrollingThreshold;

/// \brief Base class which can be used to help build a TTI implementation.
///
/// This class provides as much implementation of the TTI interface as is
/// possible using the target independent parts of the code generator.
///
/// In order to subclass it, your class must implement a getST() method to
/// return the subtarget, and a getTLI() method to return the target lowering.
/// We need these methods implemented in the derived class so that this class
/// doesn't have to duplicate storage for them.
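///
/// A minimal sketch of such a subclass (the MyTarget* names are hypothetical
/// placeholders, not part of this interface):
/// \code
///   class MyTargetTTIImpl : public BasicTTIImplBase<MyTargetTTIImpl> {
///     friend class BasicTTIImplBase<MyTargetTTIImpl>;
///     const MyTargetSubtargetInfo *ST;
///     const MyTargetLowering *TLI;
///
///     const MyTargetSubtargetInfo *getST() const { return ST; }
///     const MyTargetLowering *getTLI() const { return TLI; }
///     // ... plus a constructor initializing ST and TLI from the target.
///   };
/// \endcode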
template <typename T>
class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
private:
  typedef TargetTransformInfoImplCRTPBase<T> BaseT;
  typedef TargetTransformInfo TTI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
    assert(Ty->isVectorTy() && "Can only scalarize vectors");
    unsigned Cost = 0;

    for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
      if (Insert)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
      if (Extract)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
    }

    return Cost;
  }
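
  // Illustrative arithmetic only: under the default model, where each
  // InsertElement and ExtractElement costs 1, scalarizing a <4 x float> with
  // both Insert and Extract set yields 4 + 4 = 8. Targets that override
  // getVectorInstrCost will see different totals.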

  /// Estimate the cost overhead of SK_Alternate shuffle.
  unsigned getAltShuffleOverhead(Type *Ty) {
    assert(Ty->isVectorTy() && "Can only shuffle vectors");
    unsigned Cost = 0;
    // The shuffle cost is equal to the cost of extracting each element from
    // its argument plus the cost of inserting it onto the result vector.

    // e.g. A <4 x float> shuffle has a mask of <0,5,2,7>, i.e. we need to
    // extract from index 0 of the first vector, index 1 of the second vector,
    // index 2 of the first vector, and finally index 3 of the second vector,
    // and insert them at indices <0,1,2,3> of the result vector.
    for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
      Cost += static_cast<T *>(this)
                  ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
      Cost += static_cast<T *>(this)
                  ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
    }
    return Cost;
  }

  /// \brief Local query method delegates up to T, which *must* implement this!
  const TargetSubtargetInfo *getST() const {
    return static_cast<const T *>(this)->getST();
  }

  /// \brief Local query method delegates up to T, which *must* implement this!
  const TargetLoweringBase *getTLI() const {
    return static_cast<const T *>(this)->getTLI();
  }

protected:
  explicit BasicTTIImplBase(const TargetMachine *TM, const DataLayout &DL)
      : BaseT(DL) {}

  using TargetTransformInfoImplBase::DL;

public:
  // Provide value semantics. MSVC requires that we spell all of these out.
  BasicTTIImplBase(const BasicTTIImplBase &Arg)
      : BaseT(static_cast<const BaseT &>(Arg)) {}
  BasicTTIImplBase(BasicTTIImplBase &&Arg)
      : BaseT(std::move(static_cast<BaseT &>(Arg))) {}

  /// \name Scalar TTI Implementations
  /// @{

  bool hasBranchDivergence() { return false; }

  bool isSourceOfDivergence(const Value *V) { return false; }

  bool isLegalAddImmediate(int64_t imm) {
    return getTLI()->isLegalAddImmediate(imm);
  }

  bool isLegalICmpImmediate(int64_t imm) {
    return getTLI()->isLegalICmpImmediate(imm);
  }

  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale,
                             unsigned AddrSpace) {
    TargetLoweringBase::AddrMode AM;
    AM.BaseGV = BaseGV;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    AM.Scale = Scale;
    return getTLI()->isLegalAddressingMode(DL, AM, Ty, AddrSpace);
  }

  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale, unsigned AddrSpace) {
    TargetLoweringBase::AddrMode AM;
    AM.BaseGV = BaseGV;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    AM.Scale = Scale;
    return getTLI()->getScalingFactorCost(DL, AM, Ty, AddrSpace);
  }

  bool isTruncateFree(Type *Ty1, Type *Ty2) {
    return getTLI()->isTruncateFree(Ty1, Ty2);
  }

  bool isProfitableToHoist(Instruction *I) {
    return getTLI()->isProfitableToHoist(I);
  }

  bool isTypeLegal(Type *Ty) {
    EVT VT = getTLI()->getValueType(DL, Ty);
    return getTLI()->isTypeLegal(VT);
  }

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<const Value *> Arguments) {
    return BaseT::getIntrinsicCost(IID, RetTy, Arguments);
  }

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<Type *> ParamTys) {
    if (IID == Intrinsic::cttz) {
      if (getTLI()->isCheapToSpeculateCttz())
        return TargetTransformInfo::TCC_Basic;
      return TargetTransformInfo::TCC_Expensive;
    }

    if (IID == Intrinsic::ctlz) {
      if (getTLI()->isCheapToSpeculateCtlz())
        return TargetTransformInfo::TCC_Basic;
      return TargetTransformInfo::TCC_Expensive;
    }

    return BaseT::getIntrinsicCost(IID, RetTy, ParamTys);
  }

  unsigned getJumpBufAlignment() { return getTLI()->getJumpBufAlignment(); }

  unsigned getJumpBufSize() { return getTLI()->getJumpBufSize(); }

  bool shouldBuildLookupTables() {
    const TargetLoweringBase *TLI = getTLI();
    return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }

  bool haveFastSqrt(Type *Ty) {
    const TargetLoweringBase *TLI = getTLI();
    EVT VT = TLI->getValueType(DL, Ty);
    return TLI->isTypeLegal(VT) &&
           TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
  }

  unsigned getFPOpCost(Type *Ty) {
    // By default, FP instructions are no more expensive since they are
    // implemented in HW. Target-specific TTI can override this.
    return TargetTransformInfo::TCC_Basic;
  }

  unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
    const TargetLoweringBase *TLI = getTLI();
    switch (Opcode) {
    default:
      break;
    case Instruction::Trunc:
      if (TLI->isTruncateFree(OpTy, Ty))
        return TargetTransformInfo::TCC_Free;
      return TargetTransformInfo::TCC_Basic;
    case Instruction::ZExt:
      if (TLI->isZExtFree(OpTy, Ty))
        return TargetTransformInfo::TCC_Free;
      return TargetTransformInfo::TCC_Basic;
    }

    return BaseT::getOperationCost(Opcode, Ty, OpTy);
  }
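
  // For instance, on a target where truncating i64 to i32 is a no-op
  // (isTruncateFree returns true), getOperationCost(Instruction::Trunc,
  // i32, i64) above evaluates to TCC_Free. Illustrative only; the result
  // depends entirely on the target's TargetLowering.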

  void getUnrollingPreferences(Loop *L, TTI::UnrollingPreferences &UP) {
    // This unrolling functionality is target independent, but to provide some
    // motivation for its intended use, for x86:

    // According to the Intel 64 and IA-32 Architectures Optimization Reference
    // Manual, Intel Core models and later have a loop stream detector (and
    // associated uop queue) that can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have no more than 4 (8 for Nehalem and later) branches
    //    taken, and none of them may be calls.
    //  - The loop can have no more than 18 (28 for Nehalem and later) uops.

    // According to the Software Optimization Guide for AMD Family 15h
    // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
    // and loop buffer which can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have fewer than 16 branches.
    //  - The loop must have fewer than 40 uops in all executed loop branches.

    // The number of taken branches in a loop is hard to estimate here, and
    // benchmarking has revealed that it is better not to be conservative when
    // estimating the branch count. As a result, we'll ignore the branch limits
    // until someone finds a case where it matters in practice.

    unsigned MaxOps;
    const TargetSubtargetInfo *ST = getST();
    if (PartialUnrollingThreshold.getNumOccurrences() > 0)
      MaxOps = PartialUnrollingThreshold;
    else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
      MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
    else
      return;

    // Scan the loop: don't unroll loops with calls.
    for (Loop::block_iterator I = L->block_begin(), E = L->block_end(); I != E;
         ++I) {
      BasicBlock *BB = *I;

      for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
        if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
          ImmutableCallSite CS(&*J);
          if (const Function *F = CS.getCalledFunction()) {
            if (!static_cast<T *>(this)->isLoweredToCall(F))
              continue;
          }

          return;
        }
    }

    // Enable runtime and partial unrolling up to the specified size.
    UP.Partial = UP.Runtime = true;
    UP.PartialThreshold = UP.PartialOptSizeThreshold = MaxOps;
  }
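
  // As a hypothetical illustration: with a scheduling model reporting
  // LoopMicroOpBufferSize == 28, a call-free loop whose body is 7 ops gets
  // PartialThreshold = 28, allowing the unroller to unroll it by roughly a
  // factor of 4 before hitting the threshold.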

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(bool Vector) { return Vector ? 0 : 1; }

  unsigned getRegisterBitWidth(bool Vector) { return 32; }

  unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }

  unsigned getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None) {
    // Check if any of the operands are vector operands.
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

    bool IsFloat = Ty->getScalarType()->isFloatingPointTy();
    // Assume that floating-point arithmetic operations cost twice as much as
    // integer operations.
    unsigned OpCost = (IsFloat ? 2 : 1);

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1.
      // If the type is split to multiple registers, assume that there is some
      // overhead.
      // TODO: Once we have extract/insert subvector cost we need to use them.
      if (LT.first > 1)
        return LT.first * 2 * OpCost;
      return LT.first * 1 * OpCost;
    }

    if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered, then assume that the code is twice
      // as expensive.
      return LT.first * 2 * OpCost;
    }

    // Else, assume that we need to scalarize this op.
    if (Ty->isVectorTy()) {
      unsigned Num = Ty->getVectorNumElements();
      unsigned Cost = static_cast<T *>(this)
                          ->getArithmeticInstrCost(Opcode, Ty->getScalarType());
      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(Ty, true, true) + Num * Cost;
    }

    // We don't know anything about this scalar instruction.
    return OpCost;
  }
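
  // Worked example under the defaults above: a multiply on <4 x i64> that
  // must be scalarized costs 4 scalar multiplies plus the scalarization
  // overhead of 4 inserts and 4 extracts, i.e. 4 * 1 + 8 = 12 with unit
  // vector-instr costs. Hypothetical numbers; real targets override these
  // hooks.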

  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                          Type *SubTp) {
    if (Kind == TTI::SK_Alternate) {
      return getAltShuffleOverhead(Tp);
    }
    return 1;
  }

  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(DL, Src);
    std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(DL, Dst);

    // Check for NOOP conversions.
    if (SrcLT.first == DstLT.first &&
        SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

      // Bitcasts between types that are legalized to the same type are free.
      if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)
        return 0;
    }

    if (Opcode == Instruction::Trunc &&
        TLI->isTruncateFree(SrcLT.second, DstLT.second))
      return 0;

    if (Opcode == Instruction::ZExt &&
        TLI->isZExtFree(SrcLT.second, DstLT.second))
      return 0;

    // If the cast is marked as legal (or promote), then assume a low cost.
    if (SrcLT.first == DstLT.first &&
        TLI->isOperationLegalOrPromote(ISD, DstLT.second))
      return 1;

    // Handle scalar conversions.
    if (!Src->isVectorTy() && !Dst->isVectorTy()) {

      // Scalar bitcasts are usually free.
      if (Opcode == Instruction::BitCast)
        return 0;

      // Just check the op cost. If the operation is legal, then assume it
      // costs 1.
      if (!TLI->isOperationExpand(ISD, DstLT.second))
        return 1;

      // Assume that illegal scalar instructions are expensive.
      return 4;
    }

    // Check vector-to-vector casts.
    if (Dst->isVectorTy() && Src->isVectorTy()) {

      // If the cast is between same-sized registers, then the check is simple.
      if (SrcLT.first == DstLT.first &&
          SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

        // Assume that Zext is done using AND.
        if (Opcode == Instruction::ZExt)
          return 1;

        // Assume that sext is done using SHL and SRA.
        if (Opcode == Instruction::SExt)
          return 2;

        // Just check the op cost. If the operation is legal, then assume it
        // costs 1 and multiply by the type-legalization overhead.
        if (!TLI->isOperationExpand(ISD, DstLT.second))
          return SrcLT.first * 1;
      }

      // If we are converting vectors and the operation is illegal, or
      // if the vectors are legalized to different types, estimate the
      // scalarization costs.
      unsigned Num = Dst->getVectorNumElements();
      unsigned Cost = static_cast<T *>(this)->getCastInstrCost(
          Opcode, Dst->getScalarType(), Src->getScalarType());

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(Dst, true, true) + Num * Cost;
    }

    // We already handled vector-to-vector and scalar-to-scalar conversions.
    // This is where we handle bitcast between vectors and scalars. We need to
    // assume that the conversion is scalarized in one way or another.
    if (Opcode == Instruction::BitCast)
      // Illegal bitcasts are done by storing and loading from a stack slot.
      return (Src->isVectorTy() ? getScalarizationOverhead(Src, false, true)
                                : 0) +
             (Dst->isVectorTy() ? getScalarizationOverhead(Dst, true, false)
                                : 0);

    llvm_unreachable("Unhandled cast");
  }
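
  // For example, assuming the defaults above: a zext whose source and
  // destination legalize to same-sized registers (say, <4 x i16> promoted to
  // <4 x i32>) is modeled as a single AND (cost 1), while the corresponding
  // sext costs 2 (SHL + SRA). Whether promotion happens that way is
  // target-dependent.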

  unsigned getCFInstrCost(unsigned Opcode) {
    // Branches are assumed to be predicted.
    return 0;
  }

  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    // Selects on vectors are actually vector selects.
    if (ISD == ISD::SELECT) {
      assert(CondTy && "CondTy must exist");
      if (CondTy->isVectorTy())
        ISD = ISD::VSELECT;
    }

    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

    if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
        !TLI->isOperationExpand(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1. Multiply
      // by the type-legalization overhead.
      return LT.first;
    }

    // Otherwise, assume that the operation is scalarized.
    if (ValTy->isVectorTy()) {
      unsigned Num = ValTy->getVectorNumElements();
      if (CondTy)
        CondTy = CondTy->getScalarType();
      unsigned Cost = static_cast<T *>(this)->getCmpSelInstrCost(
          Opcode, ValTy->getScalarType(), CondTy);

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(ValTy, true, false) + Num * Cost;
    }

    // Unknown scalar opcode.
    return 1;
  }

  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
    std::pair<unsigned, MVT> LT =
        getTLI()->getTypeLegalizationCost(DL, Val->getScalarType());

    return LT.first;
  }

  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) {
    assert(!Src->isVoidTy() && "Invalid type");
    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Src);

    // Assume that all loads of legal types cost 1.
    unsigned Cost = LT.first;

    if (Src->isVectorTy() &&
        Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) {
      // This is a vector load that legalizes to a larger type than the vector
      // itself. Unless the corresponding extending load or truncating store is
      // legal, this will scalarize.
      TargetLowering::LegalizeAction LA = TargetLowering::Expand;
      EVT MemVT = getTLI()->getValueType(DL, Src);
      if (Opcode == Instruction::Store)
        LA = getTLI()->getTruncStoreAction(LT.second, MemVT);
      else
        LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);

      if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
        // This is a vector load/store for some illegal type that is scalarized.
        // We must account for the cost of building or decomposing the vector.
        Cost += getScalarizationOverhead(Src, Opcode != Instruction::Store,
                                         Opcode == Instruction::Store);
      }
    }

    return Cost;
  }
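
  // Example: a load of a vector whose IR type is narrower than its legalized
  // register type (a hypothetical <3 x i8> widened by legalization, say) adds
  // the cost of inserting each loaded element into the result vector, unless
  // the target marks the corresponding extending load legal or custom.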

  unsigned getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                      unsigned Factor,
                                      ArrayRef<unsigned> Indices,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
    VectorType *VT = dyn_cast<VectorType>(VecTy);
    assert(VT && "Expect a vector type for interleaved memory op");

    unsigned NumElts = VT->getNumElements();
    assert(Factor > 1 && NumElts % Factor == 0 && "Invalid interleave factor");

    unsigned NumSubElts = NumElts / Factor;
    VectorType *SubVT = VectorType::get(VT->getElementType(), NumSubElts);

    // First, the cost of the load/store operation itself.
    unsigned Cost = static_cast<T *>(this)->getMemoryOpCost(
        Opcode, VecTy, Alignment, AddressSpace);

    // Then add the cost of the interleave operation.
    if (Opcode == Instruction::Load) {
      // The interleave cost is similar to extracting the sub-vectors' elements
      // from the wide vector and inserting them into the sub-vectors.
      //
      // E.g. An interleaved load of factor 2 (with one member of index 0):
      //   %vec = load <8 x i32>, <8 x i32>* %ptr
      //   %v0 = shuffle %vec, undef, <0, 2, 4, 6>   ; Index 0
      // The cost is estimated as extracting elements at 0, 2, 4, 6 from the
      // <8 x i32> vector and inserting them into a <4 x i32> vector.

      assert(Indices.size() <= Factor &&
             "Interleaved memory op has too many members");

      for (unsigned Index : Indices) {
        assert(Index < Factor && "Invalid index for interleaved memory op");

        // Extract elements from the loaded vector for each sub-vector.
        for (unsigned i = 0; i < NumSubElts; i++)
          Cost += static_cast<T *>(this)->getVectorInstrCost(
              Instruction::ExtractElement, VT, Index + i * Factor);
      }

      unsigned InsSubCost = 0;
      for (unsigned i = 0; i < NumSubElts; i++)
        InsSubCost += static_cast<T *>(this)->getVectorInstrCost(
            Instruction::InsertElement, SubVT, i);

      Cost += Indices.size() * InsSubCost;
    } else {
      // The interleave cost is to extract all elements from the sub-vectors
      // and insert them into the wide vector.
      //
      // E.g. An interleaved store of factor 2:
      //   %v0_v1 = shuffle %v0, %v1, <0, 4, 1, 5, 2, 6, 3, 7>
      //   store <8 x i32> %v0_v1, <8 x i32>* %ptr
      // The cost is estimated as extracting all elements from both <4 x i32>
      // vectors and inserting them into the <8 x i32> vector.

      unsigned ExtSubCost = 0;
      for (unsigned i = 0; i < NumSubElts; i++)
        ExtSubCost += static_cast<T *>(this)->getVectorInstrCost(
            Instruction::ExtractElement, SubVT, i);
      Cost += ExtSubCost * Factor;

      for (unsigned i = 0; i < NumElts; i++)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::InsertElement, VT, i);
    }

    return Cost;
  }
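
  // Putting the factor-2 <8 x i32> examples above together, with unit
  // per-element costs: a load that uses both members costs the wide load
  // plus 8 extracts plus 2 * 4 inserts; a store costs 2 * 4 extracts plus
  // 8 inserts plus the wide store. Illustrative arithmetic only.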

  unsigned getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                 ArrayRef<Type *> Tys) {
    unsigned ISD = 0;
    switch (IID) {
    default: {
      // Assume that we need to scalarize this intrinsic.
      unsigned ScalarizationCost = 0;
      unsigned ScalarCalls = 1;
      Type *ScalarRetTy = RetTy;
      if (RetTy->isVectorTy()) {
        ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
        ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
        ScalarRetTy = RetTy->getScalarType();
      }
      SmallVector<Type *, 4> ScalarTys;
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        Type *Ty = Tys[i];
        if (Ty->isVectorTy()) {
          ScalarizationCost += getScalarizationOverhead(Ty, false, true);
          ScalarCalls = std::max(ScalarCalls, Ty->getVectorNumElements());
          Ty = Ty->getScalarType();
        }
        ScalarTys.push_back(Ty);
      }
      if (ScalarCalls == 1)
        return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.

      unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
          IID, ScalarRetTy, ScalarTys);

      return ScalarCalls * ScalarCost + ScalarizationCost;
    }
    // Look for intrinsics that can be lowered directly or turned into a
    // scalar intrinsic call.
    case Intrinsic::sqrt:
      ISD = ISD::FSQRT;
      break;
    case Intrinsic::sin:
      ISD = ISD::FSIN;
      break;
    case Intrinsic::cos:
      ISD = ISD::FCOS;
      break;
    case Intrinsic::exp:
      ISD = ISD::FEXP;
      break;
    case Intrinsic::exp2:
      ISD = ISD::FEXP2;
      break;
    case Intrinsic::log:
      ISD = ISD::FLOG;
      break;
    case Intrinsic::log10:
      ISD = ISD::FLOG10;
      break;
    case Intrinsic::log2:
      ISD = ISD::FLOG2;
      break;
    case Intrinsic::fabs:
      ISD = ISD::FABS;
      break;
    case Intrinsic::minnum:
      ISD = ISD::FMINNUM;
      break;
    case Intrinsic::maxnum:
      ISD = ISD::FMAXNUM;
      break;
    case Intrinsic::copysign:
      ISD = ISD::FCOPYSIGN;
      break;
    case Intrinsic::floor:
      ISD = ISD::FFLOOR;
      break;
    case Intrinsic::ceil:
      ISD = ISD::FCEIL;
      break;
    case Intrinsic::trunc:
      ISD = ISD::FTRUNC;
      break;
    case Intrinsic::nearbyint:
      ISD = ISD::FNEARBYINT;
      break;
    case Intrinsic::rint:
      ISD = ISD::FRINT;
      break;
    case Intrinsic::round:
      ISD = ISD::FROUND;
      break;
    case Intrinsic::pow:
      ISD = ISD::FPOW;
      break;
    case Intrinsic::fma:
      ISD = ISD::FMA;
      break;
    case Intrinsic::fmuladd:
      ISD = ISD::FMA;
      break;
    // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      return 0;
    case Intrinsic::masked_store:
      return static_cast<T *>(this)
          ->getMaskedMemoryOpCost(Instruction::Store, Tys[0], 0, 0);
    case Intrinsic::masked_load:
      return static_cast<T *>(this)
          ->getMaskedMemoryOpCost(Instruction::Load, RetTy, 0, 0);
    }

    const TargetLoweringBase *TLI = getTLI();
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, RetTy);

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1.
      // If the type is split to multiple registers, assume that there is some
      // overhead.
      // TODO: Once we have extract/insert subvector cost we need to use them.
      if (LT.first > 1)
        return LT.first * 2;
      return LT.first * 1;
    }

    if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered, then assume that the code is twice
      // as expensive.
      return LT.first * 2;
    }

    // If we can't lower fmuladd into an FMA, estimate the cost as a
    // floating-point multiply followed by an add.
    if (IID == Intrinsic::fmuladd)
      return static_cast<T *>(this)
                 ->getArithmeticInstrCost(BinaryOperator::FMul, RetTy) +
             static_cast<T *>(this)
                 ->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy);

    // Else, assume that we need to scalarize this intrinsic. For math builtins
    // this will emit a costly libcall, adding call overhead and spills. Make it
    // expensive.
    if (RetTy->isVectorTy()) {
      unsigned ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
      unsigned ScalarCalls = RetTy->getVectorNumElements();
      SmallVector<Type *, 4> ScalarTys;
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        Type *Ty = Tys[i];
        if (Ty->isVectorTy())
          Ty = Ty->getScalarType();
        ScalarTys.push_back(Ty);
      }
      unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
          IID, RetTy->getScalarType(), ScalarTys);
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        if (Tys[i]->isVectorTy()) {
          ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
          ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
        }
      }

      return ScalarCalls * ScalarCost + ScalarizationCost;
    }

    // This is going to be turned into a library call; make it expensive.
    return 10;
  }
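
  // E.g. a hypothetical llvm.sin call on <4 x float>, on a target with no
  // FSIN lowering, falls through to the scalarization estimate: 4 scalar sin
  // calls at the libcall cost of 10 each, plus insert/extract overhead, i.e.
  // 4 * 10 + 8 = 48 with unit per-element costs.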

  /// \brief Compute a cost of the given call instruction.
  ///
  /// Compute the cost of calling function F with return type RetTy and
  /// argument types Tys. F might be nullptr, in this case the cost of an
  /// arbitrary call with the specified signature will be returned.
  /// This is used, for instance, when we estimate a call of a vector
  /// counterpart of the given function.
  /// \param F Called function, might be nullptr.
  /// \param RetTy Return value types.
  /// \param Tys Argument types.
  /// \returns The cost of Call instruction.
  unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
    return 10;
  }

  unsigned getNumberOfParts(Type *Tp) {
    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(DL, Tp);
    return LT.first;
  }

  unsigned getAddressComputationCost(Type *Ty, bool IsComplex) { return 0; }

  unsigned getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwise) {
    assert(Ty->isVectorTy() && "Expect a vector type");
    unsigned NumVecElts = Ty->getVectorNumElements();
    unsigned NumReduxLevels = Log2_32(NumVecElts);
    unsigned ArithCost =
        NumReduxLevels *
        static_cast<T *>(this)->getArithmeticInstrCost(Opcode, Ty);
    // Assume the pairwise shuffles add a cost.
    unsigned ShuffleCost =
        NumReduxLevels * (IsPairwise + 1) *
        static_cast<T *>(this)
            ->getShuffleCost(TTI::SK_ExtractSubvector, Ty, NumVecElts / 2, Ty);
    return ShuffleCost + ArithCost + getScalarizationOverhead(Ty, false, true);
  }
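
  // For a non-pairwise fadd reduction of <8 x float>, NumReduxLevels is
  // log2(8) = 3, giving 3 arithmetic ops plus 3 shuffles (6 if pairwise),
  // plus the extract term from getScalarizationOverhead. Illustrative;
  // per-op costs come from the hooks above.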

  /// @}
};

/// \brief Concrete BasicTTIImpl that can be used if no further customization
/// is needed.
class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
  typedef BasicTTIImplBase<BasicTTIImpl> BaseT;
  friend class BasicTTIImplBase<BasicTTIImpl>;

  const TargetSubtargetInfo *ST;
  const TargetLoweringBase *TLI;

  const TargetSubtargetInfo *getST() const { return ST; }
  const TargetLoweringBase *getTLI() const { return TLI; }

public:
  explicit BasicTTIImpl(const TargetMachine *TM, const Function &F);

  // Provide value semantics. MSVC requires that we spell all of these out.
  BasicTTIImpl(const BasicTTIImpl &Arg)
      : BaseT(static_cast<const BaseT &>(Arg)), ST(Arg.ST), TLI(Arg.TLI) {}
  BasicTTIImpl(BasicTTIImpl &&Arg)
      : BaseT(std::move(static_cast<BaseT &>(Arg))), ST(std::move(Arg.ST)),
        TLI(std::move(Arg.TLI)) {}
};

} // end namespace llvm

#endif // LLVM_CODEGEN_BASICTTIIMPL_H