//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides a helper that implements much of the TTI interface in
/// terms of the target-independent code generator and TargetLowering
/// interfaces.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
#define LLVM_CODEGEN_BASICTTIIMPL_H
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"

namespace llvm {

extern cl::opt<unsigned> PartialUnrollingThreshold;

/// \brief Base class which can be used to help build a TTI implementation.
///
/// This class provides as much implementation of the TTI interface as is
/// possible using the target independent parts of the code generator.
///
/// In order to subclass it, your class must implement a getST() method to
/// return the subtarget, and a getTLI() method to return the target lowering.
/// We need these methods implemented in the derived class so that this class
/// doesn't have to duplicate storage for them.
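///
/// A minimal sketch of a derived class (illustrative names, not a real
/// target):
/// \code
///   class MyTTIImpl : public BasicTTIImplBase<MyTTIImpl> {
///     friend BasicTTIImplBase<MyTTIImpl>;
///     const MySubtarget *ST;        // returned by getST()
///     const MyTargetLowering *TLI;  // returned by getTLI()
///     const MySubtarget *getST() const { return ST; }
///     const MyTargetLowering *getTLI() const { return TLI; }
///   };
/// \endcode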
template <typename T>
class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
private:
  typedef TargetTransformInfoImplCRTPBase<T> BaseT;
  typedef TargetTransformInfo TTI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
    assert(Ty->isVectorTy() && "Can only scalarize vectors");
    unsigned Cost = 0;

    for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
      if (Insert)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
      if (Extract)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
    }

    return Cost;
  }
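  // Illustrative: with Insert and Extract both set, a <4 x i32> sums four
  // InsertElement and four ExtractElement costs -- e.g. 8 if the target
  // reports a per-element cost of 1.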

  /// Estimate the cost overhead of SK_Alternate shuffle.
  unsigned getAltShuffleOverhead(Type *Ty) {
    assert(Ty->isVectorTy() && "Can only shuffle vectors");
    unsigned Cost = 0;
    // Shuffle cost is equal to the cost of extracting elements from its
    // arguments plus the cost of inserting them into the result vector.

    // e.g. an SK_Alternate shuffle of <4 x float> has a mask of <0,5,2,7>,
    // i.e. we need to extract from index 0 of the first vector, index 1 of
    // the second vector, index 2 of the first vector and finally index 3 of
    // the second vector, and insert them at indices <0,1,2,3> of the result
    // vector.
    for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
      Cost += static_cast<T *>(this)
                  ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
      Cost += static_cast<T *>(this)
                  ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
    }
    return Cost;
  }

  /// \brief Local query method delegates up to T which *must* implement this!
  const TargetSubtargetInfo *getST() const {
    return static_cast<const T *>(this)->getST();
  }

  /// \brief Local query method delegates up to T which *must* implement this!
  const TargetLoweringBase *getTLI() const {
    return static_cast<const T *>(this)->getTLI();
  }

protected:
  explicit BasicTTIImplBase(const TargetMachine *TM)
      : BaseT(TM->getDataLayout()) {}

public:
  // Provide value semantics. MSVC requires that we spell all of these out.
  BasicTTIImplBase(const BasicTTIImplBase &Arg)
      : BaseT(static_cast<const BaseT &>(Arg)) {}
  BasicTTIImplBase(BasicTTIImplBase &&Arg)
      : BaseT(std::move(static_cast<BaseT &>(Arg))) {}
  BasicTTIImplBase &operator=(const BasicTTIImplBase &RHS) {
    BaseT::operator=(static_cast<const BaseT &>(RHS));
    return *this;
  }
  BasicTTIImplBase &operator=(BasicTTIImplBase &&RHS) {
    BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
    return *this;
  }

  /// \name Scalar TTI Implementations
  /// @{

  bool hasBranchDivergence() { return false; }

  bool isLegalAddImmediate(int64_t imm) {
    return getTLI()->isLegalAddImmediate(imm);
  }

  bool isLegalICmpImmediate(int64_t imm) {
    return getTLI()->isLegalICmpImmediate(imm);
  }

  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale) {
    TargetLoweringBase::AddrMode AM;
    AM.BaseGV = BaseGV;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    AM.Scale = Scale;
    return getTLI()->isLegalAddressingMode(AM, Ty);
  }
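  // Illustrative query (hypothetical values): a pass such as
  // LoopStrengthReduce might ask whether "GV + 16 + 4*Index" is legal for an
  // i32 access, i.e. isLegalAddressingMode(Int32Ty, GV, /*BaseOffset=*/16,
  // /*HasBaseReg=*/false, /*Scale=*/4).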

  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale) {
    TargetLoweringBase::AddrMode AM;
    AM.BaseGV = BaseGV;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    AM.Scale = Scale;
    return getTLI()->getScalingFactorCost(AM, Ty);
  }

  bool isTruncateFree(Type *Ty1, Type *Ty2) {
    return getTLI()->isTruncateFree(Ty1, Ty2);
  }

  bool isProfitableToHoist(Instruction *I) {
    return getTLI()->isProfitableToHoist(I);
  }

  bool isTypeLegal(Type *Ty) {
    EVT VT = getTLI()->getValueType(Ty);
    return getTLI()->isTypeLegal(VT);
  }

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<const Value *> Arguments) {
    return BaseT::getIntrinsicCost(IID, RetTy, Arguments);
  }

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<Type *> ParamTys) {
    if (IID == Intrinsic::cttz) {
      if (getTLI()->isCheapToSpeculateCttz())
        return TargetTransformInfo::TCC_Basic;
      return TargetTransformInfo::TCC_Expensive;
    }

    if (IID == Intrinsic::ctlz) {
      if (getTLI()->isCheapToSpeculateCtlz())
        return TargetTransformInfo::TCC_Basic;
      return TargetTransformInfo::TCC_Expensive;
    }

    return BaseT::getIntrinsicCost(IID, RetTy, ParamTys);
  }

  unsigned getJumpBufAlignment() { return getTLI()->getJumpBufAlignment(); }

  unsigned getJumpBufSize() { return getTLI()->getJumpBufSize(); }

  bool shouldBuildLookupTables() {
    const TargetLoweringBase *TLI = getTLI();
    return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }
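  // Illustrative: with BR_JT legal, SimplifyCFG may turn a dense switch into
  // a lookup table; a target with neither BR_JT nor BRIND (indirect branches)
  // answers false here and keeps switches as compare chains.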

  bool haveFastSqrt(Type *Ty) {
    const TargetLoweringBase *TLI = getTLI();
    EVT VT = TLI->getValueType(Ty);
    return TLI->isTypeLegal(VT) &&
           TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
  }

  unsigned getFPOpCost(Type *Ty) {
    // By default, FP instructions are assumed to be no more expensive than
    // integer ones, since they are implemented in HW. Target-specific TTI can
    // override this.
    return TargetTransformInfo::TCC_Basic;
  }

  unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
    const TargetLoweringBase *TLI = getTLI();
    switch (Opcode) {
    default:
      break;
    case Instruction::Trunc: {
      if (TLI->isTruncateFree(OpTy, Ty))
        return TargetTransformInfo::TCC_Free;
      return TargetTransformInfo::TCC_Basic;
    }
    case Instruction::ZExt: {
      if (TLI->isZExtFree(OpTy, Ty))
        return TargetTransformInfo::TCC_Free;
      return TargetTransformInfo::TCC_Basic;
    }
    }

    return BaseT::getOperationCost(Opcode, Ty, OpTy);
  }

  void getUnrollingPreferences(Loop *L, TTI::UnrollingPreferences &UP) {
    // This unrolling functionality is target independent, but to provide some
    // motivation for its intended use, for x86:

    // According to the Intel 64 and IA-32 Architectures Optimization Reference
    // Manual, Intel Core models and later have a loop stream detector (and
    // associated uop queue) that can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have no more than 4 (8 for Nehalem and later) branches
    //    taken, and none of them may be calls.
    //  - The loop can have no more than 18 (28 for Nehalem and later) uops.

    // According to the Software Optimization Guide for AMD Family 15h
    // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
    // and loop buffer which can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have fewer than 16 branches
    //  - The loop must have fewer than 40 uops in all executed loop branches

    // The number of taken branches in a loop is hard to estimate here, and
    // benchmarking has revealed that it is better not to be conservative when
    // estimating the branch count. As a result, we'll ignore the branch limits
    // until someone finds a case where it matters in practice.

    unsigned MaxOps;
    const TargetSubtargetInfo *ST = getST();
    if (PartialUnrollingThreshold.getNumOccurrences() > 0)
      MaxOps = PartialUnrollingThreshold;
    else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
      MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
    else
      return;

    // Scan the loop: don't unroll loops with calls.
    for (Loop::block_iterator I = L->block_begin(), E = L->block_end(); I != E;
         ++I) {
      BasicBlock *BB = *I;

      for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
        if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
          ImmutableCallSite CS(J);
          if (const Function *F = CS.getCalledFunction()) {
            // Calls that lower to plain instructions (e.g. many intrinsics)
            // do not block unrolling.
            if (!static_cast<T *>(this)->isLoweredToCall(F))
              continue;
          }

          return;
        }
    }

    // Enable runtime and partial unrolling up to the specified size.
    UP.Partial = UP.Runtime = true;
    UP.PartialThreshold = UP.PartialOptSizeThreshold = MaxOps;
  }
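  // Illustrative: if PartialUnrollingThreshold is not set on the command line
  // and the scheduling model reports LoopMicroOpBufferSize == 28, a call-free
  // loop gets UP.Partial/UP.Runtime enabled with UP.PartialThreshold == 28.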

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(bool Vector) { return 1; }

  unsigned getRegisterBitWidth(bool Vector) { return 32; }

  unsigned getMaxInterleaveFactor() { return 1; }

  unsigned getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None) {
    // Check if any of the operands are vector operands.
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

    bool IsFloat = Ty->getScalarType()->isFloatingPointTy();
    // Assume that floating point arithmetic operations cost twice as much as
    // integer operations.
    unsigned OpCost = (IsFloat ? 2 : 1);

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1.
      // If the type is split to multiple registers, assume that there is some
      // overhead to this.
      // TODO: Once we have extract/insert subvector cost we need to use them.
      if (LT.first > 1)
        return LT.first * 2 * OpCost;
      return LT.first * 1 * OpCost;
    }

    if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered then assume
      // that the code is twice as expensive.
      return LT.first * 2 * OpCost;
    }

    // Else, assume that we need to scalarize this op.
    if (Ty->isVectorTy()) {
      unsigned Num = Ty->getVectorNumElements();
      unsigned Cost = static_cast<T *>(this)
                          ->getArithmeticInstrCost(Opcode, Ty->getScalarType());
      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(Ty, true, true) + Num * Cost;
    }

    // We don't know anything about this scalar instruction.
    return OpCost;
  }
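  // Worked example (illustrative, assuming 128-bit vector registers and a
  // legal add): an fadd on <8 x float> splits into two <4 x float> ops, so
  // LT.first == 2 and the estimate is 2 * 2 * OpCost == 8 with the FP OpCost
  // of 2.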

  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                          Type *SubTp) {
    if (Kind == TTI::SK_Alternate) {
      return getAltShuffleOverhead(Tp);
    }
    return 1;
  }

  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(Src);
    std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(Dst);

    // Check for NOOP conversions.
    if (SrcLT.first == DstLT.first &&
        SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

      // Bitcasts between types that are legalized to the same type are free.
      if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)
        return 0;
    }

    if (Opcode == Instruction::Trunc &&
        TLI->isTruncateFree(SrcLT.second, DstLT.second))
      return 0;

    if (Opcode == Instruction::ZExt &&
        TLI->isZExtFree(SrcLT.second, DstLT.second))
      return 0;

    // If the cast is marked as legal (or promote) then assume low cost.
    if (SrcLT.first == DstLT.first &&
        TLI->isOperationLegalOrPromote(ISD, DstLT.second))
      return 1;

    // Handle scalar conversions.
    if (!Src->isVectorTy() && !Dst->isVectorTy()) {

      // Scalar bitcasts are usually free.
      if (Opcode == Instruction::BitCast)
        return 0;

      // Just check the op cost. If the operation is legal then assume it
      // costs 1.
      if (!TLI->isOperationExpand(ISD, DstLT.second))
        return 1;

      // Assume that illegal scalar instructions are expensive.
      return 4;
    }

    // Check vector-to-vector casts.
    if (Dst->isVectorTy() && Src->isVectorTy()) {

      // If the cast is between same-sized registers, then the check is simple.
      if (SrcLT.first == DstLT.first &&
          SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

        // Assume that Zext is done using AND.
        if (Opcode == Instruction::ZExt)
          return 1;

        // Assume that sext is done using SHL and SRA.
        if (Opcode == Instruction::SExt)
          return 2;

        // Just check the op cost. If the operation is legal then assume it
        // costs 1 and multiply by the type-legalization overhead.
        if (!TLI->isOperationExpand(ISD, DstLT.second))
          return SrcLT.first * 1;
      }

      // If we are converting vectors and the operation is illegal, or
      // if the vectors are legalized to different types, estimate the
      // scalarization costs.
      unsigned Num = Dst->getVectorNumElements();
      unsigned Cost = static_cast<T *>(this)->getCastInstrCost(
          Opcode, Dst->getScalarType(), Src->getScalarType());

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(Dst, true, true) + Num * Cost;
    }

    // We already handled vector-to-vector and scalar-to-scalar conversions.
    // This is where we handle bitcasts between vectors and scalars. We need to
    // assume that the conversion is scalarized in one way or another.
    if (Opcode == Instruction::BitCast)
      // Illegal bitcasts are done by storing and loading from a stack slot.
      return (Src->isVectorTy() ? getScalarizationOverhead(Src, false, true)
                                : 0) +
             (Dst->isVectorTy() ? getScalarizationOverhead(Dst, true, false)
                                : 0);

    llvm_unreachable("Unhandled cast");
  }
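  // Worked example (illustrative): for a vector sext whose source and
  // destination legalize to same-sized registers, the estimate is 2 (SHL +
  // SRA); the matching zext is 1 (AND), and a bitcast between such types is
  // free.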

  unsigned getCFInstrCost(unsigned Opcode) {
    // Branches are assumed to be predicted.
    return 0;
  }

  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    // Selects on vectors are actually vector selects.
    if (ISD == ISD::SELECT) {
      assert(CondTy && "CondTy must exist");
      if (CondTy->isVectorTy())
        ISD = ISD::VSELECT;
    }

    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

    if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
        !TLI->isOperationExpand(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1. Multiply
      // by the type-legalization overhead.
      return LT.first * 1;
    }

    // Otherwise, assume that the operation is scalarized.
    if (ValTy->isVectorTy()) {
      unsigned Num = ValTy->getVectorNumElements();
      if (CondTy)
        CondTy = CondTy->getScalarType();
      unsigned Cost = static_cast<T *>(this)->getCmpSelInstrCost(
          Opcode, ValTy->getScalarType(), CondTy);

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(ValTy, true, false) + Num * Cost;
    }

    // Unknown scalar opcode.
    return 1;
  }
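  // Illustrative: an icmp on <4 x i32> with a legal v4i32 compare costs
  // LT.first (typically 1); if it must be scalarized instead, the estimate is
  // four scalar compares plus the overhead of inserting the results back into
  // a vector.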

  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
    std::pair<unsigned, MVT> LT =
        getTLI()->getTypeLegalizationCost(Val->getScalarType());

    return LT.first;
  }

  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) {
    assert(!Src->isVoidTy() && "Invalid type");
    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Src);

    // Assume that all loads of legal types cost 1.
    unsigned Cost = LT.first;

    if (Src->isVectorTy() &&
        Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) {
      // This is a vector load that legalizes to a larger type than the vector
      // itself. Unless the corresponding extending load or truncating store is
      // legal, this will scalarize.
      TargetLowering::LegalizeAction LA = TargetLowering::Expand;
      EVT MemVT = getTLI()->getValueType(Src, true);
      if (MemVT.isSimple() && MemVT != MVT::Other) {
        if (Opcode == Instruction::Store)
          LA = getTLI()->getTruncStoreAction(LT.second, MemVT.getSimpleVT());
        else
          LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);
      }

      if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
        // This is a vector load/store for some illegal type that is scalarized.
        // We must account for the cost of building or decomposing the vector.
        Cost += getScalarizationOverhead(Src, Opcode != Instruction::Store,
                                         Opcode == Instruction::Store);
      }
    }

    return Cost;
  }
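  // Illustrative: a <4 x i8> load that legalizes to a wider vector type
  // scalarizes unless the target marks the corresponding EXTLOAD as Legal or
  // Custom; the scalarized form adds four InsertElement costs on top of
  // LT.first.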

  unsigned getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                 ArrayRef<Type *> Tys) {
    unsigned ISD = 0;
    switch (IID) {
    default: {
      // Assume that we need to scalarize this intrinsic.
      unsigned ScalarizationCost = 0;
      unsigned ScalarCalls = 1;
      Type *ScalarRetTy = RetTy;
      if (RetTy->isVectorTy()) {
        ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
        ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
        ScalarRetTy = RetTy->getScalarType();
      }
      SmallVector<Type *, 4> ScalarTys;
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        Type *Ty = Tys[i];
        if (Ty->isVectorTy()) {
          ScalarizationCost += getScalarizationOverhead(Ty, false, true);
          ScalarCalls = std::max(ScalarCalls, Ty->getVectorNumElements());
          Ty = Ty->getScalarType();
        }
        ScalarTys.push_back(Ty);
      }
      if (ScalarCalls == 1)
        return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.

      unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
          IID, ScalarRetTy, ScalarTys);

      return ScalarCalls * ScalarCost + ScalarizationCost;
    }
    // Look for intrinsics that can be lowered directly or turned into a scalar
    // intrinsic instruction.
    case Intrinsic::sqrt:
      ISD = ISD::FSQRT;
      break;
    case Intrinsic::sin:
      ISD = ISD::FSIN;
      break;
    case Intrinsic::cos:
      ISD = ISD::FCOS;
      break;
    case Intrinsic::exp:
      ISD = ISD::FEXP;
      break;
    case Intrinsic::exp2:
      ISD = ISD::FEXP2;
      break;
    case Intrinsic::log:
      ISD = ISD::FLOG;
      break;
    case Intrinsic::log10:
      ISD = ISD::FLOG10;
      break;
    case Intrinsic::log2:
      ISD = ISD::FLOG2;
      break;
    case Intrinsic::fabs:
      ISD = ISD::FABS;
      break;
    case Intrinsic::minnum:
      ISD = ISD::FMINNUM;
      break;
    case Intrinsic::maxnum:
      ISD = ISD::FMAXNUM;
      break;
    case Intrinsic::copysign:
      ISD = ISD::FCOPYSIGN;
      break;
    case Intrinsic::floor:
      ISD = ISD::FFLOOR;
      break;
    case Intrinsic::ceil:
      ISD = ISD::FCEIL;
      break;
    case Intrinsic::trunc:
      ISD = ISD::FTRUNC;
      break;
    case Intrinsic::nearbyint:
      ISD = ISD::FNEARBYINT;
      break;
    case Intrinsic::rint:
      ISD = ISD::FRINT;
      break;
    case Intrinsic::round:
      ISD = ISD::FROUND;
      break;
    case Intrinsic::pow:
      ISD = ISD::FPOW;
      break;
    case Intrinsic::fma:
      ISD = ISD::FMA;
      break;
    case Intrinsic::fmuladd:
      ISD = ISD::FMA;
      break;
    // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      return 0;
    case Intrinsic::masked_store:
      return static_cast<T *>(this)
          ->getMaskedMemoryOpCost(Instruction::Store, Tys[0], 0, 0);
    case Intrinsic::masked_load:
      return static_cast<T *>(this)
          ->getMaskedMemoryOpCost(Instruction::Load, RetTy, 0, 0);
    }

    const TargetLoweringBase *TLI = getTLI();
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(RetTy);

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1.
      // If the type is split to multiple registers, assume that there is some
      // overhead to this.
      // TODO: Once we have extract/insert subvector cost we need to use them.
      if (LT.first > 1)
        return LT.first * 2;
      return LT.first * 1;
    }

    if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered then assume
      // that the code is twice as expensive.
      return LT.first * 2;
    }

    // If we can't lower fmuladd into an FMA estimate the cost as a floating
    // point mul followed by an add.
    if (IID == Intrinsic::fmuladd)
      return static_cast<T *>(this)
                 ->getArithmeticInstrCost(BinaryOperator::FMul, RetTy) +
             static_cast<T *>(this)
                 ->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy);

    // Else, assume that we need to scalarize this intrinsic. For math builtins
    // this will emit a costly libcall, adding call overhead and spills. Make it
    // very expensive.
    if (RetTy->isVectorTy()) {
      unsigned ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
      unsigned ScalarCalls = RetTy->getVectorNumElements();
      SmallVector<Type *, 4> ScalarTys;
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        Type *Ty = Tys[i];
        if (Ty->isVectorTy())
          Ty = Ty->getScalarType();
        ScalarTys.push_back(Ty);
      }
      unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
          IID, RetTy->getScalarType(), ScalarTys);
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        if (Tys[i]->isVectorTy()) {
          ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
          ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
        }
      }

      return ScalarCalls * ScalarCost + ScalarizationCost;
    }

    // This is going to be turned into a library call, make it expensive.
    return 10;
  }
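  // Illustrative: @llvm.sqrt.v4f32 costs LT.first on a target where FSQRT is
  // legal for v4f32, while @llvm.fmuladd.* without an FMA lowering is priced
  // as an FMul plus an FAdd.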

  /// \brief Compute a cost of the given call instruction.
  ///
  /// Compute the cost of calling function F with return type RetTy and
  /// argument types Tys. F might be nullptr, in which case the cost of an
  /// arbitrary call with the specified signature will be returned.
  /// This is used, for instance, when we estimate a call of a vector
  /// counterpart of the given function.
  /// \param F Called function, might be nullptr.
  /// \param RetTy,Tys Return value and argument types.
  /// \returns The cost of Call instruction.
  unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
    return 10;
  }

  unsigned getNumberOfParts(Type *Tp) {
    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Tp);
    return LT.first;
  }

  unsigned getAddressComputationCost(Type *Ty, bool IsComplex) { return 0; }

  unsigned getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwise) {
    assert(Ty->isVectorTy() && "Expect a vector type");
    unsigned NumVecElts = Ty->getVectorNumElements();
    unsigned NumReduxLevels = Log2_32(NumVecElts);
    unsigned ArithCost =
        NumReduxLevels *
        static_cast<T *>(this)->getArithmeticInstrCost(Opcode, Ty);
    // Assume the pairwise shuffles add a cost.
    unsigned ShuffleCost =
        NumReduxLevels * (IsPairwise + 1) *
        static_cast<T *>(this)
            ->getShuffleCost(TTI::SK_ExtractSubvector, Ty, NumVecElts / 2, Ty);
    return ShuffleCost + ArithCost + getScalarizationOverhead(Ty, false, true);
  }
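  // Worked example (illustrative, unit arithmetic/shuffle/extract costs):
  // reducing <8 x float> uses NumReduxLevels == log2(8) == 3, giving
  // ArithCost == 3 and ShuffleCost == 3 (non-pairwise), plus an extract
  // overhead of 8, i.e. 14 in total.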

  /// @}
};

/// \brief Concrete BasicTTIImpl that can be used if no further customization
/// is needed.
class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
  typedef BasicTTIImplBase<BasicTTIImpl> BaseT;
  friend class BasicTTIImplBase<BasicTTIImpl>;

  const TargetSubtargetInfo *ST;
  const TargetLoweringBase *TLI;

  const TargetSubtargetInfo *getST() const { return ST; }
  const TargetLoweringBase *getTLI() const { return TLI; }

public:
  explicit BasicTTIImpl(const TargetMachine *TM, Function &F);

  // Provide value semantics. MSVC requires that we spell all of these out.
  BasicTTIImpl(const BasicTTIImpl &Arg)
      : BaseT(static_cast<const BaseT &>(Arg)), ST(Arg.ST), TLI(Arg.TLI) {}
  BasicTTIImpl(BasicTTIImpl &&Arg)
      : BaseT(std::move(static_cast<BaseT &>(Arg))), ST(std::move(Arg.ST)),
        TLI(std::move(Arg.TLI)) {}
  BasicTTIImpl &operator=(const BasicTTIImpl &RHS) {
    BaseT::operator=(static_cast<const BaseT &>(RHS));
    ST = RHS.ST;
    TLI = RHS.TLI;
    return *this;
  }
  BasicTTIImpl &operator=(BasicTTIImpl &&RHS) {
    BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
    ST = std::move(RHS.ST);
    TLI = std::move(RHS.TLI);
    return *this;
  }
};

} // end namespace llvm

#endif