//===- BasicTargetTransformInfo.cpp - Basic target-independent TTI impl ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides the implementation of a basic TargetTransformInfo pass
/// predicated on the target abstractions present in the target-independent
/// code generator. It uses these (primarily TargetLowering) to model as much
/// of the TTI query interface as possible. It is included by most targets so
/// that they can specialize only a small subset of the query space.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <utility>

using namespace llvm;

static cl::opt<unsigned>
PartialUnrollingThreshold("partial-unrolling-threshold", cl::init(0),
  cl::desc("Threshold for partial unrolling"), cl::Hidden);

#define DEBUG_TYPE "basictti"

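// The BasicTTI pass defined below sits at the bottom of the
// TargetTransformInfo analysis group's delegation stack: queries issued
// through TopTTI start at the top of the stack, so a target-specific TTI
// pass registered above this one gets the first chance to answer them.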
namespace {

class BasicTTI final : public ImmutablePass, public TargetTransformInfo {
  const TargetMachine *TM;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

  /// Estimate the cost overhead of an SK_Alternate shuffle.
  unsigned getAltShuffleOverhead(Type *Ty) const;

  const TargetLoweringBase *getTLI() const {
    return TM->getSubtargetImpl()->getTargetLowering();
  }

public:
  BasicTTI() : ImmutablePass(ID), TM(nullptr) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  BasicTTI(const TargetMachine *TM) : ImmutablePass(ID), TM(TM) {
    initializeBasicTTIPass(*PassRegistry::getPassRegistry());
  }

  void initializePass() override {
    pushTTIStack(this);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
  void *getAdjustedAnalysisPointer(const void *ID) override {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo *)this;
    return this;
  }

  bool hasBranchDivergence() const override;

  /// \name Scalar TTI Implementations
  /// @{

  bool isLegalAddImmediate(int64_t imm) const override;
  bool isLegalICmpImmediate(int64_t imm) const override;
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                             int64_t BaseOffset, bool HasBaseReg,
                             int64_t Scale) const override;
  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                           int64_t BaseOffset, bool HasBaseReg,
                           int64_t Scale) const override;
  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTypeLegal(Type *Ty) const override;
  unsigned getJumpBufAlignment() const override;
  unsigned getJumpBufSize() const override;
  bool shouldBuildLookupTables() const override;
  bool haveFastSqrt(Type *Ty) const override;
  void getUnrollingPreferences(Loop *L,
                               UnrollingPreferences &UP) const override;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(bool Vector) const override;
  unsigned getMaximumUnrollFactor() const override;
  unsigned getRegisterBitWidth(bool Vector) const override;
  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind,
                                  OperandValueKind, OperandValueProperties,
                                  OperandValueProperties) const override;
  unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                          int Index, Type *SubTp) const override;
  unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                            Type *Src) const override;
  unsigned getCFInstrCost(unsigned Opcode) const override;
  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                              Type *CondTy) const override;
  unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                              unsigned Index) const override;
  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) const override;
  unsigned getIntrinsicInstrCost(Intrinsic::ID, Type *RetTy,
                                 ArrayRef<Type *> Tys) const override;
  unsigned getNumberOfParts(Type *Tp) const override;
  unsigned getAddressComputationCost(Type *Ty, bool IsComplex) const override;
  unsigned getReductionCost(unsigned Opcode, Type *Ty,
                            bool IsPairwise) const override;

  /// @}
};

} // end anonymous namespace

INITIALIZE_AG_PASS(BasicTTI, TargetTransformInfo, "basictti",
                   "Target independent code generator's TTI", true, true, false)
char BasicTTI::ID = 0;

ImmutablePass *
llvm::createBasicTargetTransformInfoPass(const TargetMachine *TM) {
  return new BasicTTI(TM);
}

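// This generic implementation conservatively reports no branch divergence;
// targets with divergent control flow (e.g. GPUs) override this hook.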
bool BasicTTI::hasBranchDivergence() const { return false; }

bool BasicTTI::isLegalAddImmediate(int64_t imm) const {
  return getTLI()->isLegalAddImmediate(imm);
}

bool BasicTTI::isLegalICmpImmediate(int64_t imm) const {
  return getTLI()->isLegalICmpImmediate(imm);
}

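// Addressing-mode queries are answered by filling in a
// TargetLoweringBase::AddrMode (BaseGV + BaseOffs + HasBaseReg + Scale*Reg)
// and forwarding it to the target's lowering hooks.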
bool BasicTTI::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                     int64_t BaseOffset, bool HasBaseReg,
                                     int64_t Scale) const {
  TargetLoweringBase::AddrMode AM;
  AM.BaseGV = BaseGV;
  AM.BaseOffs = BaseOffset;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Scale;
  return getTLI()->isLegalAddressingMode(AM, Ty);
}

int BasicTTI::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                   int64_t BaseOffset, bool HasBaseReg,
                                   int64_t Scale) const {
  TargetLoweringBase::AddrMode AM;
  AM.BaseGV = BaseGV;
  AM.BaseOffs = BaseOffset;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Scale;
  return getTLI()->getScalingFactorCost(AM, Ty);
}

bool BasicTTI::isTruncateFree(Type *Ty1, Type *Ty2) const {
  return getTLI()->isTruncateFree(Ty1, Ty2);
}

bool BasicTTI::isTypeLegal(Type *Ty) const {
  EVT T = getTLI()->getValueType(Ty);
  return getTLI()->isTypeLegal(T);
}

unsigned BasicTTI::getJumpBufAlignment() const {
  return getTLI()->getJumpBufAlignment();
}

unsigned BasicTTI::getJumpBufSize() const {
  return getTLI()->getJumpBufSize();
}

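// Lookup tables are only worthwhile if the target supports jump tables and
// can lower either a jump-table branch or an indirect branch.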
bool BasicTTI::shouldBuildLookupTables() const {
  const TargetLoweringBase *TLI = getTLI();
  return TLI->supportJumpTables() &&
      (TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
       TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
}

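// Square root counts as fast when the type is legal and FSQRT lowers to an
// instruction (legal or custom) rather than being expanded into a libcall.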
bool BasicTTI::haveFastSqrt(Type *Ty) const {
  const TargetLoweringBase *TLI = getTLI();
  EVT VT = TLI->getValueType(Ty);
  return TLI->isTypeLegal(VT) && TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
}

void BasicTTI::getUnrollingPreferences(Loop *L,
                                       UnrollingPreferences &UP) const {
  // This unrolling functionality is target independent, but to provide some
  // motivation for its intended use, consider x86:

  // According to the Intel 64 and IA-32 Architectures Optimization Reference
  // Manual, Intel Core models and later have a loop stream detector
  // (and associated uop queue) that can benefit from partial unrolling.
  // The relevant requirements are:
  //  - The loop must have no more than 4 (8 for Nehalem and later) branches
  //    taken, and none of them may be calls.
  //  - The loop can have no more than 18 (28 for Nehalem and later) uops.

  // According to the Software Optimization Guide for AMD Family 15h Processors,
  // models 30h-4fh (Steamroller and later) have a loop predictor and loop
  // buffer which can benefit from partial unrolling.
  // The relevant requirements are:
  //  - The loop must have fewer than 16 branches.
  //  - The loop must have fewer than 40 uops in all executed loop branches.

  // The number of taken branches in a loop is hard to estimate here, and
  // benchmarking has revealed that it is better not to be conservative when
  // estimating the branch count. As a result, we'll ignore the branch limits
  // until someone finds a case where it matters in practice.

  unsigned MaxOps;
  const TargetSubtargetInfo *ST = &TM->getSubtarget<TargetSubtargetInfo>();
  if (PartialUnrollingThreshold.getNumOccurrences() > 0)
    MaxOps = PartialUnrollingThreshold;
  else if (ST->getSchedModel()->LoopMicroOpBufferSize > 0)
    MaxOps = ST->getSchedModel()->LoopMicroOpBufferSize;
  else
    return;

  // Scan the loop: don't unroll loops with calls.
  for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
       I != E; ++I) {
    BasicBlock *BB = *I;

    for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
      if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
        ImmutableCallSite CS(J);
        if (const Function *F = CS.getCalledFunction()) {
          if (!TopTTI->isLoweredToCall(F))
            continue;
        }

        return;
      }
  }

  // Enable runtime and partial unrolling up to the specified size.
  UP.Partial = UP.Runtime = true;
  UP.PartialThreshold = UP.PartialOptSizeThreshold = MaxOps;
}

//===----------------------------------------------------------------------===//
//
// Calls used by the vectorizers.
//
//===----------------------------------------------------------------------===//

unsigned BasicTTI::getScalarizationOverhead(Type *Ty, bool Insert,
                                            bool Extract) const {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}

unsigned BasicTTI::getNumberOfRegisters(bool Vector) const {
  return 1;
}

unsigned BasicTTI::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned BasicTTI::getMaximumUnrollFactor() const {
  return 1;
}

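// The generic arithmetic cost model has three tiers: legal or promoted
// operations cost 1 per legalized part (doubled when the type is split),
// custom-lowered operations are assumed to be twice as expensive, and
// everything else is scalarized.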
unsigned BasicTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                          OperandValueKind, OperandValueKind,
                                          OperandValueProperties,
                                          OperandValueProperties) const {
  // Check if any of the operands are vector operands.
  const TargetLoweringBase *TLI = getTLI();
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  bool IsFloat = Ty->getScalarType()->isFloatingPointTy();
  // Assume that floating point arithmetic operations cost twice as much as
  // integer operations.
  unsigned OpCost = (IsFloat ? 2 : 1);

  if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1.
    // If the type is split to multiple registers, assume that there is some
    // overhead to this.
    // TODO: Once we have extract/insert subvector cost we need to use them.
    if (LT.first > 1)
      return LT.first * 2 * OpCost;
    return LT.first * 1 * OpCost;
  }

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // If the operation is custom lowered then assume
    // that the code is twice as expensive.
    return LT.first * 2 * OpCost;
  }

  // Else, assume that we need to scalarize this op.
  if (Ty->isVectorTy()) {
    unsigned Num = Ty->getVectorNumElements();
    unsigned Cost = TopTTI->getArithmeticInstrCost(Opcode, Ty->getScalarType());
    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(Ty, true, true) + Num * Cost;
  }

  // We don't know anything about this scalar instruction.
  return OpCost;
}

unsigned BasicTTI::getAltShuffleOverhead(Type *Ty) const {
  assert(Ty->isVectorTy() && "Can only shuffle vectors");
  unsigned Cost = 0;
  // Shuffle cost is equal to the cost of extracting each element from its
  // argument plus the cost of inserting it onto the result vector.
  //
  // e.g. An SK_Alternate shuffle of a <4 x float> has a mask of <0,5,2,7>,
  // i.e. we need to extract index 0 of the first vector, index 1 of the second
  // vector, index 2 of the first vector, and finally index 3 of the second
  // vector, and insert them at indices <0,1,2,3> of the result vector.
  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }
  return Cost;
}

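// Only SK_Alternate shuffles are modeled in detail; every other shuffle kind
// is optimistically assumed to cost a single instruction.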
unsigned BasicTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                                  Type *SubTp) const {
  if (Kind == SK_Alternate) {
    return getAltShuffleOverhead(Tp);
  }
  return 1;
}

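// Casts are priced by comparing the legalized source and destination types:
// no-op and free conversions cost 0, legal casts cost 1 per legalized part,
// same-sized vector zext/sext get small fixed costs, and everything else
// falls back to scalarization.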
unsigned BasicTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
                                    Type *Src) const {
  const TargetLoweringBase *TLI = getTLI();
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(Src);
  std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(Dst);

  // Check for NOOP conversions.
  if (SrcLT.first == DstLT.first &&
      SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

    // Bitcasts between types that are legalized to the same type are free.
    if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)
      return 0;
  }

  if (Opcode == Instruction::Trunc &&
      TLI->isTruncateFree(SrcLT.second, DstLT.second))
    return 0;

  if (Opcode == Instruction::ZExt &&
      TLI->isZExtFree(SrcLT.second, DstLT.second))
    return 0;

  // If the cast is marked as legal (or promote) then assume low cost.
  if (SrcLT.first == DstLT.first &&
      TLI->isOperationLegalOrPromote(ISD, DstLT.second))
    return 1;

  // Handle scalar conversions.
  if (!Src->isVectorTy() && !Dst->isVectorTy()) {

    // Scalar bitcasts are usually free.
    if (Opcode == Instruction::BitCast)
      return 0;

    // Just check the op cost. If the operation is legal then assume it costs 1.
    if (!TLI->isOperationExpand(ISD, DstLT.second))
      return 1;

    // Assume that illegal scalar instructions are expensive.
    return 4;
  }

  // Check vector-to-vector casts.
  if (Dst->isVectorTy() && Src->isVectorTy()) {

    // If the cast is between same-sized registers, then the check is simple.
    if (SrcLT.first == DstLT.first &&
        SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

      // Assume that Zext is done using AND.
      if (Opcode == Instruction::ZExt)
        return 1;

      // Assume that sext is done using SHL and SRA.
      if (Opcode == Instruction::SExt)
        return 2;

      // Just check the op cost. If the operation is legal then assume it costs
      // 1 and multiply by the type-legalization overhead.
      if (!TLI->isOperationExpand(ISD, DstLT.second))
        return SrcLT.first * 1;
    }

    // If we are converting vectors and the operation is illegal, or
    // if the vectors are legalized to different types, estimate the
    // scalarization costs.
    unsigned Num = Dst->getVectorNumElements();
    unsigned Cost = TopTTI->getCastInstrCost(Opcode, Dst->getScalarType(),
                                             Src->getScalarType());

    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(Dst, true, true) + Num * Cost;
  }

  // We already handled vector-to-vector and scalar-to-scalar conversions. This
  // is where we handle bitcast between vectors and scalars. We need to assume
  // that the conversion is scalarized in one way or another.
  if (Opcode == Instruction::BitCast)
    // Illegal bitcasts are done by storing and loading from a stack slot.
    return (Src->isVectorTy() ? getScalarizationOverhead(Src, false, true) : 0) +
           (Dst->isVectorTy() ? getScalarizationOverhead(Dst, true, false) : 0);

  llvm_unreachable("Unhandled cast");
}

unsigned BasicTTI::getCFInstrCost(unsigned Opcode) const {
  // Branches are assumed to be predicted.
  return 0;
}

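// Compares and selects follow the same legal-versus-expand split as the
// arithmetic cost model; a select with a vector condition is costed as an
// ISD::VSELECT node.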
unsigned BasicTTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                      Type *CondTy) const {
  const TargetLoweringBase *TLI = getTLI();
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Selects on vectors are actually vector selects.
  if (ISD == ISD::SELECT) {
    assert(CondTy && "CondTy must exist");
    if (CondTy->isVectorTy())
      ISD = ISD::VSELECT;
  }

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1. Multiply
    // by the type-legalization overhead.
    return LT.first;
  }

  // Otherwise, assume that the operation is scalarized.
  if (ValTy->isVectorTy()) {
    unsigned Num = ValTy->getVectorNumElements();
    if (CondTy)
      CondTy = CondTy->getScalarType();
    unsigned Cost = TopTTI->getCmpSelInstrCost(Opcode, ValTy->getScalarType(),
                                               CondTy);

    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(ValTy, true, false) + Num * Cost;
  }

  // Unknown scalar opcode.
  return 1;
}

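// Element insert/extract is approximated by the legalization cost of the
// element type alone.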
unsigned BasicTTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                      unsigned Index) const {
  std::pair<unsigned, MVT> LT =
      getTLI()->getTypeLegalizationCost(Val->getScalarType());

  return LT.first;
}

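// Loads and stores cost 1 per legalized part. Vector accesses whose memory
// type must be widened are additionally charged the scalarization overhead
// unless the target has the matching extending load or truncating store.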
unsigned BasicTTI::getMemoryOpCost(unsigned Opcode, Type *Src,
                                   unsigned Alignment,
                                   unsigned AddressSpace) const {
  assert(!Src->isVoidTy() && "Invalid type");
  std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Src);

  // Assume that all loads of legal types cost 1.
  unsigned Cost = LT.first;

  if (Src->isVectorTy() &&
      Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) {
    // This is a vector load that legalizes to a larger type than the vector
    // itself. Unless the corresponding extending load or truncating store is
    // legal, this will scalarize.
    TargetLowering::LegalizeAction LA = TargetLowering::Expand;
    EVT MemVT = getTLI()->getValueType(Src, true);
    if (MemVT.isSimple() && MemVT != MVT::Other) {
      if (Opcode == Instruction::Store)
        LA = getTLI()->getTruncStoreAction(LT.second, MemVT.getSimpleVT());
      else
        LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, MemVT.getSimpleVT());
    }

    if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
      // This is a vector load/store for some illegal type that is scalarized.
      // We must account for the cost of building or decomposing the vector.
      Cost += getScalarizationOverhead(Src, Opcode != Instruction::Store,
                                       Opcode == Instruction::Store);
    }
  }

  return Cost;
}

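// Intrinsics with a direct ISD opcode mapping are priced like ordinary
// operations; anything else is assumed to scalarize into expensive library
// calls.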
unsigned BasicTTI::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                         ArrayRef<Type *> Tys) const {
  unsigned ISD = 0;
  switch (IID) {
  default: {
    // Assume that we need to scalarize this intrinsic.
    unsigned ScalarizationCost = 0;
    unsigned ScalarCalls = 1;
    if (RetTy->isVectorTy()) {
      ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
      ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
    }
    for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
      if (Tys[i]->isVectorTy()) {
        ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
        ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
      }
    }

    return ScalarCalls + ScalarizationCost;
  }
  // Look for intrinsics that can be lowered directly or turned into a scalar
  // intrinsic.
  case Intrinsic::sqrt:      ISD = ISD::FSQRT;      break;
  case Intrinsic::sin:       ISD = ISD::FSIN;       break;
  case Intrinsic::cos:       ISD = ISD::FCOS;       break;
  case Intrinsic::exp:       ISD = ISD::FEXP;       break;
  case Intrinsic::exp2:      ISD = ISD::FEXP2;      break;
  case Intrinsic::log:       ISD = ISD::FLOG;       break;
  case Intrinsic::log10:     ISD = ISD::FLOG10;     break;
  case Intrinsic::log2:      ISD = ISD::FLOG2;      break;
  case Intrinsic::fabs:      ISD = ISD::FABS;       break;
  case Intrinsic::copysign:  ISD = ISD::FCOPYSIGN;  break;
  case Intrinsic::floor:     ISD = ISD::FFLOOR;     break;
  case Intrinsic::ceil:      ISD = ISD::FCEIL;      break;
  case Intrinsic::trunc:     ISD = ISD::FTRUNC;     break;
  case Intrinsic::nearbyint: ISD = ISD::FNEARBYINT; break;
  case Intrinsic::rint:      ISD = ISD::FRINT;      break;
  case Intrinsic::round:     ISD = ISD::FROUND;     break;
  case Intrinsic::pow:       ISD = ISD::FPOW;       break;
  case Intrinsic::fma:       ISD = ISD::FMA;        break;
  case Intrinsic::fmuladd:   ISD = ISD::FMA;        break;
  // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
    return 0;
  }

  const TargetLoweringBase *TLI = getTLI();
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(RetTy);

  if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1.
    // If the type is split to multiple registers, assume that there is some
    // overhead to this.
    // TODO: Once we have extract/insert subvector cost we need to use them.
    if (LT.first > 1)
      return LT.first * 2;
    return LT.first * 1;
  }

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // If the operation is custom lowered then assume
    // that the code is twice as expensive.
    return LT.first * 2;
  }

  // If we can't lower fmuladd into an FMA, estimate the cost as a floating
  // point mul followed by an add.
  if (IID == Intrinsic::fmuladd)
    return TopTTI->getArithmeticInstrCost(BinaryOperator::FMul, RetTy) +
           TopTTI->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy);

  // Else, assume that we need to scalarize this intrinsic. For math builtins
  // this will emit a costly libcall, adding call overhead and spills. Make it
  // very expensive.
  if (RetTy->isVectorTy()) {
    unsigned Num = RetTy->getVectorNumElements();
    unsigned Cost = TopTTI->getIntrinsicInstrCost(IID, RetTy->getScalarType(),
                                                  Tys);
    return 10 * Cost * Num;
  }

  // This is going to be turned into a library call, make it expensive.
  return 10;
}

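// The number of parts is the number of registers the type is split into
// during legalization.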
unsigned BasicTTI::getNumberOfParts(Type *Tp) const {
  std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Tp);
  return LT.first;
}

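// Address computation is assumed to be folded into the memory operation, and
// is therefore free in this generic model.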
unsigned BasicTTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
  return 0;
}

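// A reduction over N lanes is modeled as a log2(N)-deep tree: each level pays
// for one shuffle (two for pairwise reductions) and one vector arithmetic op,
// plus the overhead of extracting the result from the vector at the end.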
unsigned BasicTTI::getReductionCost(unsigned Opcode, Type *Ty,
                                    bool IsPairwise) const {
  assert(Ty->isVectorTy() && "Expect a vector type");
  unsigned NumVecElts = Ty->getVectorNumElements();
  unsigned NumReduxLevels = Log2_32(NumVecElts);
  unsigned ArithCost =
      NumReduxLevels * TopTTI->getArithmeticInstrCost(Opcode, Ty);
  // Assume the pairwise shuffles add a cost.
  unsigned ShuffleCost =
      NumReduxLevels * (IsPairwise + 1) *
      TopTTI->getShuffleCost(SK_ExtractSubvector, Ty, NumVecElts / 2, Ty);
  return ShuffleCost + ArithCost + getScalarizationOverhead(Ty, false, true);
}