//===-- PPCTargetTransformInfo.cpp - PPC specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// PPC target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "PPC.h"
#include "PPCTargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

#define DEBUG_TYPE "ppctti"

static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);

// Declare the pass initialization routine locally as target-specific passes
// don't have a target-wide initialization entry point, and so we rely on the
// pass constructor initialization.
namespace llvm {
void initializePPCTTIPass(PassRegistry &);
}

namespace {

class PPCTTI final : public ImmutablePass, public TargetTransformInfo {
  const PPCSubtarget *ST;
  const PPCTargetLowering *TLI;

public:
  PPCTTI() : ImmutablePass(ID), ST(nullptr), TLI(nullptr) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  PPCTTI(const PPCTargetMachine *TM)
      : ImmutablePass(ID), ST(TM->getSubtargetImpl()),
        TLI(TM->getSubtargetImpl()->getTargetLowering()) {
    initializePPCTTIPass(*PassRegistry::getPassRegistry());
  }

  virtual void initializePass() override {
    pushTTIStack(this);
  }

  virtual void getAnalysisUsage(AnalysisUsage &AU) const override {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
  virtual void *getAdjustedAnalysisPointer(const void *ID) override {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo*)this;
    return this;
  }

  /// \name Scalar TTI Implementations
  /// @{
  unsigned getIntImmCost(const APInt &Imm, Type *Ty) const override;

  unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                         Type *Ty) const override;
  unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                         Type *Ty) const override;

  virtual PopcntSupportKind
  getPopcntSupport(unsigned TyWidth) const override;
  virtual void getUnrollingPreferences(
    Loop *L, UnrollingPreferences &UP) const override;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  virtual unsigned getNumberOfRegisters(bool Vector) const override;
  virtual unsigned getRegisterBitWidth(bool Vector) const override;
  virtual unsigned getMaximumUnrollFactor() const override;
  virtual unsigned
  getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind,
                         OperandValueKind, OperandValueProperties,
                         OperandValueProperties) const override;
  virtual unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                                  int Index, Type *SubTp) const override;
  virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                                    Type *Src) const override;
  virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                      Type *CondTy) const override;
  virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                                      unsigned Index) const override;
  virtual unsigned getMemoryOpCost(unsigned Opcode, Type *Src,
                                   unsigned Alignment,
                                   unsigned AddressSpace) const override;

  /// @}
};

} // end anonymous namespace

INITIALIZE_AG_PASS(PPCTTI, TargetTransformInfo, "ppctti",
                   "PPC Target Transform Info", true, true, false)
char PPCTTI::ID = 0;

ImmutablePass *
llvm::createPPCTargetTransformInfoPass(const PPCTargetMachine *TM) {
  return new PPCTTI(TM);
}

//===----------------------------------------------------------------------===//
//
// PPC cost model.
//
//===----------------------------------------------------------------------===//

PPCTTI::PopcntSupportKind PPCTTI::getPopcntSupport(unsigned TyWidth) const {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (ST->hasPOPCNTD() && TyWidth <= 64)
    return PSK_FastHardware;
  return PSK_Software;
}

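// A note on the return values used below: TCC_Free and TCC_Basic come from
// TargetTransformInfo's generic cost scale, where TCC_Free means the
// immediate is expected to fold into the using instruction at no cost and
// TCC_Basic is roughly the cost of a single simple instruction.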
unsigned PPCTTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
  if (DisablePPCConstHoist)
    return TargetTransformInfo::getIntImmCost(Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TCC_Free;

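  // The buckets below roughly track the length of the materialization
  // sequence: a 16-bit signed value typically fits a single li/addi, a
  // 32-bit value needs lis (possibly followed by ori), and anything wider
  // needs a longer multi-instruction sequence.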
  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TCC_Basic;

      return 2 * TCC_Basic;
    }
  }

  return 4 * TCC_Basic;
}

unsigned PPCTTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                               const APInt &Imm, Type *Ty) const {
  if (DisablePPCConstHoist)
    return TargetTransformInfo::getIntImmCost(IID, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default: return TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TCC_Free;
    break;
  }
  return PPCTTI::getIntImmCost(Imm, Ty);
}

unsigned PPCTTI::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                               Type *Ty) const {
  if (DisablePPCConstHoist)
    return TargetTransformInfo::getIntImmCost(Opcode, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

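  // The switch below records, per opcode, which immediate operands the ISA
  // can typically encode directly: most D-form arithmetic and logical ops
  // take 16-bit signed immediates, the logical ops also have shifted
  // (addis/oris/xoris-style) forms, compares have unsigned-immediate forms,
  // and 'and' with a contiguous run of bits can often become a
  // rotate-and-mask instruction.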
  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  default: return TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TCC_Basic;
    return TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
    // Fallthrough...
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    // Fallthrough...
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Fallthrough... (zero comparisons can use record-form instructions)
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TCC_Free;

    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TCC_Free;
  }

  return PPCTTI::getIntImmCost(Imm, Ty);
}

void PPCTTI::getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) const {
  if (ST->getDarwinDirective() == PPC::DIR_A2) {
    // The A2 is in-order with a deep pipeline, and concatenation unrolling
    // helps expose latency-hiding opportunities to the instruction scheduler.
    UP.Partial = UP.Runtime = true;
  }
}

unsigned PPCTTI::getNumberOfRegisters(bool Vector) const {
  if (Vector && !ST->hasAltivec())
    return 0;
  return ST->hasVSX() ? 64 : 32;
}

unsigned PPCTTI::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasAltivec()) return 128;
    return 0;
  }

  if (ST->isPPC64())
    return 64;
  return 32;
}

unsigned PPCTTI::getMaximumUnrollFactor() const {
  unsigned Directive = ST->getDarwinDirective();
  // The 440 has no SIMD support, but floating-point instructions
  // have a 5-cycle latency, so unroll by 5x for latency hiding.
  if (Directive == PPC::DIR_440)
    return 5;

  // The A2 has no SIMD support, but floating-point instructions
  // have a 6-cycle latency, so unroll by 6x for latency hiding.
  if (Directive == PPC::DIR_A2)
    return 6;

  // FIXME: For lack of any better information, do no harm...
  if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
    return 1;

  // For most things, modern systems have two execution units (and
  // out-of-order execution).
  return 2;
}

unsigned PPCTTI::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, OperandValueKind Op1Info,
    OperandValueKind Op2Info, OperandValueProperties Opd1PropInfo,
    OperandValueProperties Opd2PropInfo) const {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  // Fallback to the default implementation.
  return TargetTransformInfo::getArithmeticInstrCost(
      Opcode, Ty, Op1Info, Op2Info, Opd1PropInfo, Opd2PropInfo);
}

unsigned PPCTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                                Type *SubTp) const {
  return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
}

unsigned PPCTTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
}

unsigned PPCTTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                    Type *CondTy) const {
  return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

unsigned PPCTTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                    unsigned Index) const {
  assert(Val->isVectorTy() && "This must be a vector type");

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
    // Double-precision scalars are already located in index #0.
    if (Index == 0)
      return 0;

    return TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);
  }

  // Estimated cost of a load-hit-store delay. This was obtained
  // experimentally as a minimum needed to prevent unprofitable
  // vectorization for the paq8p benchmark. It may need to be
  // raised further if other unprofitable cases remain.
  unsigned LHSPenalty = 2;
  if (ISD == ISD::INSERT_VECTOR_ELT)
    LHSPenalty += 7;

  // Vector element insert/extract with Altivec is very expensive,
  // because they require store and reload with the attendant
  // processor stall for load-hit-store. Until VSX is available,
  // these need to be estimated as very costly.
  if (ISD == ISD::EXTRACT_VECTOR_ELT ||
      ISD == ISD::INSERT_VECTOR_ELT)
    return LHSPenalty +
      TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);

  return TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);
}

unsigned PPCTTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                 unsigned AddressSpace) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
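  // Roughly, LT.first estimates how many legal-typed operations the access is
  // split into after type legalization, and LT.second is the legalized type.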
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  unsigned Cost =
    TargetTransformInfo::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);

  // VSX loads/stores support unaligned access.
  if (ST->hasVSX()) {
    if (LT.second == MVT::v2f64 || LT.second == MVT::v2i64)
      return Cost;
  }

  bool UnalignedAltivec =
    Src->isVectorTy() &&
    Src->getPrimitiveSizeInBits() >= LT.second.getSizeInBits() &&
    LT.second.getSizeInBits() == 128 &&
    Opcode == Instruction::Load;

  // PPC in general does not support unaligned loads and stores. They'll need
  // to be decomposed based on the alignment factor.
  unsigned SrcBytes = LT.second.getStoreSize();
  if (SrcBytes && Alignment && Alignment < SrcBytes && !UnalignedAltivec) {
    Cost += LT.first*(SrcBytes/Alignment-1);
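
    // For example, under this model a 16-byte access with 4-byte alignment is
    // assumed to split into 16/4 = 4 aligned pieces, adding LT.first * 3 to
    // the base cost.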

    // For a vector type, there is also scalarization overhead (only for
    // stores, loads are expanded using the vector-load + permutation sequence,
    // which is much less expensive).
    if (Src->isVectorTy() && Opcode == Instruction::Store)
      for (int i = 0, e = Src->getVectorNumElements(); i < e; ++i)
        Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);
  }

  return Cost;
}