//===-- PPCTargetTransformInfo.cpp - PPC specific TTI ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "PPCTargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

#define DEBUG_TYPE "ppctti"

static cl::opt<bool> DisablePPCConstHoist("disable-ppc-constant-hoisting",
cl::desc("disable constant hoisting on PPC"), cl::init(false), cl::Hidden);
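
// Note (illustrative): as a hidden cl::opt, this flag is reachable from
// clang via -mllvm -disable-ppc-constant-hoisting (or directly on opt/llc);
// when set, the three getIntImmCost overloads below fall back to the
// target-independent defaults.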

//===----------------------------------------------------------------------===//
//
// PPC cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
PPCTTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (ST->hasPOPCNTD() && TyWidth <= 64)
    return TTI::PSK_FastHardware;
  return TTI::PSK_Software;
}
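
// For example, on a POWER7-class core (which implements popcntw/popcntd),
// an i32 or i64 population count is reported as fast hardware, so passes
// such as LoopIdiomRecognize will form @llvm.ctpop idioms; otherwise the
// software expansion cost is assumed.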

unsigned PPCTTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TTI::TCC_Free;

  if (Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Basic;

    if (isInt<32>(Imm.getSExtValue())) {
      // A constant that can be materialized using lis.
      if ((Imm.getZExtValue() & 0xFFFF) == 0)
        return TTI::TCC_Basic;

      return 2 * TTI::TCC_Basic;
    }
  }

  return 4 * TTI::TCC_Basic;
}
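
// Illustrative mapping to materialization sequences (a sketch of the
// intent, not exhaustive): a 16-bit signed immediate is a single li/addi;
// 0x12340000 is a single lis; 0x12345678 needs lis+ori; and a full 64-bit
// constant such as 0x123456789ABCDEF0 takes a longer lis/ori/rldicr/oris/
// ori sequence, approximated above as 4 * TCC_Basic.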

unsigned PPCTTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                   const APInt &Imm, Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(IID, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}
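
// Rationale (illustrative): stackmap and patchpoint immediates are free
// because they are recorded in the stackmap side table rather than being
// materialized in the instruction stream, and the overflow intrinsics are
// free for 16-bit operands because the underlying add/sub can use its
// immediate form (e.g. addi) directly.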

unsigned PPCTTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
                                   const APInt &Imm, Type *Ty) {
  if (DisablePPCConstHoist)
    return BaseT::getIntImmCost(Opcode, Idx, Imm, Ty);

  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  bool ShiftedFree = false, RunFree = false, UnsignedFree = false,
       ZeroFree = false;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::And:
    RunFree = true; // (for the rotate-and-mask instructions)
    // Fallthrough...
  case Instruction::Add:
  case Instruction::Or:
  case Instruction::Xor:
    ShiftedFree = true;
    // Fallthrough...
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    ImmIdx = 1;
    break;
  case Instruction::ICmp:
    UnsignedFree = true;
    ImmIdx = 1;
    // Fallthrough... (zero comparisons can use record-form instructions)
  case Instruction::Select:
    ZeroFree = true;
    break;
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Ret:
  case Instruction::Load:
  case Instruction::Store:
    break;
  }

  if (ZeroFree && Imm == 0)
    return TTI::TCC_Free;

  if (Idx == ImmIdx && Imm.getBitWidth() <= 64) {
    if (isInt<16>(Imm.getSExtValue()))
      return TTI::TCC_Free;

    if (RunFree) {
      if (Imm.getBitWidth() <= 32 &&
          (isShiftedMask_32(Imm.getZExtValue()) ||
           isShiftedMask_32(~Imm.getZExtValue())))
        return TTI::TCC_Free;

      if (ST->isPPC64() &&
          (isShiftedMask_64(Imm.getZExtValue()) ||
           isShiftedMask_64(~Imm.getZExtValue())))
        return TTI::TCC_Free;
    }

    if (UnsignedFree && isUInt<16>(Imm.getZExtValue()))
      return TTI::TCC_Free;

    if (ShiftedFree && (Imm.getZExtValue() & 0xFFFF) == 0)
      return TTI::TCC_Free;
  }

  return PPCTTIImpl::getIntImmCost(Imm, Ty);
}
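
// Worked examples (illustrative): `and i32 %x, 0x0FF0` is free because the
// mask is a contiguous run of ones that rlwinm can encode (RunFree);
// `icmp ult i32 %x, 40000` is free via cmplwi (UnsignedFree, since 40000
// does not fit a signed 16-bit field); `or i32 %x, 0x50000` is free because
// the low 16 bits are zero and oris covers it (ShiftedFree); and
// `select i1 %c, i32 %a, i32 0` is free because a zero operand can reuse a
// record-form comparison result (ZeroFree).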

void PPCTTIImpl::getUnrollingPreferences(Loop *L,
                                         TTI::UnrollingPreferences &UP) {
  if (ST->getDarwinDirective() == PPC::DIR_A2) {
    // The A2 is in-order with a deep pipeline, and concatenation unrolling
    // helps expose latency-hiding opportunities to the instruction scheduler.
    UP.Partial = UP.Runtime = true;

    // We unroll a lot on the A2 (hundreds of instructions), and the benefits
    // often outweigh the cost of a division to compute the trip count.
    UP.AllowExpensiveTripCount = true;
  }

  BaseT::getUnrollingPreferences(L, UP);
}
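
// For context: UP.Partial allows unrolling by a factor that does not evenly
// divide a known trip count, and UP.Runtime enables unrolling loops whose
// trip count is only known at runtime (paired with a remainder loop). Both
// are conservative defaults elsewhere but profitable on the deeply
// pipelined A2.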

bool PPCTTIImpl::enableAggressiveInterleaving(bool LoopHasReductions) {
  return LoopHasReductions;
}
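
// Interleaving a reduction loop creates independent partial sums (e.g. four
// running totals combined after the loop), which hides the latency of the
// serial dependence chain; for loops without reductions the extra register
// pressure is not worth it, hence the LoopHasReductions gate.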

unsigned PPCTTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasAltivec() && !ST->hasQPX())
    return 0;
  return ST->hasVSX() ? 64 : 32;
}
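
// PPC has 32 GPRs, 32 FPRs, and 32 Altivec VRs; VSX unifies the FPRs and
// VRs into a single file of 64 VSX registers, which is why the register
// count doubles when VSX is available.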

unsigned PPCTTIImpl::getRegisterBitWidth(bool Vector) {
  if (Vector) {
    if (ST->hasQPX()) return 256;
    if (ST->hasAltivec()) return 128;
    return 0;
  }

  if (ST->isPPC64())
    return 64;
  return 32;
}
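
// QPX (the Blue Gene/Q extension on the A2) provides 256-bit <4 x double>
// registers, while Altivec and VSX registers are 128 bits wide.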

unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
  unsigned Directive = ST->getDarwinDirective();
  // The 440 has no SIMD support, but floating-point instructions
  // have a 5-cycle latency, so unroll by 5x for latency hiding.
  if (Directive == PPC::DIR_440)
    return 5;

  // The A2 has no SIMD support, but floating-point instructions
  // have a 6-cycle latency, so unroll by 6x for latency hiding.
  if (Directive == PPC::DIR_A2)
    return 6;

  // FIXME: For lack of any better information, do no harm...
  if (Directive == PPC::DIR_E500mc || Directive == PPC::DIR_E5500)
    return 1;

  // For P7 and P8, floating-point instructions have a 6-cycle latency and
  // there are two execution units, so unroll by 12x for latency hiding.
  if (Directive == PPC::DIR_PWR7 ||
      Directive == PPC::DIR_PWR8)
    return 12;

  // For most things, modern systems have two execution units (and
  // out-of-order execution).
  return 2;
}

unsigned PPCTTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info,
                                       Opd1PropInfo, Opd2PropInfo);
}
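
// The base implementation prices an operation through type legalization:
// for example, an <8 x i32> add on a 128-bit Altivec target legalizes to
// two <4 x i32> adds (vadduwm), so it costs roughly twice a legal add.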

unsigned PPCTTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                                    Type *SubTp) {
  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

unsigned PPCTTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  assert(TLI->InstructionOpcodeToISD(Opcode) && "Invalid opcode");

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

unsigned PPCTTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                        Type *CondTy) {
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

unsigned PPCTTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                        unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ST->hasVSX() && Val->getScalarType()->isDoubleTy()) {
    // Double-precision scalars are already located in index #0.
    if (Index == 0)
      return 0;

    return BaseT::getVectorInstrCost(Opcode, Val, Index);
  } else if (ST->hasQPX() && Val->getScalarType()->isFloatingPointTy()) {
    // Floating-point scalars are already located in index #0.
    if (Index == 0)
      return 0;

    return BaseT::getVectorInstrCost(Opcode, Val, Index);
  }

  // Estimated cost of a load-hit-store delay. This was obtained
  // experimentally as a minimum needed to prevent unprofitable
  // vectorization for the paq8p benchmark. It may need to be
  // raised further if other unprofitable cases remain.
  unsigned LHSPenalty = 2;
  if (ISD == ISD::INSERT_VECTOR_ELT)
    LHSPenalty += 7;

  // Vector element insert/extract with Altivec is very expensive because it
  // requires a store and reload, with the attendant processor stall for
  // load-hit-store. Until VSX is available, these need to be estimated as
  // very costly.
  if (ISD == ISD::EXTRACT_VECTOR_ELT ||
      ISD == ISD::INSERT_VECTOR_ELT)
    return LHSPenalty + BaseT::getVectorInstrCost(Opcode, Val, Index);

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}
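
// Why index #0 is free (illustrative): with VSX the low doubleword of a
// vector register aliases a scalar FPR, and QPX scalars live in the first
// lane of the wide register, so element-0 moves need no instruction.
// Without VSX, inserting, say, element 2 of a <4 x i32> is lowered through
// the stack (store the vector, store the scalar, reload the vector), which
// triggers the load-hit-store stall modeled by LHSPenalty.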

unsigned PPCTTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                     unsigned Alignment,
                                     unsigned AddressSpace) {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  unsigned Cost = BaseT::getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);

  // VSX loads/stores support unaligned access.
  if (ST->hasVSX()) {
    if (LT.second == MVT::v2f64 || LT.second == MVT::v2i64)
      return Cost;
  }

  bool UnalignedAltivec =
    Src->isVectorTy() &&
    Src->getPrimitiveSizeInBits() >= LT.second.getSizeInBits() &&
    LT.second.getSizeInBits() == 128 &&
    Opcode == Instruction::Load;

  // PPC in general does not support unaligned loads and stores. They'll need
  // to be decomposed based on the alignment factor.
  unsigned SrcBytes = LT.second.getStoreSize();
  if (SrcBytes && Alignment && Alignment < SrcBytes && !UnalignedAltivec) {
    Cost += LT.first * (SrcBytes / Alignment - 1);

    // For a vector type, there is also scalarization overhead (only for
    // stores; loads are expanded using the vector-load + permutation
    // sequence, which is much less expensive).
    if (Src->isVectorTy() && Opcode == Instruction::Store)
      for (int i = 0, e = Src->getVectorNumElements(); i < e; ++i)
        Cost += getVectorInstrCost(Instruction::ExtractElement, Src, i);
  }

  return Cost;
}
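
// Worked example: storing a <4 x i32> (16 bytes) with 4-byte alignment and
// no fast unaligned path adds LT.first * (16/4 - 1) = 3 to the base cost,
// plus the cost of extracting each of the four elements for the scalarized
// store sequence.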