1 //===-- ARMTargetTransformInfo.cpp - ARM specific TTI pass ----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 /// This file implements a TargetTransformInfo analysis pass specific to the
11 /// ARM target machine. It uses the target's detailed information to provide
12 /// more precise answers to certain TTI queries, while letting the target
13 /// independent and default TTI implementations handle the rest.
15 //===----------------------------------------------------------------------===//
17 #define DEBUG_TYPE "armtti"
19 #include "ARMTargetMachine.h"
20 #include "llvm/Analysis/TargetTransformInfo.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Target/TargetLowering.h"
23 #include "llvm/Target/CostTable.h"
26 // Declare the pass initialization routine locally as target-specific passes
27 // don't have a target-wide initialization entry point, and so we rely on the
28 // pass constructor initialization.
30 void initializeARMTTIPass(PassRegistry &);
// ARM-specific TargetTransformInfo implementation, registered as an
// ImmutablePass.  Answers cost-model queries (immediate-materialization cost,
// cast/select/shuffle/vector-instruction costs, register counts, unroll
// factor) using the ARM subtarget and lowering information.
// NOTE(review): this numbered listing elides lines (the embedded original
// line numbers skip), so several method bodies and closing braces are not
// visible here.
35 class ARMTTI : public ImmutablePass, public TargetTransformInfo {
// Cached target handles, set by the TM-taking constructor below.
36 const ARMBaseTargetMachine *TM;
37 const ARMSubtarget *ST;
38 const ARMTargetLowering *TLI;
40 /// Estimate the overhead of scalarizing an instruction. Insert and Extract
41 /// are set if the result needs to be inserted and/or extracted from vectors.
42 unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;
// Default constructor exists only to satisfy the pass machinery; it must
// never actually be used to build the pass.
45 ARMTTI() : ImmutablePass(ID), TM(0), ST(0), TLI(0) {
46 llvm_unreachable("This pass cannot be directly constructed");
// Real constructor: caches the subtarget and lowering objects and runs the
// pass initialization routine declared above.
49 ARMTTI(const ARMBaseTargetMachine *TM)
50 : ImmutablePass(ID), TM(TM), ST(TM->getSubtargetImpl()),
51 TLI(TM->getTargetLowering()) {
52 initializeARMTTIPass(*PassRegistry::getPassRegistry());
55 virtual void initializePass() {
59 virtual void finalizePass() {
// Forward analysis-usage bookkeeping to the TargetTransformInfo base.
63 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
64 TargetTransformInfo::getAnalysisUsage(AU);
67 /// Pass identification.
70 /// Provide necessary pointer adjustments for the two base classes.
71 virtual void *getAdjustedAnalysisPointer(const void *ID) {
72 if (ID == &TargetTransformInfo::ID)
73 return (TargetTransformInfo*)this;
77 /// \name Scalar TTI Implementations
80 virtual unsigned getIntImmCost(const APInt &Imm, Type *Ty) const;
85 /// \name Vector TTI Implementations
// Register-file queries; bodies are partially elided in this listing, but
// Thumb1-only targets are special-cased below.
88 unsigned getNumberOfRegisters(bool Vector) const {
95 if (ST->isThumb1Only())
100 unsigned getRegisterBitWidth(bool Vector) const {
110 unsigned getMaximumUnrollFactor() const {
111 // These are out-of-order CPUs (Cortex-A15, Swift), which get a different
111 // unroll factor than in-order cores:
112 if (ST->isCortexA15() || ST->isSwift())
// Cost-query overrides implemented out-of-line later in this file.
117 unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
118 int Index, Type *SubTp) const;
120 unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
123 unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) const;
125 unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) const;
127 unsigned getAddressComputationCost(Type *Val) const;
131 } // end anonymous namespace
// Register ARMTTI as an analysis-group member implementing
// TargetTransformInfo under the name "armtti".
133 INITIALIZE_AG_PASS(ARMTTI, TargetTransformInfo, "armtti",
134 "ARM Target Transform Info", true, true, false)
// Factory used by the ARM target to add this TTI pass to the pipeline.
// NOTE(review): the return-type line is elided in this listing.
138 llvm::createARMTargetTransformInfoPass(const ARMBaseTargetMachine *TM) {
139 return new ARMTTI(TM);
// Cost of materializing the integer immediate Imm of type Ty.
// Splits on instruction-set mode: ARM mode uses getSOImmVal (modified
// immediates), Thumb2 uses getT2SOImmVal, and Thumb1 falls through to the
// small-immediate checks near the end.  Immediates encodable in one
// instruction (or via MVN of the complement) are cheap; otherwise the cost
// depends on MOVW/MOVT availability (hasV6T2Ops).
// NOTE(review): the "return 1;"/"return 2;" lines and closing braces are
// elided from this listing; only the branch conditions are visible.
143 unsigned ARMTTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
144 assert(Ty->isIntegerTy());
// Immediates wider than 32 bits (or zero-width) are not costed here.
146 unsigned Bits = Ty->getPrimitiveSizeInBits();
147 if (Bits == 0 || Bits > 32)
150 int32_t SImmVal = Imm.getSExtValue();
151 uint32_t ZImmVal = Imm.getZExtValue();
// ARM mode: cheap if it fits MOVW's 16-bit range or an ARM modified
// immediate (directly or as the complement, i.e. via MVN).
152 if (!ST->isThumb()) {
153 if ((SImmVal >= 0 && SImmVal < 65536) ||
154 (ARM_AM::getSOImmVal(ZImmVal) != -1) ||
155 (ARM_AM::getSOImmVal(~ZImmVal) != -1))
// Two instructions with MOVW/MOVT (v6T2+), otherwise three.
157 return ST->hasV6T2Ops() ? 2 : 3;
// Thumb2: same structure using the Thumb2 modified-immediate encoding.
158 } else if (ST->isThumb2()) {
159 if ((SImmVal >= 0 && SImmVal < 65536) ||
160 (ARM_AM::getT2SOImmVal(ZImmVal) != -1) ||
161 (ARM_AM::getT2SOImmVal(~ZImmVal) != -1))
163 return ST->hasV6T2Ops() ? 2 : 3;
// Thumb1: 8-bit MOV immediate, or a complemented/shifted 8-bit value.
165 if (SImmVal >= 0 && SImmVal < 256)
167 if ((~ZImmVal < 256) || ARM_AM::isThumbImmShiftedVal(ZImmVal))
169 // Otherwise: load from constantpool.
// Cost of a cast instruction (Opcode) from Src to Dst.  Consults a series of
// static cost tables — NEON float<->double, NEON vector conversions, scalar
// FP->int, scalar int->FP, and plain integer conversions — falling back to
// the target-independent TargetTransformInfo implementation when no table
// entry matches.
// NOTE(review): this listing elides lines; the table-terminating "};" lines
// and the "if (Idx != -1)" guards before each table's return are not visible.
175 unsigned ARMTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
177 int ISD = TLI->InstructionOpcodeToISD(Opcode);
178 assert(ISD && "Invalid opcode");
180 // Single to/from double precision conversions.
181 static const CostTblEntry<MVT> NEONFltDblTbl[] = {
182 // Vector fptrunc/fpext conversions.
183 { ISD::FP_ROUND, MVT::v2f64, 2 },
184 { ISD::FP_EXTEND, MVT::v2f32, 2 },
185 { ISD::FP_EXTEND, MVT::v4f32, 4 }
// NEON vector fptrunc/fpext: scale the table cost by the legalization
// split factor (LT.first).
188 if (Src->isVectorTy() && ST->hasNEON() && (ISD == ISD::FP_ROUND ||
189 ISD == ISD::FP_EXTEND)) {
190 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
191 int Idx = CostTableLookup<MVT>(NEONFltDblTbl, array_lengthof(NEONFltDblTbl),
194 return LT.first * NEONFltDblTbl[Idx].Cost;
197 EVT SrcTy = TLI->getValueType(Src);
198 EVT DstTy = TLI->getValueType(Dst);
// The tables below are keyed on simple (MVT) types only.
200 if (!SrcTy.isSimple() || !DstTy.isSimple())
201 return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
203 // Some arithmetic, load and store operations have specific instructions
204 // to cast up/down their types automatically at no extra cost.
205 // TODO: Get these tables to know at least what the related operations are.
206 static const TypeConversionCostTblEntry<MVT> NEONVectorConversionTbl[] = {
207 { ISD::SIGN_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
208 { ISD::ZERO_EXTEND, MVT::v4i32, MVT::v4i16, 0 },
209 { ISD::SIGN_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
210 { ISD::ZERO_EXTEND, MVT::v2i64, MVT::v2i32, 1 },
211 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 0 },
212 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i32, 1 },
214 // Operations that we legalize using load/stores to the stack.
215 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 16*2 + 4*4 },
216 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 16*2 + 4*3 },
217 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 8*2 + 2*4 },
218 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 8*2 + 2*3 },
219 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 4*1 + 16*2 + 2*1 },
220 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2*1 + 8*2 + 1 },
222 // Vector float <-> i32 conversions.
223 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
224 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
225 { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
226 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
228 // Vector double <-> i32 conversions.
229 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
230 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
231 { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
232 { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 }
// Vector conversions on NEON-capable subtargets.
235 if (SrcTy.isVector() && ST->hasNEON()) {
236 int Idx = ConvertCostTableLookup<MVT>(NEONVectorConversionTbl,
237 array_lengthof(NEONVectorConversionTbl),
238 ISD, DstTy.getSimpleVT(), SrcTy.getSimpleVT());
240 return NEONVectorConversionTbl[Idx].Cost;
243 // Scalar float to integer conversions.
244 static const TypeConversionCostTblEntry<MVT> NEONFloatConversionTbl[] = {
245 { ISD::FP_TO_SINT, MVT::i1, MVT::f32, 2 },
246 { ISD::FP_TO_UINT, MVT::i1, MVT::f32, 2 },
247 { ISD::FP_TO_SINT, MVT::i1, MVT::f64, 2 },
248 { ISD::FP_TO_UINT, MVT::i1, MVT::f64, 2 },
249 { ISD::FP_TO_SINT, MVT::i8, MVT::f32, 2 },
250 { ISD::FP_TO_UINT, MVT::i8, MVT::f32, 2 },
251 { ISD::FP_TO_SINT, MVT::i8, MVT::f64, 2 },
252 { ISD::FP_TO_UINT, MVT::i8, MVT::f64, 2 },
253 { ISD::FP_TO_SINT, MVT::i16, MVT::f32, 2 },
254 { ISD::FP_TO_UINT, MVT::i16, MVT::f32, 2 },
255 { ISD::FP_TO_SINT, MVT::i16, MVT::f64, 2 },
256 { ISD::FP_TO_UINT, MVT::i16, MVT::f64, 2 },
257 { ISD::FP_TO_SINT, MVT::i32, MVT::f32, 2 },
258 { ISD::FP_TO_UINT, MVT::i32, MVT::f32, 2 },
259 { ISD::FP_TO_SINT, MVT::i32, MVT::f64, 2 },
260 { ISD::FP_TO_UINT, MVT::i32, MVT::f64, 2 },
// i64 results are markedly more expensive (10) than i32-and-narrower (2).
261 { ISD::FP_TO_SINT, MVT::i64, MVT::f32, 10 },
262 { ISD::FP_TO_UINT, MVT::i64, MVT::f32, 10 },
263 { ISD::FP_TO_SINT, MVT::i64, MVT::f64, 10 },
264 { ISD::FP_TO_UINT, MVT::i64, MVT::f64, 10 }
266 if (SrcTy.isFloatingPoint() && ST->hasNEON()) {
267 int Idx = ConvertCostTableLookup<MVT>(NEONFloatConversionTbl,
268 array_lengthof(NEONFloatConversionTbl),
269 ISD, DstTy.getSimpleVT(),
270 SrcTy.getSimpleVT());
272 return NEONFloatConversionTbl[Idx].Cost;
275 // Scalar integer to float conversions.
276 static const TypeConversionCostTblEntry<MVT> NEONIntegerConversionTbl[] = {
277 { ISD::SINT_TO_FP, MVT::f32, MVT::i1, 2 },
278 { ISD::UINT_TO_FP, MVT::f32, MVT::i1, 2 },
279 { ISD::SINT_TO_FP, MVT::f64, MVT::i1, 2 },
280 { ISD::UINT_TO_FP, MVT::f64, MVT::i1, 2 },
281 { ISD::SINT_TO_FP, MVT::f32, MVT::i8, 2 },
282 { ISD::UINT_TO_FP, MVT::f32, MVT::i8, 2 },
283 { ISD::SINT_TO_FP, MVT::f64, MVT::i8, 2 },
284 { ISD::UINT_TO_FP, MVT::f64, MVT::i8, 2 },
285 { ISD::SINT_TO_FP, MVT::f32, MVT::i16, 2 },
286 { ISD::UINT_TO_FP, MVT::f32, MVT::i16, 2 },
287 { ISD::SINT_TO_FP, MVT::f64, MVT::i16, 2 },
288 { ISD::UINT_TO_FP, MVT::f64, MVT::i16, 2 },
289 { ISD::SINT_TO_FP, MVT::f32, MVT::i32, 2 },
290 { ISD::UINT_TO_FP, MVT::f32, MVT::i32, 2 },
291 { ISD::SINT_TO_FP, MVT::f64, MVT::i32, 2 },
292 { ISD::UINT_TO_FP, MVT::f64, MVT::i32, 2 },
// i64 sources mirror the i64-destination penalty above.
293 { ISD::SINT_TO_FP, MVT::f32, MVT::i64, 10 },
294 { ISD::UINT_TO_FP, MVT::f32, MVT::i64, 10 },
295 { ISD::SINT_TO_FP, MVT::f64, MVT::i64, 10 },
296 { ISD::UINT_TO_FP, MVT::f64, MVT::i64, 10 }
299 if (SrcTy.isInteger() && ST->hasNEON()) {
300 int Idx = ConvertCostTableLookup<MVT>(NEONIntegerConversionTbl,
301 array_lengthof(NEONIntegerConversionTbl),
302 ISD, DstTy.getSimpleVT(),
303 SrcTy.getSimpleVT());
305 return NEONIntegerConversionTbl[Idx].Cost;
308 // Scalar integer conversion costs.
309 static const TypeConversionCostTblEntry<MVT> ARMIntegerConversionTbl[] = {
310 // i16 -> i64 requires two dependent operations.
311 { ISD::SIGN_EXTEND, MVT::i64, MVT::i16, 2 },
313 // Truncates on i64 are assumed to be free.
314 { ISD::TRUNCATE, MVT::i32, MVT::i64, 0 },
315 { ISD::TRUNCATE, MVT::i16, MVT::i64, 0 },
316 { ISD::TRUNCATE, MVT::i8, MVT::i64, 0 },
317 { ISD::TRUNCATE, MVT::i1, MVT::i64, 0 }
// Plain integer conversions (no NEON requirement).
320 if (SrcTy.isInteger()) {
322 ConvertCostTableLookup<MVT>(ARMIntegerConversionTbl,
323 array_lengthof(ARMIntegerConversionTbl),
324 ISD, DstTy.getSimpleVT(),
325 SrcTy.getSimpleVT());
327 return ARMIntegerConversionTbl[Idx].Cost;
// No table matched: defer to the target-independent default.
330 return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
// Cost of a vector insert/extract-element instruction.
// NOTE(review): the opening "if (" line of the condition (and the penalized
// return) are elided from this listing; the visible conjuncts penalize
// InsertElement into vectors with scalar elements of 32 bits or fewer.
333 unsigned ARMTTI::getVectorInstrCost(unsigned Opcode, Type *ValTy,
334 unsigned Index) const {
335 // Penalize inserting into a D-subregister. We end up with a three times
336 // lower estimated throughput on swift.
338 Opcode == Instruction::InsertElement &&
339 ValTy->isVectorTy() &&
340 ValTy->getScalarSizeInBits() <= 32)
// Everything else uses the target-independent default cost.
343 return TargetTransformInfo::getVectorInstrCost(Opcode, ValTy, Index);
// Cost of a compare or select instruction.  NEON vector selects get special
// table-driven costs (poorly-lowered wide selects are heavily penalized);
// everything else falls back to the target-independent implementation.
// NOTE(review): this listing elides lines, including the table terminator,
// the "if (Idx != -1)" guard, and the use of the legalization result LT.
346 unsigned ARMTTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
347 Type *CondTy) const {
349 int ISD = TLI->InstructionOpcodeToISD(Opcode);
350 // On NEON a vector select gets lowered to vbsl.
351 if (ST->hasNEON() && ValTy->isVectorTy() && ISD == ISD::SELECT) {
352 // Lowering of some vector selects is currently far from perfect.
// Keyed (condition type, value type) -> cost; costs grow with the number
// of scalarized lanes, with flat penalties for v8i64/v16i64.
353 static const TypeConversionCostTblEntry<MVT> NEONVectorSelectTbl[] = {
354 { ISD::SELECT, MVT::v4i1, MVT::v4i8, 2*4 + 2*1 },
355 { ISD::SELECT, MVT::v8i1, MVT::v8i8, 2*8 + 1 },
356 { ISD::SELECT, MVT::v16i1, MVT::v16i8, 2*16 + 1 },
357 { ISD::SELECT, MVT::v4i1, MVT::v4i16, 2*4 + 1 },
358 { ISD::SELECT, MVT::v8i1, MVT::v8i16, 2*8 + 1 },
359 { ISD::SELECT, MVT::v16i1, MVT::v16i16, 2*16 + 1 + 3*1 + 4*1 },
360 { ISD::SELECT, MVT::v8i1, MVT::v8i32, 4*8 + 1*3 + 1*4 + 1*2 },
361 { ISD::SELECT, MVT::v16i1, MVT::v16i32, 4*16 + 1*6 + 1*8 + 1*4 },
362 { ISD::SELECT, MVT::v4i1, MVT::v4i64, 4*4 + 1*2 + 1 },
363 { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },
364 { ISD::SELECT, MVT::v16i1, MVT::v16i64, 100 }
367 EVT SelCondTy = TLI->getValueType(CondTy);
368 EVT SelValTy = TLI->getValueType(ValTy);
369 int Idx = ConvertCostTableLookup<MVT>(NEONVectorSelectTbl,
370 array_lengthof(NEONVectorSelectTbl),
371 ISD, SelCondTy.getSimpleVT(),
372 SelValTy.getSimpleVT());
374 return NEONVectorSelectTbl[Idx].Cost;
376 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);
380 return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
// Cost of computing an address for a memory access of type Ty.
// NOTE(review): the body (return value and closing brace) is elided from
// this listing; only the signature and the opening comment are visible.
383 unsigned ARMTTI::getAddressComputationCost(Type *Ty) const {
384 // In many cases the address computation is not merged into the instruction
// Cost of a vector shuffle.  Only reverse shuffles (SK_Reverse) are modeled
// here, via a NEON table keyed on the legalized type; anything else defers
// to the target-independent implementation.
// NOTE(review): this listing elides lines, including the SubTp parameter
// line, the table terminator, and the "if (Idx == -1)" guard before the
// fallback return at the end.
389 unsigned ARMTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
391 // We only handle costs of reverse shuffles for now.
392 if (Kind != SK_Reverse)
393 return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
395 static const CostTblEntry<MVT> NEONShuffleTbl[] = {
396 // Reverse shuffle costs one instruction if we are shuffling within a double
397 // word (vrev) or two if we shuffle a quad word (vrev, vext).
398 { ISD::VECTOR_SHUFFLE, MVT::v2i32, 1 },
399 { ISD::VECTOR_SHUFFLE, MVT::v2f32, 1 },
400 { ISD::VECTOR_SHUFFLE, MVT::v2i64, 1 },
401 { ISD::VECTOR_SHUFFLE, MVT::v2f64, 1 },
403 { ISD::VECTOR_SHUFFLE, MVT::v4i32, 2 },
404 { ISD::VECTOR_SHUFFLE, MVT::v4f32, 2 },
405 { ISD::VECTOR_SHUFFLE, MVT::v8i16, 2 },
406 { ISD::VECTOR_SHUFFLE, MVT::v16i8, 2 }
// Look up the legalized type; scale the table cost by the split factor.
409 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
411 int Idx = CostTableLookup<MVT>(NEONShuffleTbl, array_lengthof(NEONShuffleTbl),
412 ISD::VECTOR_SHUFFLE, LT.second);
414 return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
416 return LT.first * NEONShuffleTbl[Idx].Cost;