1 //===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 /// This file implements a TargetTransformInfo analysis pass specific to the
11 /// X86 target machine. It uses the target's detailed information to provide
12 /// more precise answers to certain TTI queries, while letting the target
13 /// independent and default TTI implementations handle the rest.
15 //===----------------------------------------------------------------------===//
17 #include "X86TargetTransformInfo.h"
18 #include "llvm/Analysis/TargetTransformInfo.h"
19 #include "llvm/CodeGen/BasicTTIImpl.h"
20 #include "llvm/IR/IntrinsicInst.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Target/CostTable.h"
23 #include "llvm/Target/TargetLowering.h"
26 #define DEBUG_TYPE "x86tti"
28 //===----------------------------------------------------------------------===//
32 //===----------------------------------------------------------------------===//
34 TargetTransformInfo::PopcntSupportKind
35 X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
36 assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
37 // TODO: Currently the __builtin_popcount() implementation using SSE3
38 // instructions is inefficient. Once the problem is fixed, we should
39 // call ST->hasSSE3() instead of ST->hasPOPCNT().
40 return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
43 unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
44 if (Vector && !ST->hasSSE1())
48 if (Vector && ST->hasAVX512())
55 unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
57 if (ST->hasAVX512()) return 512;
58 if (ST->hasAVX()) return 256;
59 if (ST->hasSSE1()) return 128;
69 unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
70 // If the loop will not be vectorized, don't interleave the loop.
71 // Let the regular unroller unroll the loop instead, which saves the
72 // overflow-check and memory-check cost.
79 // Sandybridge and Haswell have multiple execution ports and pipelined vector units.
87 unsigned X86TTIImpl::getArithmeticInstrCost(
88 unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
89 TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
90 TTI::OperandValueProperties Opd2PropInfo) {
92 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);
94 int ISD = TLI->InstructionOpcodeToISD(Opcode);
95 assert(ISD && "Invalid opcode");
97 if (ISD == ISD::SDIV &&
98 Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
99 Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
100 // On X86, vector signed division by a power-of-two constant is
101 // normally expanded to the sequence SRA + SRL + ADD + SRA (see the
102 // illustrative example below). The OperandValue properties may not be
103 // the same as those of the previous operation; conservatively assume OP_None.
105 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info, Op2Info,
106 TargetTransformInfo::OP_None,
107 TargetTransformInfo::OP_None);
108 Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
109 TargetTransformInfo::OP_None,
110 TargetTransformInfo::OP_None);
111 Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
112 TargetTransformInfo::OP_None,
113 TargetTransformInfo::OP_None);
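// Worked example (illustrative only; the IR below is hypothetical and not
// taken from this file): for
//   %r = sdiv <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
// the expansion is roughly ashr + lshr + add + ashr, so the cost charged
// here is 2 * AShr + LShr + Add, all queried on the same <4 x i32> type
// with OP_None operand properties.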
118 static const CostTblEntry<MVT::SimpleValueType>
119 AVX2UniformConstCostTable[] = {
120 { ISD::SRA, MVT::v4i64, 4 }, // 2 x psrad + shuffle.
122 { ISD::SDIV, MVT::v16i16, 6 }, // vpmulhw sequence
123 { ISD::UDIV, MVT::v16i16, 6 }, // vpmulhuw sequence
124 { ISD::SDIV, MVT::v8i32, 15 }, // vpmuldq sequence
125 { ISD::UDIV, MVT::v8i32, 15 }, // vpmuludq sequence
128 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
130 int Idx = CostTableLookup(AVX2UniformConstCostTable, ISD, LT.second);
132 return LT.first * AVX2UniformConstCostTable[Idx].Cost;
135 static const CostTblEntry<MVT::SimpleValueType> AVX512CostTable[] = {
136 { ISD::SHL, MVT::v16i32, 1 },
137 { ISD::SRL, MVT::v16i32, 1 },
138 { ISD::SRA, MVT::v16i32, 1 },
139 { ISD::SHL, MVT::v8i64, 1 },
140 { ISD::SRL, MVT::v8i64, 1 },
141 { ISD::SRA, MVT::v8i64, 1 },
144 static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
145 // Shifts on v4i64/v8i32 on AVX2 are legal even though we declare them
146 // custom so we can detect the cases where the shift amount is a scalar.
147 { ISD::SHL, MVT::v4i32, 1 },
148 { ISD::SRL, MVT::v4i32, 1 },
149 { ISD::SRA, MVT::v4i32, 1 },
150 { ISD::SHL, MVT::v8i32, 1 },
151 { ISD::SRL, MVT::v8i32, 1 },
152 { ISD::SRA, MVT::v8i32, 1 },
153 { ISD::SHL, MVT::v2i64, 1 },
154 { ISD::SRL, MVT::v2i64, 1 },
155 { ISD::SHL, MVT::v4i64, 1 },
156 { ISD::SRL, MVT::v4i64, 1 },
158 { ISD::SHL, MVT::v32i8, 11 }, // vpblendvb sequence.
159 { ISD::SHL, MVT::v16i16, 10 }, // extend/vpsllvd/pack sequence.
161 { ISD::SRL, MVT::v32i8, 11 }, // vpblendvb sequence.
162 { ISD::SRL, MVT::v16i16, 10 }, // extend/vpsrlvd/pack sequence.
164 { ISD::SRA, MVT::v32i8, 24 }, // vpblendvb sequence.
165 { ISD::SRA, MVT::v16i16, 10 }, // extend/vpsravd/pack sequence.
166 { ISD::SRA, MVT::v4i64, 4*10 }, // Scalarized.
168 // Vectorizing division is a bad idea. See the SSE2 table for more comments.
169 { ISD::SDIV, MVT::v32i8, 32*20 },
170 { ISD::SDIV, MVT::v16i16, 16*20 },
171 { ISD::SDIV, MVT::v8i32, 8*20 },
172 { ISD::SDIV, MVT::v4i64, 4*20 },
173 { ISD::UDIV, MVT::v32i8, 32*20 },
174 { ISD::UDIV, MVT::v16i16, 16*20 },
175 { ISD::UDIV, MVT::v8i32, 8*20 },
176 { ISD::UDIV, MVT::v4i64, 4*20 },
179 if (ST->hasAVX512()) {
180 int Idx = CostTableLookup(AVX512CostTable, ISD, LT.second);
182 return LT.first * AVX512CostTable[Idx].Cost;
184 // Look for AVX2 lowering tricks.
186 if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
187 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
188 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
189 // On AVX2, a packed v16i16 shift left by a constant build_vector
190 // is lowered into a vector multiply (vpmullw).
193 int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
195 return LT.first * AVX2CostTable[Idx].Cost;
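// Example of the LT.first scaling (illustrative): on an AVX2 target without
// AVX-512, a v16i32 arithmetic shift legalizes to two v8i32 halves
// (LT.first == 2, LT.second == v8i32), so the table entry of 1 above is
// scaled to a total cost of 2.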
198 static const CostTblEntry<MVT::SimpleValueType>
199 SSE2UniformConstCostTable[] = {
200 // We don't correctly identify costs of casts because they are marked as custom.
202 // Constant splats are cheaper for the following instructions.
203 { ISD::SHL, MVT::v16i8, 1 }, // psllw.
204 { ISD::SHL, MVT::v8i16, 1 }, // psllw.
205 { ISD::SHL, MVT::v4i32, 1 }, // pslld
206 { ISD::SHL, MVT::v2i64, 1 }, // psllq.
208 { ISD::SRL, MVT::v16i8, 1 }, // psrlw.
209 { ISD::SRL, MVT::v8i16, 1 }, // psrlw.
210 { ISD::SRL, MVT::v4i32, 1 }, // psrld.
211 { ISD::SRL, MVT::v2i64, 1 }, // psrlq.
213 { ISD::SRA, MVT::v16i8, 4 }, // psrlw, pand, pxor, psubb.
214 { ISD::SRA, MVT::v8i16, 1 }, // psraw.
215 { ISD::SRA, MVT::v4i32, 1 }, // psrad.
216 { ISD::SRA, MVT::v2i64, 4 }, // 2 x psrad + shuffle.
218 { ISD::SDIV, MVT::v8i16, 6 }, // pmulhw sequence
219 { ISD::UDIV, MVT::v8i16, 6 }, // pmulhuw sequence
220 { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
221 { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
224 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
227 if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
228 return LT.first * 15;
230 int Idx = CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second);
232 return LT.first * SSE2UniformConstCostTable[Idx].Cost;
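// Worked example (illustrative): 'sdiv <16 x i16>' by a uniform constant on
// a plain SSE2 target legalizes to two v8i16 halves (LT.first == 2), and the
// { ISD::SDIV, MVT::v8i16, 6 } entry above gives a per-half cost of 6, so
// the reported cost is 2 * 6 = 12.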
235 if (ISD == ISD::SHL &&
236 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
238 if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
239 (VT == MVT::v4i32 && ST->hasSSE41()))
240 // A vector shift left by a non-uniform constant can be lowered
241 // into vector multiply (pmullw/pmulld).
243 if (VT == MVT::v4i32 && ST->hasSSE2())
244 // A vector shift left by a non-uniform constant is converted
245 // into a vector multiply; the new multiply is eventually
246 // lowered into a sequence of shuffles and 2 x pmuludq.
250 static const CostTblEntry<MVT::SimpleValueType> SSE2CostTable[] = {
251 // We don't correctly identify costs of casts because they are marked as custom.
253 // For some cases, where the shift amount is a scalar we would be able
254 // to generate better code. Unfortunately, when this is the case the value
255 // (the splat) will get hoisted out of the loop, thereby making it invisible
256 // to ISel. The cost model must return worst case assumptions because it is
257 // used for vectorization and we don't want to make vectorized code worse than scalar code.
259 { ISD::SHL, MVT::v16i8, 26 }, // cmpgtb sequence.
260 { ISD::SHL, MVT::v8i16, 32 }, // cmpgtb sequence.
261 { ISD::SHL, MVT::v4i32, 2*5 }, // We optimized this using mul.
262 { ISD::SHL, MVT::v2i64, 4 }, // splat+shuffle sequence.
263 { ISD::SHL, MVT::v4i64, 8 }, // splat+shuffle sequence.
265 { ISD::SRL, MVT::v16i8, 26 }, // cmpgtb sequence.
266 { ISD::SRL, MVT::v8i16, 32 }, // cmpgtb sequence.
267 { ISD::SRL, MVT::v4i32, 16 }, // Shift each lane + blend.
268 { ISD::SRL, MVT::v2i64, 4 }, // splat+shuffle sequence.
270 { ISD::SRA, MVT::v16i8, 54 }, // unpacked cmpgtb sequence.
271 { ISD::SRA, MVT::v8i16, 32 }, // cmpgtb sequence.
272 { ISD::SRA, MVT::v4i32, 16 }, // Shift each lane + blend.
273 { ISD::SRA, MVT::v2i64, 2*10 }, // Scalarized.
275 // It is not a good idea to vectorize division. We have to scalarize it and
276 // in the process we will often end up spilling regular
277 // registers. The overhead of division is going to dominate most kernels
278 // anyway, so try hard to prevent vectorization of division - it is
279 // generally a bad idea. Assume somewhat arbitrarily that we have to be able
280 // to hide "20 cycles" for each lane.
281 { ISD::SDIV, MVT::v16i8, 16*20 },
282 { ISD::SDIV, MVT::v8i16, 8*20 },
283 { ISD::SDIV, MVT::v4i32, 4*20 },
284 { ISD::SDIV, MVT::v2i64, 2*20 },
285 { ISD::UDIV, MVT::v16i8, 16*20 },
286 { ISD::UDIV, MVT::v8i16, 8*20 },
287 { ISD::UDIV, MVT::v4i32, 4*20 },
288 { ISD::UDIV, MVT::v2i64, 2*20 },
292 int Idx = CostTableLookup(SSE2CostTable, ISD, LT.second);
294 return LT.first * SSE2CostTable[Idx].Cost;
297 static const CostTblEntry<MVT::SimpleValueType> AVX1CostTable[] = {
298 // We don't have to scalarize unsupported ops. We can issue two half-sized
299 // operations and we only need to extract the upper YMM half.
300 // Two ops + 1 extract + 1 insert = 4.
301 { ISD::MUL, MVT::v16i16, 4 },
302 { ISD::MUL, MVT::v8i32, 4 },
303 { ISD::SUB, MVT::v8i32, 4 },
304 { ISD::ADD, MVT::v8i32, 4 },
305 { ISD::SUB, MVT::v4i64, 4 },
306 { ISD::ADD, MVT::v4i64, 4 },
307 // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
308 // are lowered as a series of long multiplies (3), shifts (4) and adds (2).
309 // Because we believe v4i64 to be a legal type, we must also include the
310 // split factor of two in the cost table. Therefore, the cost here is 18 (2 x 9).
312 { ISD::MUL, MVT::v4i64, 18 },
315 // Look for AVX1 lowering tricks.
316 if (ST->hasAVX() && !ST->hasAVX2()) {
319 // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
320 // sequence of extract + two vector multiply + insert.
321 if (ISD == ISD::SHL && (VT == MVT::v8i32 || VT == MVT::v16i16) &&
322 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)
325 int Idx = CostTableLookup(AVX1CostTable, ISD, VT);
327 return LT.first * AVX1CostTable[Idx].Cost;
330 // Custom lowering of vectors.
331 static const CostTblEntry<MVT::SimpleValueType> CustomLowered[] = {
332 // A v2i64/v4i64 multiply is custom lowered as a series of long
333 // multiplies(3), shifts(4) and adds(2).
334 { ISD::MUL, MVT::v2i64, 9 },
335 { ISD::MUL, MVT::v4i64, 9 },
337 int Idx = CostTableLookup(CustomLowered, ISD, LT.second);
339 return LT.first * CustomLowered[Idx].Cost;
341 // Special lowering of v4i32 mul on sse2, sse3: Lower v4i32 mul as 2x shuffle,
342 // 2x pmuludq, 2x shuffle.
343 if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
347 // Fallback to the default implementation.
348 return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
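// Hypothetical caller sketch (illustration only; real clients such as the
// vectorizers go through the TargetTransformInfo wrapper rather than calling
// X86TTIImpl directly, and the variable names below are made up):
//   unsigned C = TTI.getArithmeticInstrCost(
//       Instruction::Shl, VectorType::get(Builder.getInt16Ty(), 16),
//       TTI::OK_AnyValue, TTI::OK_UniformConstantValue);
// On an AVX2 target this reaches the v16i16 shift-by-constant special case
// above, which models the shift as a single vpmullw.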
351 unsigned X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
353 // We only estimate the cost of reverse and alternate shuffles.
354 if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
355 return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
357 if (Kind == TTI::SK_Reverse) {
358 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
360 if (LT.second.getSizeInBits() > 128)
361 Cost = 3; // Extract + insert + copy.
363 // Multiply by the number of parts.
364 return Cost * LT.first;
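// Example (illustrative): reversing a <8 x float> vector on an AVX target
// legalizes to a single 256-bit v8f32 part, which is wider than 128 bits,
// so the cost is 3 (extract + insert + copy) with LT.first == 1.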
367 if (Kind == TTI::SK_Alternate) {
368 // 64-bit packed float vectors (v2f32) are widened to type v4f32.
369 // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
370 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
372 // The backend knows how to generate a single VEX.256 version of
373 // instruction VPBLENDW if the target supports AVX2.
374 if (ST->hasAVX2() && LT.second == MVT::v16i16)
377 static const CostTblEntry<MVT::SimpleValueType> AVXAltShuffleTbl[] = {
378 {ISD::VECTOR_SHUFFLE, MVT::v4i64, 1}, // vblendpd
379 {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1}, // vblendpd
381 {ISD::VECTOR_SHUFFLE, MVT::v8i32, 1}, // vblendps
382 {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1}, // vblendps
384 // This shuffle is custom lowered into a sequence of:
385 // 2x vextractf128, 2x vpblendw, 1x vinsertf128
386 {ISD::VECTOR_SHUFFLE, MVT::v16i16, 5},
388 // This shuffle is custom lowered into a long sequence of:
389 // 2x vextractf128, 4x vpshufb, 2x vpor, 1x vinsertf128
390 {ISD::VECTOR_SHUFFLE, MVT::v32i8, 9}
394 int Idx = CostTableLookup(AVXAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
396 return LT.first * AVXAltShuffleTbl[Idx].Cost;
399 static const CostTblEntry<MVT::SimpleValueType> SSE41AltShuffleTbl[] = {
400 // These are lowered into movsd.
401 {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
402 {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
404 // packed float vectors with four elements are lowered into BLENDI dag
405 // nodes. A v4i32/v4f32 BLENDI generates a single 'blendps'/'blendpd'.
406 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
407 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
409 // This shuffle generates a single pshufw.
410 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
412 // There is no instruction that matches a v16i8 alternate shuffle.
413 // The backend will expand it into the sequence 'pshufb + pshufb + or'.
414 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}
417 if (ST->hasSSE41()) {
418 int Idx = CostTableLookup(SSE41AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
420 return LT.first * SSE41AltShuffleTbl[Idx].Cost;
423 static const CostTblEntry<MVT::SimpleValueType> SSSE3AltShuffleTbl[] = {
424 {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1}, // movsd
425 {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1}, // movsd
427 // SSE3 doesn't have 'blendps'. The following shuffles are expanded into
428 // the sequence 'shufps + pshufd'
429 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
430 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
432 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 3}, // pshufb + pshufb + or
433 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3} // pshufb + pshufb + or
436 if (ST->hasSSSE3()) {
437 int Idx = CostTableLookup(SSSE3AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
439 return LT.first * SSSE3AltShuffleTbl[Idx].Cost;
442 static const CostTblEntry<MVT::SimpleValueType> SSEAltShuffleTbl[] = {
443 {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1}, // movsd
444 {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1}, // movsd
446 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2}, // shufps + pshufd
447 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2}, // shufps + pshufd
449 // This is expanded into a long sequence of four extract + four insert.
450 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 8}, // 4 x pextrw + 4 pinsrw.
452 // 8 x (pinsrw + pextrw + and + movb + movzb + or)
453 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 48}
456 // Fall-back (SSE3 and SSE2).
457 int Idx = CostTableLookup(SSEAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
459 return LT.first * SSEAltShuffleTbl[Idx].Cost;
460 return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
463 return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
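// Illustrative IR (hypothetical, not taken from a test): an "alternate"
// shuffle picks every other lane from two sources, e.g.
//   %r = shufflevector <4 x float> %a, <4 x float> %b,
//                      <4 x i32> <i32 0, i32 5, i32 2, i32 7>
// With SSE4.1 this matches the v4f32 entry above and is modeled as a single
// blend (cost 1); with only SSE2 it is modeled as shufps + pshufd (cost 2).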
466 unsigned X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
467 int ISD = TLI->InstructionOpcodeToISD(Opcode);
468 assert(ISD && "Invalid opcode");
470 static const TypeConversionCostTblEntry<MVT::SimpleValueType>
471 AVX512ConversionTbl[] = {
472 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 },
473 { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 },
474 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 },
475 { ISD::FP_ROUND, MVT::v16f32, MVT::v8f64, 3 },
477 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 1 },
478 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 1 },
479 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 1 },
480 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 },
481 { ISD::TRUNCATE, MVT::v16i32, MVT::v8i64, 4 },
483 // v16i1 -> v16i32 - load + broadcast
484 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 2 },
485 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 },
487 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
488 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
489 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
490 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
491 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v16i32, 3 },
492 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v16i32, 3 },
494 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
495 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 },
496 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
497 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
498 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
499 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
500 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
503 static const TypeConversionCostTblEntry<MVT::SimpleValueType>
504 AVX2ConversionTbl[] = {
505 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
506 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
507 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
508 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
509 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
510 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
511 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
512 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
513 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
514 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
515 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 3 },
516 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 3 },
517 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
518 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
519 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
520 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
522 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 },
523 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 },
524 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 },
525 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 },
526 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 },
527 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 4 },
529 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 },
530 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 },
532 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 },
535 static const TypeConversionCostTblEntry<MVT::SimpleValueType>
536 AVXConversionTbl[] = {
537 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
538 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
539 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 },
540 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 },
541 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 7 },
542 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 4 },
543 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
544 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
545 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 },
546 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 },
547 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 6 },
548 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
549 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 6 },
550 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
551 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 4 },
552 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 4 },
554 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 4 },
555 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 4 },
556 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 4 },
557 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
558 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
559 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 4 },
560 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 9 },
562 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
563 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 },
564 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
565 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
566 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
567 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
568 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 3 },
569 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
570 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 },
571 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i8, 3 },
572 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i16, 3 },
573 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
575 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 },
576 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 5 },
577 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
578 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 9 },
579 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 },
580 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 2 },
581 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
582 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 6 },
583 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 },
584 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
585 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
586 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 },
587 // The generic code to compute the scalar overhead is currently broken.
588 // Workaround this limitation by estimating the scalarization overhead
589 // here. We have roughly 10 instructions per scalar element.
590 // Multiply that by the vector width.
591 // FIXME: remove that when PR19268 is fixed.
592 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
593 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 4*10 },
595 { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 7 },
596 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 1 },
597 // This node is expanded into scalarized operations but BasicTTI is overly
598 // optimistic estimating its cost. It computes 3 per element (one
599 // vector-extract, one scalar conversion and one vector-insert). The
600 // problem is that the inserts form a read-modify-write chain so latency
601 // should be factored in too. Inflating the cost per element by 1.
602 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 8*4 },
603 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4*4 },
606 static const TypeConversionCostTblEntry<MVT::SimpleValueType>
608 // These are somewhat magic numbers justified by looking at the output of
609 // Intel's IACA, running some kernels and making sure when we take
610 // legalization into account the throughput will be overestimated.
611 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
612 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
613 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
614 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
615 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
616 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
617 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
618 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
619 // There are faster sequences for float conversions.
620 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
621 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
622 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
623 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
624 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
625 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
626 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
627 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
630 std::pair<unsigned, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
631 std::pair<unsigned, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);
633 if (ST->hasSSE2() && !ST->hasAVX()) {
635 ConvertCostTableLookup(SSE2ConvTbl, ISD, LTDest.second, LTSrc.second);
637 return LTSrc.first * SSE2ConvTbl[Idx].Cost;
640 if (ST->hasAVX512()) {
641 int Idx = ConvertCostTableLookup(AVX512ConversionTbl, ISD, LTDest.second,
644 return AVX512ConversionTbl[Idx].Cost;
647 EVT SrcTy = TLI->getValueType(DL, Src);
648 EVT DstTy = TLI->getValueType(DL, Dst);
650 // The function getSimpleVT only handles simple value types.
651 if (!SrcTy.isSimple() || !DstTy.isSimple())
652 return BaseT::getCastInstrCost(Opcode, Dst, Src);
655 int Idx = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
656 DstTy.getSimpleVT(), SrcTy.getSimpleVT());
658 return AVX2ConversionTbl[Idx].Cost;
662 int Idx = ConvertCostTableLookup(AVXConversionTbl, ISD, DstTy.getSimpleVT(),
663 SrcTy.getSimpleVT());
665 return AVXConversionTbl[Idx].Cost;
668 return BaseT::getCastInstrCost(Opcode, Dst, Src);
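// Example (illustrative): on a plain AVX target, 'sitofp <8 x i16> to
// <8 x float>' matches { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 } in
// AVXConversionTbl above and is reported as 5; a conversion with no table
// entry falls through to BaseT::getCastInstrCost.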
671 unsigned X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
673 // Legalize the type.
674 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
678 int ISD = TLI->InstructionOpcodeToISD(Opcode);
679 assert(ISD && "Invalid opcode");
681 static const CostTblEntry<MVT::SimpleValueType> SSE42CostTbl[] = {
682 { ISD::SETCC, MVT::v2f64, 1 },
683 { ISD::SETCC, MVT::v4f32, 1 },
684 { ISD::SETCC, MVT::v2i64, 1 },
685 { ISD::SETCC, MVT::v4i32, 1 },
686 { ISD::SETCC, MVT::v8i16, 1 },
687 { ISD::SETCC, MVT::v16i8, 1 },
690 static const CostTblEntry<MVT::SimpleValueType> AVX1CostTbl[] = {
691 { ISD::SETCC, MVT::v4f64, 1 },
692 { ISD::SETCC, MVT::v8f32, 1 },
693 // AVX1 does not support 8-wide integer compare.
694 { ISD::SETCC, MVT::v4i64, 4 },
695 { ISD::SETCC, MVT::v8i32, 4 },
696 { ISD::SETCC, MVT::v16i16, 4 },
697 { ISD::SETCC, MVT::v32i8, 4 },
700 static const CostTblEntry<MVT::SimpleValueType> AVX2CostTbl[] = {
701 { ISD::SETCC, MVT::v4i64, 1 },
702 { ISD::SETCC, MVT::v8i32, 1 },
703 { ISD::SETCC, MVT::v16i16, 1 },
704 { ISD::SETCC, MVT::v32i8, 1 },
707 static const CostTblEntry<MVT::SimpleValueType> AVX512CostTbl[] = {
708 { ISD::SETCC, MVT::v8i64, 1 },
709 { ISD::SETCC, MVT::v16i32, 1 },
710 { ISD::SETCC, MVT::v8f64, 1 },
711 { ISD::SETCC, MVT::v16f32, 1 },
714 if (ST->hasAVX512()) {
715 int Idx = CostTableLookup(AVX512CostTbl, ISD, MTy);
717 return LT.first * AVX512CostTbl[Idx].Cost;
721 int Idx = CostTableLookup(AVX2CostTbl, ISD, MTy);
723 return LT.first * AVX2CostTbl[Idx].Cost;
727 int Idx = CostTableLookup(AVX1CostTbl, ISD, MTy);
729 return LT.first * AVX1CostTbl[Idx].Cost;
732 if (ST->hasSSE42()) {
733 int Idx = CostTableLookup(SSE42CostTbl, ISD, MTy);
735 return LT.first * SSE42CostTbl[Idx].Cost;
738 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
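// Example (illustrative): an 'fcmp' on <4 x double> with AVX matches the
// v4f64 entry (cost 1), while an 'icmp' on <4 x i64> with AVX1 only costs 4
// because 256-bit integer compares must be split into 128-bit halves.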
741 unsigned X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
743 assert(Val->isVectorTy() && "This must be a vector type");
746 // Legalize the type.
747 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);
749 // This type is legalized to a scalar type.
750 if (!LT.second.isVector())
753 // The type may be split. Normalize the index to the new type.
754 unsigned Width = LT.second.getVectorNumElements();
755 Index = Index % Width;
757 // Floating point scalars are already located in index #0.
758 if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
762 return BaseT::getVectorInstrCost(Opcode, Val, Index);
765 unsigned X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert,
767 assert (Ty->isVectorTy() && "Can only scalarize vectors");
770 for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
772 Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i);
774 Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i);
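// Example (illustrative): fully scalarizing a <4 x i32> value with both
// Insert and Extract set charges four InsertElement and four ExtractElement
// costs, i.e. the per-lane overhead of moving data in and out of the vector.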
780 unsigned X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
782 unsigned AddressSpace) {
783 // Handle non-power-of-two vectors such as <3 x float>
784 if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
785 unsigned NumElem = VTy->getVectorNumElements();
787 // Handle a few common cases:
789 if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
790 // Cost = 64 bit store + extract + 32 bit store.
794 if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
795 // Cost = 128 bit store + unpack + 64 bit store.
798 // Assume that all other non-power-of-two numbers are scalarized.
799 if (!isPowerOf2_32(NumElem)) {
800 unsigned Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(),
801 Alignment, AddressSpace);
802 unsigned SplitCost = getScalarizationOverhead(Src,
803 Opcode == Instruction::Load,
804 Opcode==Instruction::Store);
805 return NumElem * Cost + SplitCost;
809 // Legalize the type.
810 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
811 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
814 // Each load/store unit costs 1.
815 unsigned Cost = LT.first * 1;
817 // On Sandybridge 256bit load/stores are double pumped
818 // (but not on Haswell).
819 if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
825 unsigned X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
827 unsigned AddressSpace) {
828 VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
830 // For a scalar access, take the regular memory-op cost, without the mask.
831 return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);
833 unsigned NumElem = SrcVTy->getVectorNumElements();
835 VectorType::get(Type::getInt8Ty(getGlobalContext()), NumElem);
836 if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy, 1)) ||
837 (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy, 1)) ||
838 !isPowerOf2_32(NumElem)) {
840 unsigned MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
841 unsigned ScalarCompareCost =
842 getCmpSelInstrCost(Instruction::ICmp,
843 Type::getInt8Ty(getGlobalContext()), NULL);
844 unsigned BranchCost = getCFInstrCost(Instruction::Br);
845 unsigned MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
847 unsigned ValueSplitCost =
848 getScalarizationOverhead(SrcVTy, Opcode == Instruction::Load,
849 Opcode == Instruction::Store);
851 NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
852 Alignment, AddressSpace);
853 return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
856 // Legalize the type.
857 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
859 if (LT.second != TLI->getValueType(DL, SrcVTy).getSimpleVT() &&
860 LT.second.getVectorNumElements() == NumElem)
861 // Promotion requires expand/truncate for data and a shuffle for mask.
862 Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, 0) +
863 getShuffleCost(TTI::SK_Alternate, MaskTy, 0, 0);
865 else if (LT.second.getVectorNumElements() > NumElem) {
866 VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
867 LT.second.getVectorNumElements());
868 // Expanding requires filling the mask with zeroes.
869 Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
871 if (!ST->hasAVX512())
872 return Cost + LT.first*4; // Each maskmov costs 4
874 // AVX-512 masked load/store is cheaper.
875 return Cost+LT.first;
878 unsigned X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
879 // Address computations in vectorized code with non-consecutive addresses will
880 // likely result in more instructions compared to scalar code where the
881 // computation can more often be merged into the index mode. The resulting
882 // extra micro-ops can significantly decrease throughput.
883 unsigned NumVectorInstToHideOverhead = 10;
885 if (Ty->isVectorTy() && IsComplex)
886 return NumVectorInstToHideOverhead;
888 return BaseT::getAddressComputationCost(Ty, IsComplex);
891 unsigned X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
894 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);
898 int ISD = TLI->InstructionOpcodeToISD(Opcode);
899 assert(ISD && "Invalid opcode");
901 // We use the Intel Architecture Code Analyzer (IACA) to measure the
902 // throughput and use that as the cost.
904 static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblPairWise[] = {
905 { ISD::FADD, MVT::v2f64, 2 },
906 { ISD::FADD, MVT::v4f32, 4 },
907 { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
908 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
909 { ISD::ADD, MVT::v8i16, 5 },
912 static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblPairWise[] = {
913 { ISD::FADD, MVT::v4f32, 4 },
914 { ISD::FADD, MVT::v4f64, 5 },
915 { ISD::FADD, MVT::v8f32, 7 },
916 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
917 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
918 { ISD::ADD, MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
919 { ISD::ADD, MVT::v8i16, 5 },
920 { ISD::ADD, MVT::v8i32, 5 },
923 static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblNoPairWise[] = {
924 { ISD::FADD, MVT::v2f64, 2 },
925 { ISD::FADD, MVT::v4f32, 4 },
926 { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
927 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
928 { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
931 static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblNoPairWise[] = {
932 { ISD::FADD, MVT::v4f32, 3 },
933 { ISD::FADD, MVT::v4f64, 3 },
934 { ISD::FADD, MVT::v8f32, 4 },
935 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
936 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
937 { ISD::ADD, MVT::v4i64, 3 },
938 { ISD::ADD, MVT::v8i16, 4 },
939 { ISD::ADD, MVT::v8i32, 5 },
944 int Idx = CostTableLookup(AVX1CostTblPairWise, ISD, MTy);
946 return LT.first * AVX1CostTblPairWise[Idx].Cost;
949 if (ST->hasSSE42()) {
950 int Idx = CostTableLookup(SSE42CostTblPairWise, ISD, MTy);
952 return LT.first * SSE42CostTblPairWise[Idx].Cost;
956 int Idx = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy);
958 return LT.first * AVX1CostTblNoPairWise[Idx].Cost;
961 if (ST->hasSSE42()) {
962 int Idx = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy);
964 return LT.first * SSE42CostTblNoPairWise[Idx].Cost;
968 return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
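// Example (illustrative): a non-pairwise FADD reduction of <4 x float> on an
// AVX target matches { ISD::FADD, MVT::v4f32, 3 } in AVX1CostTblNoPairWise
// and is reported as 3; the pairwise form of the same reduction costs 4.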
971 /// \brief Calculate the cost of materializing a 64-bit value. This helper
972 /// method might only calculate a fraction of a larger immediate. Therefore it
973 /// is valid to return a cost of ZERO.
974 unsigned X86TTIImpl::getIntImmCost(int64_t Val) {
976 return TTI::TCC_Free;
979 return TTI::TCC_Basic;
981 return 2 * TTI::TCC_Basic;
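// Worked example (illustrative, assuming the usual checks for zero and for a
// sign-extended 32-bit value above): Val == 0 is free, Val == 1234 costs
// TCC_Basic, and a value such as 0x123456789 that needs a full 64-bit
// materialization costs 2 * TCC_Basic.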
984 unsigned X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
985 assert(Ty->isIntegerTy());
987 unsigned BitSize = Ty->getPrimitiveSizeInBits();
991 // Never hoist constants larger than 128 bits, because this might lead to
992 // incorrect code generation or assertions in codegen.
993 // FIXME: Create a cost model for types larger than i128 once the codegen
994 // issues have been fixed.
996 return TTI::TCC_Free;
999 return TTI::TCC_Free;
1001 // Sign-extend all constants to a multiple of 64 bits.
1004 ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);
1006 // Split the constant into 64-bit chunks and calculate the cost for each
1009 for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
1010 APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
1011 int64_t Val = Tmp.getSExtValue();
1012 Cost += getIntImmCost(Val);
1014 // We need at least one instruction to materialize the constant.
1015 return std::max(1U, Cost);
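// Example (illustrative): a 128-bit immediate is sign-extended to 128 bits
// and costed as two 64-bit chunks; if, say, the low chunk is zero and the
// high chunk fits in a sign-extended 32-bit value, the accumulated Cost is
// TCC_Basic, and std::max(1U, Cost) guarantees at least one instruction is
// charged for the materialization.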
1018 unsigned X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
1019 const APInt &Imm, Type *Ty) {
1020 assert(Ty->isIntegerTy());
1022 unsigned BitSize = Ty->getPrimitiveSizeInBits();
1023 // There is no cost model for constants with a bit size of 0. Return TCC_Free
1024 // here, so that constant hoisting will ignore this constant.
1026 return TTI::TCC_Free;
1028 unsigned ImmIdx = ~0U;
1031 return TTI::TCC_Free;
1032 case Instruction::GetElementPtr:
1033 // Always hoist the base address of a GetElementPtr. This prevents the
1034 // creation of new constants for every base constant that gets constant
1035 // folded with the offset.
1037 return 2 * TTI::TCC_Basic;
1038 return TTI::TCC_Free;
1039 case Instruction::Store:
1042 case Instruction::Add:
1043 case Instruction::Sub:
1044 case Instruction::Mul:
1045 case Instruction::UDiv:
1046 case Instruction::SDiv:
1047 case Instruction::URem:
1048 case Instruction::SRem:
1049 case Instruction::And:
1050 case Instruction::Or:
1051 case Instruction::Xor:
1052 case Instruction::ICmp:
1055 // Always return TCC_Free for the shift value of a shift instruction.
1056 case Instruction::Shl:
1057 case Instruction::LShr:
1058 case Instruction::AShr:
1060 return TTI::TCC_Free;
1062 case Instruction::Trunc:
1063 case Instruction::ZExt:
1064 case Instruction::SExt:
1065 case Instruction::IntToPtr:
1066 case Instruction::PtrToInt:
1067 case Instruction::BitCast:
1068 case Instruction::PHI:
1069 case Instruction::Call:
1070 case Instruction::Select:
1071 case Instruction::Ret:
1072 case Instruction::Load:
1076 if (Idx == ImmIdx) {
1077 unsigned NumConstants = (BitSize + 63) / 64;
1078 unsigned Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
1079 return (Cost <= NumConstants * TTI::TCC_Basic)
1080 ? static_cast<unsigned>(TTI::TCC_Free)
1084 return X86TTIImpl::getIntImmCost(Imm, Ty);
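// Example (illustrative): for 'add i64 %x, 1234', when the queried operand is
// the instruction's immediate operand, the constant fits in one 64-bit chunk
// and costs TCC_Basic, which is within the NumConstants * TCC_Basic budget,
// so TCC_Free is returned and constant hoisting leaves the immediate in place.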
1087 unsigned X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
1088 const APInt &Imm, Type *Ty) {
1089 assert(Ty->isIntegerTy());
1091 unsigned BitSize = Ty->getPrimitiveSizeInBits();
1092 // There is no cost model for constants with a bit size of 0. Return TCC_Free
1093 // here, so that constant hoisting will ignore this constant.
1095 return TTI::TCC_Free;
1099 return TTI::TCC_Free;
1100 case Intrinsic::sadd_with_overflow:
1101 case Intrinsic::uadd_with_overflow:
1102 case Intrinsic::ssub_with_overflow:
1103 case Intrinsic::usub_with_overflow:
1104 case Intrinsic::smul_with_overflow:
1105 case Intrinsic::umul_with_overflow:
1106 if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
1107 return TTI::TCC_Free;
1109 case Intrinsic::experimental_stackmap:
1110 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
1111 return TTI::TCC_Free;
1113 case Intrinsic::experimental_patchpoint_void:
1114 case Intrinsic::experimental_patchpoint_i64:
1115 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
1116 return TTI::TCC_Free;
1119 return X86TTIImpl::getIntImmCost(Imm, Ty);
1122 bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, int Consecutive) {
1123 int DataWidth = DataTy->getPrimitiveSizeInBits();
1125 // TODO: AVX512 allows gather/scatter, which works with strided and random accesses as well.
1126 if ((DataWidth < 32) || (Consecutive == 0))
1128 if (ST->hasAVX512() || ST->hasAVX2())
1133 bool X86TTIImpl::isLegalMaskedStore(Type *DataType, int Consecutive) {
1134 return isLegalMaskedLoad(DataType, Consecutive);
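// Hypothetical usage sketch (illustration only; names are made up): a client
// deciding whether to emit a masked store would ask
//   if (TTI.isLegalMaskedStore(VecTy, /*Consecutive=*/1)) { ... }
// Non-consecutive accesses (Consecutive == 0) are always rejected above;
// otherwise legality depends on AVX2 or AVX-512 support and the data width.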
1137 bool X86TTIImpl::hasCompatibleFunctionAttributes(const Function *Caller,
1138 const Function *Callee) const {
1139 const TargetMachine &TM = getTLI()->getTargetMachine();
1141 // Work this as a subsetting of subtarget features.
1142 const FeatureBitset &CallerBits =
1143 TM.getSubtargetImpl(*Caller)->getFeatureBits();
1144 const FeatureBitset &CalleeBits =
1145 TM.getSubtargetImpl(*Callee)->getFeatureBits();
1147 // FIXME: This is likely too limiting as it will include subtarget features
1148 // that we might not care about for inlining, but it is conservatively correct.
1150 return (CallerBits & CalleeBits) == CalleeBits;