1 //===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 /// This file implements a TargetTransformInfo analysis pass specific to the
11 /// X86 target machine. It uses the target's detailed information to provide
12 /// more precise answers to certain TTI queries, while letting the target
13 /// independent and default TTI implementations handle the rest.
15 //===----------------------------------------------------------------------===//
17 #include "X86TargetTransformInfo.h"
18 #include "llvm/Analysis/TargetTransformInfo.h"
19 #include "llvm/CodeGen/BasicTTIImpl.h"
20 #include "llvm/IR/IntrinsicInst.h"
21 #include "llvm/Support/Debug.h"
22 #include "llvm/Target/CostTable.h"
23 #include "llvm/Target/TargetLowering.h"

25 using namespace llvm;
26 #define DEBUG_TYPE "x86tti"
28 //===----------------------------------------------------------------------===//
29 //
30 // X86 cost model.
31 //
32 //===----------------------------------------------------------------------===//
34 TargetTransformInfo::PopcntSupportKind
35 X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
36 assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
37 // TODO: Currently the __builtin_popcount() implementation using SSE3
38 // instructions is inefficient. Once the problem is fixed, we should
39 // call ST->hasSSE3() instead of ST->hasPOPCNT().
40 return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
43 unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
44 if (Vector && !ST->hasSSE1())
48 if (Vector && ST->hasAVX512())
55 unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
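// Vector registers: 512-bit ZMM with AVX-512, 256-bit YMM with AVX, 128-bit XMM with SSE.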
57 if (ST->hasAVX512()) return 512;
58 if (ST->hasAVX()) return 256;
59 if (ST->hasSSE1()) return 128;
69 unsigned X86TTIImpl::getMaxInterleaveFactor() {
73 // Sandybridge and Haswell have multiple execution ports and pipelined
74 // vector units.
81 unsigned X86TTIImpl::getArithmeticInstrCost(
82 unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
83 TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
84 TTI::OperandValueProperties Opd2PropInfo) {
86 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);
88 int ISD = TLI->InstructionOpcodeToISD(Opcode);
89 assert(ISD && "Invalid opcode");
91 if (ISD == ISD::SDIV &&
92 Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
93 Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
94 // On X86, vector signed division by a constant power-of-two is
95 // normally expanded to the sequence SRA + SRL + ADD + SRA.
96 // The OperandValue properties may not be the same as those of the
97 // previous operation; conservatively assume OP_None.
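// For example, for an i32 lane divided by 8 the expansion is roughly
//   t = x + ((x ashr 31) lshr 29);  result = t ashr 3
// i.e. one SRA to broadcast the sign bit, one SRL to form the rounding bias,
// one ADD, and a final SRA, which is what the cost computed below models.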
98 unsigned Cost =
99 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info, Op2Info,
100 TargetTransformInfo::OP_None,
101 TargetTransformInfo::OP_None);
102 Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
103 TargetTransformInfo::OP_None,
104 TargetTransformInfo::OP_None);
105 Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
106 TargetTransformInfo::OP_None,
107 TargetTransformInfo::OP_None);

109 return Cost;
110 }
112 static const CostTblEntry<MVT::SimpleValueType>
113 AVX2UniformConstCostTable[] = {
114 { ISD::SDIV, MVT::v16i16, 6 }, // vpmulhw sequence
115 { ISD::UDIV, MVT::v16i16, 6 }, // vpmulhuw sequence
116 { ISD::SDIV, MVT::v8i32, 15 }, // vpmuldq sequence
117 { ISD::UDIV, MVT::v8i32, 15 }, // vpmuludq sequence
120 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
122 int Idx = CostTableLookup(AVX2UniformConstCostTable, ISD, LT.second);
124 return LT.first * AVX2UniformConstCostTable[Idx].Cost;
127 static const CostTblEntry<MVT::SimpleValueType> AVX512CostTable[] = {
128 { ISD::SHL, MVT::v16i32, 1 },
129 { ISD::SRL, MVT::v16i32, 1 },
130 { ISD::SRA, MVT::v16i32, 1 },
131 { ISD::SHL, MVT::v8i64, 1 },
132 { ISD::SRL, MVT::v8i64, 1 },
133 { ISD::SRA, MVT::v8i64, 1 },
136 static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
137 // Shifts on v4i64/v8i32 are legal on AVX2 even though we declare them
138 // custom, so that we can detect the cases where the shift amount is a scalar.
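// The cost-1 entries below are legal thanks to the AVX2 per-lane variable
// shifts (vpsllvd/vpsrlvd/vpsravd, vpsllvq/vpsrlvq): a single instruction
// covers the whole vector.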
139 { ISD::SHL, MVT::v4i32, 1 },
140 { ISD::SRL, MVT::v4i32, 1 },
141 { ISD::SRA, MVT::v4i32, 1 },
142 { ISD::SHL, MVT::v8i32, 1 },
143 { ISD::SRL, MVT::v8i32, 1 },
144 { ISD::SRA, MVT::v8i32, 1 },
145 { ISD::SHL, MVT::v2i64, 1 },
146 { ISD::SRL, MVT::v2i64, 1 },
147 { ISD::SHL, MVT::v4i64, 1 },
148 { ISD::SRL, MVT::v4i64, 1 },
150 { ISD::SHL, MVT::v32i8, 42 }, // cmpeqb sequence.
151 { ISD::SHL, MVT::v16i16, 16*10 }, // Scalarized.
153 { ISD::SRL, MVT::v32i8, 32*10 }, // Scalarized.
154 { ISD::SRL, MVT::v16i16, 8*10 }, // Scalarized.
156 { ISD::SRA, MVT::v32i8, 32*10 }, // Scalarized.
157 { ISD::SRA, MVT::v16i16, 16*10 }, // Scalarized.
158 { ISD::SRA, MVT::v4i64, 4*10 }, // Scalarized.
160 // Vectorizing division is a bad idea. See the SSE2 table for more comments.
161 { ISD::SDIV, MVT::v32i8, 32*20 },
162 { ISD::SDIV, MVT::v16i16, 16*20 },
163 { ISD::SDIV, MVT::v8i32, 8*20 },
164 { ISD::SDIV, MVT::v4i64, 4*20 },
165 { ISD::UDIV, MVT::v32i8, 32*20 },
166 { ISD::UDIV, MVT::v16i16, 16*20 },
167 { ISD::UDIV, MVT::v8i32, 8*20 },
168 { ISD::UDIV, MVT::v4i64, 4*20 },
171 if (ST->hasAVX512()) {
172 int Idx = CostTableLookup(AVX512CostTable, ISD, LT.second);
174 return LT.first * AVX512CostTable[Idx].Cost;
176 // Look for AVX2 lowering tricks.
178 if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
179 (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
180 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
181 // On AVX2, a packed v16i16 shift left by a constant build_vector
182 // is lowered into a vector multiply (vpmullw).
185 int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
187 return LT.first * AVX2CostTable[Idx].Cost;
190 static const CostTblEntry<MVT::SimpleValueType>
191 SSE2UniformConstCostTable[] = {
192 // We don't correctly identify costs of casts because they are marked as
193 // custom.
194 // Constant splats are cheaper for the following instructions.
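// A splat (uniform constant) shift count can use the shift-by-immediate
// forms, which shift every lane with a single instruction, hence the low costs.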
195 { ISD::SHL, MVT::v16i8, 1 }, // psllw.
196 { ISD::SHL, MVT::v8i16, 1 }, // psllw.
197 { ISD::SHL, MVT::v4i32, 1 }, // pslld
198 { ISD::SHL, MVT::v2i64, 1 }, // psllq.
200 { ISD::SRL, MVT::v16i8, 1 }, // psrlw.
201 { ISD::SRL, MVT::v8i16, 1 }, // psrlw.
202 { ISD::SRL, MVT::v4i32, 1 }, // psrld.
203 { ISD::SRL, MVT::v2i64, 1 }, // psrlq.
205 { ISD::SRA, MVT::v16i8, 4 }, // psrlw, pand, pxor, psubb.
206 { ISD::SRA, MVT::v8i16, 1 }, // psraw.
207 { ISD::SRA, MVT::v4i32, 1 }, // psrad.
209 { ISD::SDIV, MVT::v8i16, 6 }, // pmulhw sequence
210 { ISD::UDIV, MVT::v8i16, 6 }, // pmulhuw sequence
211 { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
212 { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
215 if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
218 if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
219 return LT.first * 15;
221 int Idx = CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second);
223 return LT.first * SSE2UniformConstCostTable[Idx].Cost;
226 if (ISD == ISD::SHL &&
227 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
229 if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
230 (VT == MVT::v4i32 && ST->hasSSE41()))
231 // A vector shift left by a non-uniform constant can be lowered
232 // into a vector multiply (pmullw/pmulld).
234 if (VT == MVT::v4i32 && ST->hasSSE2())
235 // A vector shift left by a non-uniform constant is converted
236 // into a vector multiply; the new multiply is eventually
237 // lowered into a sequence of shuffles and 2 x pmuludq.
241 static const CostTblEntry<MVT::SimpleValueType> SSE2CostTable[] = {
242 // We don't correctly identify costs of casts because they are marked as
243 // custom.
244 // For some cases, where the shift amount is a scalar, we would be able
245 // to generate better code. Unfortunately, when this is the case the value
246 // (the splat) will get hoisted out of the loop, thereby making it invisible
247 // to ISel. The cost model must return worst-case assumptions because it is
248 // used for vectorization and we don't want to make vectorized code worse
249 // than scalar code.
250 { ISD::SHL, MVT::v16i8, 30 }, // cmpeqb sequence.
251 { ISD::SHL, MVT::v8i16, 8*10 }, // Scalarized.
252 { ISD::SHL, MVT::v4i32, 2*5 }, // We optimized this using mul.
253 { ISD::SHL, MVT::v2i64, 2*10 }, // Scalarized.
254 { ISD::SHL, MVT::v4i64, 4*10 }, // Scalarized.
256 { ISD::SRL, MVT::v16i8, 16*10 }, // Scalarized.
257 { ISD::SRL, MVT::v8i16, 8*10 }, // Scalarized.
258 { ISD::SRL, MVT::v4i32, 4*10 }, // Scalarized.
259 { ISD::SRL, MVT::v2i64, 2*10 }, // Scalarized.
261 { ISD::SRA, MVT::v16i8, 16*10 }, // Scalarized.
262 { ISD::SRA, MVT::v8i16, 8*10 }, // Scalarized.
263 { ISD::SRA, MVT::v4i32, 4*10 }, // Scalarized.
264 { ISD::SRA, MVT::v2i64, 2*10 }, // Scalarized.
266 // It is not a good idea to vectorize division. We have to scalarize it and
267 // in the process we will often end up having to spill regular
268 // registers. The overhead of division is going to dominate most kernels
269 // anyway, so try hard to prevent vectorization of division - it is
270 // generally a bad idea. Assume somewhat arbitrarily that we have to be able
271 // to hide "20 cycles" for each lane.
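// For example, the v4i32 SDIV entry below is 4 * 20 = 80, which strongly
// discourages the loop and SLP vectorizers from vectorizing code containing a division.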
272 { ISD::SDIV, MVT::v16i8, 16*20 },
273 { ISD::SDIV, MVT::v8i16, 8*20 },
274 { ISD::SDIV, MVT::v4i32, 4*20 },
275 { ISD::SDIV, MVT::v2i64, 2*20 },
276 { ISD::UDIV, MVT::v16i8, 16*20 },
277 { ISD::UDIV, MVT::v8i16, 8*20 },
278 { ISD::UDIV, MVT::v4i32, 4*20 },
279 { ISD::UDIV, MVT::v2i64, 2*20 },
283 int Idx = CostTableLookup(SSE2CostTable, ISD, LT.second);
285 return LT.first * SSE2CostTable[Idx].Cost;
288 static const CostTblEntry<MVT::SimpleValueType> AVX1CostTable[] = {
289 // We don't have to scalarize unsupported ops. We can issue two half-sized
290 // operations and we only need to extract the upper YMM half.
291 // Two ops + 1 extract + 1 insert = 4.
292 { ISD::MUL, MVT::v16i16, 4 },
293 { ISD::MUL, MVT::v8i32, 4 },
294 { ISD::SUB, MVT::v8i32, 4 },
295 { ISD::ADD, MVT::v8i32, 4 },
296 { ISD::SUB, MVT::v4i64, 4 },
297 { ISD::ADD, MVT::v4i64, 4 },
298 // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
299 // are lowered as a series of long multiplies (3), shifts (4) and adds (2).
300 // Because we believe v4i64 to be a legal type, we must also include the
301 // split factor of two in the cost table. Therefore, the cost here is 18
302 // instead of 9.
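// (3 multiplies + 4 shifts + 2 adds) x 2 halves = 18.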
303 { ISD::MUL, MVT::v4i64, 18 },
306 // Look for AVX1 lowering tricks.
307 if (ST->hasAVX() && !ST->hasAVX2()) {
310 // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
311 // sequence of extract + two vector multiply + insert.
312 if (ISD == ISD::SHL && (VT == MVT::v8i32 || VT == MVT::v16i16) &&
313 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)
316 int Idx = CostTableLookup(AVX1CostTable, ISD, VT);
318 return LT.first * AVX1CostTable[Idx].Cost;
321 // Custom lowering of vectors.
322 static const CostTblEntry<MVT::SimpleValueType> CustomLowered[] = {
323 // A v2i64/v4i64 multiply is custom lowered as a series of long
324 // multiplies (3), shifts (4) and adds (2).
325 { ISD::MUL, MVT::v2i64, 9 },
326 { ISD::MUL, MVT::v4i64, 9 },
328 int Idx = CostTableLookup(CustomLowered, ISD, LT.second);
330 return LT.first * CustomLowered[Idx].Cost;
332 // Special lowering of v4i32 mul on sse2, sse3: Lower v4i32 mul as 2x shuffle,
333 // 2x pmuludq, 2x shuffle.
334 if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
335 !ST->hasSSE41())
336 return LT.first * 6;

338 // Fall back to the default implementation.
339 return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
342 unsigned X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
343 Type *SubTp) {
344 // We only estimate the cost of reverse and alternate shuffles.
345 if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
346 return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
348 if (Kind == TTI::SK_Reverse) {
349 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
350 unsigned Cost = 1;
351 if (LT.second.getSizeInBits() > 128)
352 Cost = 3; // Extract + insert + copy.
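// (A 256-bit reverse also has to swap the two 128-bit lanes, modelled here as
// an extra extract + insert around the in-lane shuffle.)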
354 // Multiply by the number of parts.
355 return Cost * LT.first;
358 if (Kind == TTI::SK_Alternate) {
359 // 64-bit packed float vectors (v2f32) are widened to type v4f32.
360 // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
361 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
363 // The backend knows how to generate a single VEX.256 version of
364 // instruction VPBLENDW if the target supports AVX2.
365 if (ST->hasAVX2() && LT.second == MVT::v16i16)
366 return LT.first;
368 static const CostTblEntry<MVT::SimpleValueType> AVXAltShuffleTbl[] = {
369 {ISD::VECTOR_SHUFFLE, MVT::v4i64, 1}, // vblendpd
370 {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1}, // vblendpd
372 {ISD::VECTOR_SHUFFLE, MVT::v8i32, 1}, // vblendps
373 {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1}, // vblendps
375 // This shuffle is custom lowered into a sequence of:
376 // 2x vextractf128 , 2x vpblendw , 1x vinsertf128
377 {ISD::VECTOR_SHUFFLE, MVT::v16i16, 5},
379 // This shuffle is custom lowered into a long sequence of:
380 // 2x vextractf128 , 4x vpshufb , 2x vpor , 1x vinsertf128
381 {ISD::VECTOR_SHUFFLE, MVT::v32i8, 9}
385 int Idx = CostTableLookup(AVXAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
387 return LT.first * AVXAltShuffleTbl[Idx].Cost;
390 static const CostTblEntry<MVT::SimpleValueType> SSE41AltShuffleTbl[] = {
391 // These are lowered into movsd.
392 {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
393 {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},
395 // packed float vectors with four elements are lowered into BLENDI dag
396 // nodes. A v4i32/v4f32 BLENDI generates a single 'blendps'/'blendpd'.
397 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
398 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},
400 // This shuffle generates a single pshufw.
401 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},
403 // There is no instruction that matches a v16i8 alternate shuffle.
404 // The backend will expand it into the sequence 'pshufb + pshufb + or'.
405 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}
408 if (ST->hasSSE41()) {
409 int Idx = CostTableLookup(SSE41AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
411 return LT.first * SSE41AltShuffleTbl[Idx].Cost;
414 static const CostTblEntry<MVT::SimpleValueType> SSSE3AltShuffleTbl[] = {
415 {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1}, // movsd
416 {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1}, // movsd
418 // SSE3 doesn't have 'blendps'. The following shuffles are expanded into
419 // the sequence 'shufps + pshufd'
420 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
421 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},
423 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 3}, // pshufb + pshufb + or
424 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3} // pshufb + pshufb + or
427 if (ST->hasSSSE3()) {
428 int Idx = CostTableLookup(SSSE3AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
430 return LT.first * SSSE3AltShuffleTbl[Idx].Cost;
433 static const CostTblEntry<MVT::SimpleValueType> SSEAltShuffleTbl[] = {
434 {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1}, // movsd
435 {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1}, // movsd
437 {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2}, // shufps + pshufd
438 {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2}, // shufps + pshufd
440 // This is expanded into a long sequence of four extract + four insert.
441 {ISD::VECTOR_SHUFFLE, MVT::v8i16, 8}, // 4 x pextrw + 4 pinsrw.
443 // 8 x (pinsrw + pextrw + and + movb + movzb + or)
444 {ISD::VECTOR_SHUFFLE, MVT::v16i8, 48}
447 // Fall-back (SSE3 and SSE2).
448 int Idx = CostTableLookup(SSEAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
450 return LT.first * SSEAltShuffleTbl[Idx].Cost;
451 return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
454 return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
457 unsigned X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
458 int ISD = TLI->InstructionOpcodeToISD(Opcode);
459 assert(ISD && "Invalid opcode");
461 std::pair<unsigned, MVT> LTSrc = TLI->getTypeLegalizationCost(Src);
462 std::pair<unsigned, MVT> LTDest = TLI->getTypeLegalizationCost(Dst);
464 static const TypeConversionCostTblEntry<MVT::SimpleValueType>
466 // These are somewhat magic numbers justified by looking at the output of
467 // Intel's IACA, running some kernels and making sure that, once we take
468 // legalization into account, the throughput is (if anything) overestimated.
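// The scalarized i64/i32/i16/i8 -> f64 entries below follow that rule of
// thumb: roughly ten instructions per element, times the element count.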
469 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
470 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
471 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
472 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
473 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
474 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
475 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
476 { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
477 // There are faster sequences for float conversions.
478 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
479 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
480 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
481 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
482 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
483 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
484 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
485 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
488 if (ST->hasSSE2() && !ST->hasAVX()) {
490 ConvertCostTableLookup(SSE2ConvTbl, ISD, LTDest.second, LTSrc.second);
492 return LTSrc.first * SSE2ConvTbl[Idx].Cost;
495 static const TypeConversionCostTblEntry<MVT::SimpleValueType>
496 AVX512ConversionTbl[] = {
497 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 1 },
498 { ISD::FP_EXTEND, MVT::v8f64, MVT::v16f32, 3 },
499 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 1 },
500 { ISD::FP_ROUND, MVT::v16f32, MVT::v8f64, 3 },
502 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i32, 1 },
503 { ISD::TRUNCATE, MVT::v16i16, MVT::v16i32, 1 },
504 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 1 },
505 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 },
506 { ISD::TRUNCATE, MVT::v16i32, MVT::v8i64, 4 },
508 // v16i1 -> v16i32 - load + broadcast
509 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1, 2 },
510 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1, 2 },
512 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
513 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8, 1 },
514 { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
515 { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
516 { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v16i32, 3 },
517 { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v16i32, 3 },
519 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1, 3 },
520 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8, 2 },
521 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
522 { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
523 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i1, 4 },
524 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i16, 2 },
525 { ISD::SINT_TO_FP, MVT::v8f64, MVT::v8i32, 1 },
528 if (ST->hasAVX512()) {
529 int Idx = ConvertCostTableLookup(AVX512ConversionTbl, ISD, LTDest.second,
532 return AVX512ConversionTbl[Idx].Cost;
534 EVT SrcTy = TLI->getValueType(Src);
535 EVT DstTy = TLI->getValueType(Dst);
537 // The function getSimpleVT only handles simple value types.
538 if (!SrcTy.isSimple() || !DstTy.isSimple())
539 return BaseT::getCastInstrCost(Opcode, Dst, Src);
541 static const TypeConversionCostTblEntry<MVT::SimpleValueType>
542 AVX2ConversionTbl[] = {
543 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
544 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
545 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
546 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 3 },
547 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
548 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 3 },
549 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
550 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
551 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
552 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 3 },
553 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 3 },
554 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 3 },
555 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
556 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
557 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
558 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 1 },
560 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 2 },
561 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 2 },
562 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 2 },
563 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 2 },
564 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 2 },
565 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 4 },
567 { ISD::FP_EXTEND, MVT::v8f64, MVT::v8f32, 3 },
568 { ISD::FP_ROUND, MVT::v8f32, MVT::v8f64, 3 },
570 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 8 },
573 static const TypeConversionCostTblEntry<MVT::SimpleValueType>
574 AVXConversionTbl[] = {
575 { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
576 { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
577 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i1, 7 },
578 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i1, 4 },
579 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i8, 7 },
580 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i8, 4 },
581 { ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
582 { ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 4 },
583 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i1, 6 },
584 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i1, 4 },
585 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i8, 6 },
586 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i8, 4 },
587 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i16, 6 },
588 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i16, 3 },
589 { ISD::SIGN_EXTEND, MVT::v4i64, MVT::v4i32, 4 },
590 { ISD::ZERO_EXTEND, MVT::v4i64, MVT::v4i32, 4 },
592 { ISD::TRUNCATE, MVT::v4i8, MVT::v4i64, 4 },
593 { ISD::TRUNCATE, MVT::v4i16, MVT::v4i64, 4 },
594 { ISD::TRUNCATE, MVT::v4i32, MVT::v4i64, 4 },
595 { ISD::TRUNCATE, MVT::v8i8, MVT::v8i32, 4 },
596 { ISD::TRUNCATE, MVT::v8i16, MVT::v8i32, 5 },
597 { ISD::TRUNCATE, MVT::v16i8, MVT::v16i16, 4 },
598 { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 9 },
600 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i1, 8 },
601 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i8, 8 },
602 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
603 { ISD::SINT_TO_FP, MVT::v8f32, MVT::v8i32, 1 },
604 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i1, 3 },
605 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8, 3 },
606 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 3 },
607 { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
608 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i1, 3 },
609 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i8, 3 },
610 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i16, 3 },
611 { ISD::SINT_TO_FP, MVT::v4f64, MVT::v4i32, 1 },
613 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i1, 6 },
614 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i8, 5 },
615 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i16, 5 },
616 { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i32, 9 },
617 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i1, 7 },
618 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8, 2 },
619 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
620 { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 6 },
621 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i1, 7 },
622 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i8, 2 },
623 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i16, 2 },
624 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i32, 6 },
625 // The generic code to compute the scalar overhead is currently broken.
626 // Work around this limitation by estimating the scalarization overhead
627 // here. We have roughly 10 instructions per scalar element.
628 // Multiply that by the vector width.
629 // FIXME: remove that when PR19268 is fixed.
630 { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
631 { ISD::UINT_TO_FP, MVT::v4f64, MVT::v4i64, 4*10 },
633 { ISD::FP_TO_SINT, MVT::v8i8, MVT::v8f32, 7 },
634 { ISD::FP_TO_SINT, MVT::v4i8, MVT::v4f32, 1 },
635 // This node is expanded into scalarized operations but BasicTTI is overly
636 // optimistic when estimating its cost. It computes 3 per element (one
637 // vector-extract, one scalar conversion and one vector-insert). The
638 // problem is that the inserts form a read-modify-write chain so latency
639 // should be factored in too. Inflate the cost per element by 1.
640 { ISD::FP_TO_UINT, MVT::v8i32, MVT::v8f32, 8*4 },
641 { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f64, 4*4 },
645 int Idx = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
646 DstTy.getSimpleVT(), SrcTy.getSimpleVT());
648 return AVX2ConversionTbl[Idx].Cost;
652 int Idx = ConvertCostTableLookup(AVXConversionTbl, ISD, DstTy.getSimpleVT(),
653 SrcTy.getSimpleVT());
655 return AVXConversionTbl[Idx].Cost;
658 return BaseT::getCastInstrCost(Opcode, Dst, Src);
661 unsigned X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
662 Type *CondTy) {
663 // Legalize the type.
664 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);
666 MVT MTy = LT.second;
668 int ISD = TLI->InstructionOpcodeToISD(Opcode);
669 assert(ISD && "Invalid opcode");
671 static const CostTblEntry<MVT::SimpleValueType> SSE42CostTbl[] = {
672 { ISD::SETCC, MVT::v2f64, 1 },
673 { ISD::SETCC, MVT::v4f32, 1 },
674 { ISD::SETCC, MVT::v2i64, 1 },
675 { ISD::SETCC, MVT::v4i32, 1 },
676 { ISD::SETCC, MVT::v8i16, 1 },
677 { ISD::SETCC, MVT::v16i8, 1 },
680 static const CostTblEntry<MVT::SimpleValueType> AVX1CostTbl[] = {
681 { ISD::SETCC, MVT::v4f64, 1 },
682 { ISD::SETCC, MVT::v8f32, 1 },
683 // AVX1 does not support 8-wide integer compare.
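// A 256-bit integer compare is split into two 128-bit pcmp* operations plus
// an extract and an insert, hence the cost of 4.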
684 { ISD::SETCC, MVT::v4i64, 4 },
685 { ISD::SETCC, MVT::v8i32, 4 },
686 { ISD::SETCC, MVT::v16i16, 4 },
687 { ISD::SETCC, MVT::v32i8, 4 },
690 static const CostTblEntry<MVT::SimpleValueType> AVX2CostTbl[] = {
691 { ISD::SETCC, MVT::v4i64, 1 },
692 { ISD::SETCC, MVT::v8i32, 1 },
693 { ISD::SETCC, MVT::v16i16, 1 },
694 { ISD::SETCC, MVT::v32i8, 1 },
697 static const CostTblEntry<MVT::SimpleValueType> AVX512CostTbl[] = {
698 { ISD::SETCC, MVT::v8i64, 1 },
699 { ISD::SETCC, MVT::v16i32, 1 },
700 { ISD::SETCC, MVT::v8f64, 1 },
701 { ISD::SETCC, MVT::v16f32, 1 },
704 if (ST->hasAVX512()) {
705 int Idx = CostTableLookup(AVX512CostTbl, ISD, MTy);
707 return LT.first * AVX512CostTbl[Idx].Cost;
711 int Idx = CostTableLookup(AVX2CostTbl, ISD, MTy);
713 return LT.first * AVX2CostTbl[Idx].Cost;
717 int Idx = CostTableLookup(AVX1CostTbl, ISD, MTy);
719 return LT.first * AVX1CostTbl[Idx].Cost;
722 if (ST->hasSSE42()) {
723 int Idx = CostTableLookup(SSE42CostTbl, ISD, MTy);
725 return LT.first * SSE42CostTbl[Idx].Cost;
728 return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
731 unsigned X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
732 unsigned Index) {
733 assert(Val->isVectorTy() && "This must be a vector type");
736 // Legalize the type.
737 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Val);
739 // This type is legalized to a scalar type.
740 if (!LT.second.isVector())
741 return 0;
743 // The type may be split. Normalize the index to the new type.
744 unsigned Width = LT.second.getVectorNumElements();
745 Index = Index % Width;
747 // Floating point scalars are already located in index #0.
748 if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
749 return 0;
752 return BaseT::getVectorInstrCost(Opcode, Val, Index);
755 unsigned X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert,
756 bool Extract) {
757 assert (Ty->isVectorTy() && "Can only scalarize vectors");
758 unsigned Cost = 0;

760 for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
761 if (Insert)
762 Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i);
763 if (Extract)
764 Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i);
770 unsigned X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
771 unsigned Alignment,
772 unsigned AddressSpace) {
773 // Handle non-power-of-two vectors such as <3 x float>
774 if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
775 unsigned NumElem = VTy->getVectorNumElements();
777 // Handle a few common cases:
779 if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
780 // Cost = 64 bit store + extract + 32 bit store.
781 return 3;
784 if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
785 // Cost = 128 bit store + unpack + 64 bit store.
786 return 3;
788 // Assume that all other non-power-of-two numbers are scalarized.
789 if (!isPowerOf2_32(NumElem)) {
790 unsigned Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(),
791 Alignment, AddressSpace);
792 unsigned SplitCost = getScalarizationOverhead(Src,
793 Opcode == Instruction::Load,
794 Opcode==Instruction::Store);
795 return NumElem * Cost + SplitCost;
799 // Legalize the type.
800 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
801 assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
804 // Each load/store unit costs 1.
805 unsigned Cost = LT.first * 1;
807 // On Sandybridge 256bit load/stores are double pumped
808 // (but not on Haswell).
809 if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
810 Cost *= 2;

812 return Cost;
815 unsigned X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
816 unsigned Alignment,
817 unsigned AddressSpace) {
818 VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
819 if (!SrcVTy)
820 // For a scalar, take the regular memory cost, without the mask.
821 return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);
823 unsigned NumElem = SrcVTy->getVectorNumElements();
824 VectorType *MaskTy =
825 VectorType::get(Type::getInt8Ty(getGlobalContext()), NumElem);
826 if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy, 1)) ||
827 (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy, 1)) ||
828 !isPowerOf2_32(NumElem)) {
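// Without a legal masked instruction (or with a non-power-of-two element
// count), the operation is emulated: unpack the mask, test each mask element
// with a compare + branch, and do a scalar load/store per lane, plus the
// cost of unpacking/repacking the value vector.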
830 unsigned MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
831 unsigned ScalarCompareCost =
832 getCmpSelInstrCost(Instruction::ICmp,
833 Type::getInt8Ty(getGlobalContext()), NULL);
834 unsigned BranchCost = getCFInstrCost(Instruction::Br);
835 unsigned MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
837 unsigned ValueSplitCost =
838 getScalarizationOverhead(SrcVTy, Opcode == Instruction::Load,
839 Opcode == Instruction::Store);
840 unsigned MemopCost =
841 NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
842 Alignment, AddressSpace);
843 return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
846 // Legalize the type.
847 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(SrcVTy);
848 unsigned Cost = 0;
849 if (LT.second != TLI->getValueType(SrcVTy).getSimpleVT() &&
850 LT.second.getVectorNumElements() == NumElem)
851 // Promotion requires expand/truncate for data and a shuffle for mask.
852 Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, 0) +
853 getShuffleCost(TTI::SK_Alternate, MaskTy, 0, 0);
855 else if (LT.second.getVectorNumElements() > NumElem) {
856 VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
857 LT.second.getVectorNumElements());
858 // Expanding requires filling the mask with zeroes.
859 Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
861 if (!ST->hasAVX512())
862 return Cost + LT.first*4; // Each maskmov costs 4
864 // AVX-512 masked load/store is cheaper.
865 return Cost+LT.first;
868 unsigned X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
869 // Address computations in vectorized code with non-consecutive addresses will
870 // likely result in more instructions compared to scalar code where the
871 // computation can more often be merged into the index mode. The resulting
872 // extra micro-ops can significantly decrease throughput.
873 unsigned NumVectorInstToHideOverhead = 10;
875 if (Ty->isVectorTy() && IsComplex)
876 return NumVectorInstToHideOverhead;
878 return BaseT::getAddressComputationCost(Ty, IsComplex);
881 unsigned X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
882 bool IsPairwise) {
884 std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);
886 MVT MTy = LT.second;
888 int ISD = TLI->InstructionOpcodeToISD(Opcode);
889 assert(ISD && "Invalid opcode");
891 // We use the Intel Architecture Code Analyzer (IACA) to measure the throughput
892 // of each reduction sequence and use that as the cost.
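// Two reduction flavours are costed: pairwise (hadd-style, combining adjacent
// elements) and the split-in-half variant; the tables below reflect that the
// split variant is generally at least as cheap.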
894 static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblPairWise[] = {
895 { ISD::FADD, MVT::v2f64, 2 },
896 { ISD::FADD, MVT::v4f32, 4 },
897 { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
898 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
899 { ISD::ADD, MVT::v8i16, 5 },
902 static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblPairWise[] = {
903 { ISD::FADD, MVT::v4f32, 4 },
904 { ISD::FADD, MVT::v4f64, 5 },
905 { ISD::FADD, MVT::v8f32, 7 },
906 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
907 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
908 { ISD::ADD, MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
909 { ISD::ADD, MVT::v8i16, 5 },
910 { ISD::ADD, MVT::v8i32, 5 },
913 static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblNoPairWise[] = {
914 { ISD::FADD, MVT::v2f64, 2 },
915 { ISD::FADD, MVT::v4f32, 4 },
916 { ISD::ADD, MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
917 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
918 { ISD::ADD, MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
921 static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblNoPairWise[] = {
922 { ISD::FADD, MVT::v4f32, 3 },
923 { ISD::FADD, MVT::v4f64, 3 },
924 { ISD::FADD, MVT::v8f32, 4 },
925 { ISD::ADD, MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
926 { ISD::ADD, MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
927 { ISD::ADD, MVT::v4i64, 3 },
928 { ISD::ADD, MVT::v8i16, 4 },
929 { ISD::ADD, MVT::v8i32, 5 },
934 int Idx = CostTableLookup(AVX1CostTblPairWise, ISD, MTy);
936 return LT.first * AVX1CostTblPairWise[Idx].Cost;
939 if (ST->hasSSE42()) {
940 int Idx = CostTableLookup(SSE42CostTblPairWise, ISD, MTy);
942 return LT.first * SSE42CostTblPairWise[Idx].Cost;
946 int Idx = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy);
948 return LT.first * AVX1CostTblNoPairWise[Idx].Cost;
951 if (ST->hasSSE42()) {
952 int Idx = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy);
954 return LT.first * SSE42CostTblNoPairWise[Idx].Cost;
958 return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
961 /// \brief Calculate the cost of materializing a 64-bit value. This helper
962 /// method might only calculate a fraction of a larger immediate. Therefore it
963 /// is valid to return a cost of ZERO.
964 unsigned X86TTIImpl::getIntImmCost(int64_t Val) {
965 if (Val == 0)
966 return TTI::TCC_Free;
968 if (isInt<32>(Val))
969 return TTI::TCC_Basic;
971 return 2 * TTI::TCC_Basic;
974 unsigned X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
975 assert(Ty->isIntegerTy());
977 unsigned BitSize = Ty->getPrimitiveSizeInBits();
981 // Never hoist constants larger than 128bit, because this might lead to
982 // incorrect code generation or assertions in codegen.
983 // FIXME: Create a cost model for types larger than i128 once the codegen
984 // issues have been fixed.
985 if (BitSize > 128)
986 return TTI::TCC_Free;
988 if (Imm == 0)
989 return TTI::TCC_Free;
991 // Sign-extend all constants to a multiple of 64 bits.
992 APInt ImmVal = Imm;
993 if (BitSize & 0x3f)
994 ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);
996 // Split the constant into 64-bit chunks and calculate the cost for each
997 // chunk.
998 unsigned Cost = 0;
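// For example, an i128 immediate is costed as two independent 64-bit materializations.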
999 for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
1000 APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
1001 int64_t Val = Tmp.getSExtValue();
1002 Cost += getIntImmCost(Val);
1004 // We need at least one instruction to materialize the constant.
1005 return std::max(1U, Cost);
1008 unsigned X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
1009 const APInt &Imm, Type *Ty) {
1010 assert(Ty->isIntegerTy());
1012 unsigned BitSize = Ty->getPrimitiveSizeInBits();
1013 // There is no cost model for constants with a bit size of 0. Return TCC_Free
1014 // here, so that constant hoisting will ignore this constant.
1015 if (BitSize == 0)
1016 return TTI::TCC_Free;
1018 unsigned ImmIdx = ~0U;
1019 switch (Opcode) {
1020 default:
1021 return TTI::TCC_Free;
1022 case Instruction::GetElementPtr:
1023 // Always hoist the base address of a GetElementPtr. This prevents the
1024 // creation of new constants for every base constant that gets constant
1025 // folded with the offset.
1026 if (Idx == 0)
1027 return 2 * TTI::TCC_Basic;
1028 return TTI::TCC_Free;
1029 case Instruction::Store:
1030 ImmIdx = 0;
1031 break;
1032 case Instruction::Add:
1033 case Instruction::Sub:
1034 case Instruction::Mul:
1035 case Instruction::UDiv:
1036 case Instruction::SDiv:
1037 case Instruction::URem:
1038 case Instruction::SRem:
1039 case Instruction::And:
1040 case Instruction::Or:
1041 case Instruction::Xor:
1042 case Instruction::ICmp:
1043 ImmIdx = 1;
1044 break;
1045 // Always return TCC_Free for the shift value of a shift instruction.
1046 case Instruction::Shl:
1047 case Instruction::LShr:
1048 case Instruction::AShr:
1049 if (Idx == 1)
1050 return TTI::TCC_Free;
1051 break;
1052 case Instruction::Trunc:
1053 case Instruction::ZExt:
1054 case Instruction::SExt:
1055 case Instruction::IntToPtr:
1056 case Instruction::PtrToInt:
1057 case Instruction::BitCast:
1058 case Instruction::PHI:
1059 case Instruction::Call:
1060 case Instruction::Select:
1061 case Instruction::Ret:
1062 case Instruction::Load:
1063 break;
1064 }
1066 if (Idx == ImmIdx) {
1067 unsigned NumConstants = (BitSize + 63) / 64;
1068 unsigned Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
1069 return (Cost <= NumConstants * TTI::TCC_Basic)
1070 ? static_cast<unsigned>(TTI::TCC_Free)
1071 : Cost;
1072 }
1074 return X86TTIImpl::getIntImmCost(Imm, Ty);
1077 unsigned X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
1078 const APInt &Imm, Type *Ty) {
1079 assert(Ty->isIntegerTy());
1081 unsigned BitSize = Ty->getPrimitiveSizeInBits();
1082 // There is no cost model for constants with a bit size of 0. Return TCC_Free
1083 // here, so that constant hoisting will ignore this constant.
1084 if (BitSize == 0)
1085 return TTI::TCC_Free;
1087 switch (IID) {
1088 default:
1089 return TTI::TCC_Free;
1090 case Intrinsic::sadd_with_overflow:
1091 case Intrinsic::uadd_with_overflow:
1092 case Intrinsic::ssub_with_overflow:
1093 case Intrinsic::usub_with_overflow:
1094 case Intrinsic::smul_with_overflow:
1095 case Intrinsic::umul_with_overflow:
1096 if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
1097 return TTI::TCC_Free;
1098 break;
1099 case Intrinsic::experimental_stackmap:
1100 if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
1101 return TTI::TCC_Free;
1102 break;
1103 case Intrinsic::experimental_patchpoint_void:
1104 case Intrinsic::experimental_patchpoint_i64:
1105 if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
1106 return TTI::TCC_Free;
1107 break;
1108 }
1109 return X86TTIImpl::getIntImmCost(Imm, Ty);
1112 bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, int Consecutive) {
1113 int DataWidth = DataTy->getPrimitiveSizeInBits();
1115 // TODO: AVX-512 also allows gather/scatter, which works with strided and random accesses as well.
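// Masked loads/stores of 32/64-bit elements map to the AVX2 vpmaskmovd/vpmaskmovq
// (and vmaskmovps/vmaskmovpd) forms or the AVX-512 masked moves, so they require
// AVX2 or AVX-512 and a consecutive (unit-strided) access.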
1116 if ((DataWidth < 32) || (Consecutive == 0))
1117 return false;
1118 if (ST->hasAVX512() || ST->hasAVX2())
1119 return true;
1120 return false;
1123 bool X86TTIImpl::isLegalMaskedStore(Type *DataType, int Consecutive) {
1124 return isLegalMaskedLoad(DataType, Consecutive);