//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/VariadicFunction.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "X86IntrinsicsInfo.h"
#include <bitset>
#include <numeric>
#include <cctype>

using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> ExperimentalVectorWideningLegalization(
    "x86-experimental-vector-widening-legalization", cl::init(false),
    cl::desc("Enable an experimental vector type legalization through widening "
             "rather than promotion."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLowering(
    "x86-experimental-vector-shuffle-lowering", cl::init(true),
    cl::desc("Enable an experimental vector shuffle lowering code path."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLegality(
    "x86-experimental-vector-shuffle-legality", cl::init(false),
    cl::desc("Enable experimental shuffle legality based on the experimental "
             "shuffle lowering. Should only be used with the experimental "
             "shuffle lowering."),
    cl::Hidden);

static cl::opt<int> ReciprocalEstimateRefinementSteps(
    "x86-recip-refinement-steps", cl::init(1),
    cl::desc("Specify the number of Newton-Raphson iterations applied to the "
             "result of the hardware reciprocal estimate instruction."),
    cl::NotHidden);

// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
                                SelectionDAG &DAG, SDLoc dl,
                                unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits() / vectorWidth;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements() / Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(ResultVT);

  // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR.
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
                               * ElemsPerChunk);
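  // Worked example (illustrative, not from the original source): extracting
  // with IdxVal = 5 from a 256-bit v8i32 at vectorWidth = 128 gives
  // ElemsPerChunk = 128 / 32 = 4 and NormalizedIdxVal = ((5 * 32) / 128) * 4
  // = 4, i.e. the index is rounded down to the start of the 128-bit chunk
  // that contains element 5.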

  // If the input is a buildvector, just emit a smaller one.
  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
                       makeArrayRef(Vec->op_begin() + NormalizedIdxVal,
                                    ElemsPerChunk));

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
}

/// Generate a DAG to grab 128-bits from a vector > 128 bits. This
/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
/// instructions or a simple subregister reference. Idx is an index in the
/// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert((Vec.getValueType().is256BitVector() ||
          Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 128);
}

/// Generate a DAG to grab 256-bits from a 512-bit vector.
static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 256);
}

static SDValue InsertSubVector(SDValue Result, SDValue Vec,
                               unsigned IdxVal, SelectionDAG &DAG,
                               SDLoc dl, unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  // Inserting UNDEF is Result.
  if (Vec.getOpcode() == ISD::UNDEF)
    return Result;
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant vectorWidth bits.
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
                               * ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
}

/// Generate a DAG to put 128-bits into a vector > 128 bits. This
/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
/// simple superregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}

static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
}

/// Concat two 128-bit vectors into a 256-bit vector using VINSERTF128
/// instructions. This is used because creating CONCAT_VECTOR nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert128BitVector(V, V2, NumElems / 2, DAG, dl);
}
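// Usage sketch (illustrative): with two v4i32 inputs and VT = v8i32,
// Concat128BitVectors inserts V1 at element 0 and V2 at element
// NumElems / 2 == 4 of an UNDEF 256-bit vector, yielding the concatenation.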

static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert256BitVector(V, V2, NumElems / 2, DAG, dl);
}

// FIXME: This should stop caching the target machine as soon as
// we can remove resetOperationActions et al.
X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM)
    : TargetLowering(TM) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  TD = getDataLayout();

  resetOperationActions();
}

void X86TargetLowering::resetOperationActions() {
  const TargetMachine &TM = getTargetMachine();
  static bool FirstTimeThrough = true;

  // If none of the target options have changed, then we don't need to reset
  // the operation actions.
  if (!FirstTimeThrough && TO == TM.Options) return;

  if (!FirstTimeThrough) {
    // Reinitialize the actions.
    initActions();
    FirstTimeThrough = false;
  }

  TO = TM.Options;

  // Set up the TargetLowering object.
  static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
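  // For example, a scalar SETCC produces 0 or 1 in an i8 register, while an
  // SSE vector compare such as PCMPEQD produces all-ones (-1) or all-zeros
  // lanes that can be used directly as a mask.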

  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget->isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo =
      TM.getSubtarget<X86Subtarget>().getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Bypass expensive divides on Atom when compiling with O2.
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget->hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget->hasSlowDivide64() && Subtarget->is64Bit())
      addBypassSlowDiv(64, 16);
  }
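  // Illustrative note: addBypassSlowDiv(32, 8) guards each 32-bit divide with
  // a runtime check and, when both operands fit in 8 bits, uses the much
  // cheaper 8-bit divide instead.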

  if (Subtarget->isTargetKnownWindowsMSVC()) {
    // Set up Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);

    // The _ftol2 runtime function has an unusual calling conv, which
    // is modeled by a special pseudo-instruction.
    setLibcallName(RTLIB::FPTOUINT_F64_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F64_I32, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I32, nullptr);
  }

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetWindowsGNU()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  } else if (!TM.Options.UseSoftFloat) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

  if (!TM.Options.UseSoftFloat) {
    // SSE has no i16 to fp conversion, only i32.
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
      // f32 and f64 cases are Legal, f80 case is not.
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Promote);
  }

  // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else if (!TM.Options.UseSoftFloat) {
    // Since AVX is a superset of SSE3, only check for SSE here.
    if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  }

  if (isTargetFTOL()) {
    // Use the _ftol2 runtime function, which has a pseudo-instruction
    // to handle its weird calling convention.
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
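  // For example (illustrative): x86's one-operand MUL/IMUL computes
  // EDX:EAX = EAX * src, so a single instruction can supply both results of
  // an ISD::UMUL_LOHI / ISD::SMUL_LOHI node.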
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
    setOperationAction(ISD::ADDC, VT, Custom);
    setOperationAction(ISD::ADDE, VT, Custom);
    setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f80, Expand);
  setOperationAction(ISD::BR_CC, MVT::i8, Expand);
  setOperationAction(ISD::BR_CC, MVT::i16, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f80, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationAction(ISD::CTTZ, MVT::i8, Promote);
  AddPromotedToType(ISD::CTTZ, MVT::i8, MVT::i32);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i8, Promote);
  AddPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  if (Subtarget->hasBMI()) {
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::i32, Custom);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  }

  if (Subtarget->hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationAction(ISD::CTLZ, MVT::i8, Promote);
    AddPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Promote);
    AddPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTLZ, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::i32, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::CTLZ, MVT::i64, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }

  // Special handling for half-precision floating point conversions.
  // If we don't have F16C support, then lower half float conversions
  // into library calls.
  if (TM.Options.UseSoftFloat || !Subtarget->hasF16C()) {
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  }

  // There's never any support for operations beyond MVT::f32.
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f80, MVT::f16, Expand);

  if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP, MVT::i8, Promote);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::Other, Custom);

  if (!Subtarget->hasMOVBE())
    setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i8, Custom);
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP as supported here are NOT intended to
  // support SjLj exception handling; they are a light-weight setjmp/longjmp
  // replacement used for continuation, user-level threading, etc. As a
  // result, no other SjLj exception interfaces are implemented; please don't
  // build your own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86).
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Expand certain atomics.
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (Subtarget->hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }

  // FIXME - use subtarget debug flags.
  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWin64()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  if (Subtarget->is64Bit() && !Subtarget->isTargetWin64()) {
    // TargetInfo::X86_64ABIBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  } else {
    // TargetInfo::CharPtrBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, getPointerTy(), Custom);

  if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::FR64RegClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // Lower this to FGETSIGNx86 plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0)); // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod.
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (!TM.Options.UseSoftFloat) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FSIN, MVT::f32, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f32, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Long double always uses X87.
  if (!TM.Options.UseSoftFloat) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
      addLegalFPImmediate(TmpFlt); // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt); // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2); // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
    }

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f80, Expand);
      setOperationAction(ISD::FCOS, MVT::f80, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
    }

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL, MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT, MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types; we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  // MMX-sized vectors (other than x86mmx) are expected to be expanded
  // into smaller operations.
  setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
  setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
  setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
  setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
  setOperationAction(ISD::AND, MVT::v8i8, Expand);
  setOperationAction(ISD::AND, MVT::v4i16, Expand);
  setOperationAction(ISD::AND, MVT::v2i32, Expand);
  setOperationAction(ISD::AND, MVT::v1i64, Expand);
  setOperationAction(ISD::OR, MVT::v8i8, Expand);
  setOperationAction(ISD::OR, MVT::v4i16, Expand);
  setOperationAction(ISD::OR, MVT::v2i32, Expand);
  setOperationAction(ISD::OR, MVT::v1i64, Expand);
  setOperationAction(ISD::XOR, MVT::v8i8, Expand);
  setOperationAction(ISD::XOR, MVT::v4i16, Expand);
  setOperationAction(ISD::XOR, MVT::v2i32, Expand);
  setOperationAction(ISD::XOR, MVT::v1i64, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
  setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
  setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
  setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
  setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
  setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
  setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, &X86::VR128RegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::FABS, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, &X86::VR128RegClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
    setOperationAction(ISD::FABS, MVT::v2f64, Custom);

    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Only provide customized ctpop vector bit twiddling for vector types we
    // know to perform better than using the popcnt instructions on each vector
    // element. If popcnt isn't supported, always provide the custom version.
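    // (Illustrative: the custom lowering counts bits in all lanes at once
    // using shift/mask/add bit twiddling in the spirit of the classic
    // parallel popcount, rather than extracting each element for a scalar
    // POPCNT.)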
    if (!Subtarget->hasPOPCNT()) {
      setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
      setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);
    }

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors.
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      // Do not attempt to custom lower non-128-bit vectors.
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    // We support custom legalizing of sext and anyext loads for specific
    // memory vector types which we can load as a scalar (or sequence of
    // scalars) and extend in-register to a legal 128-bit vector type. For sext
    // loads these must work with a single scalar load.
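    // For example (illustrative): a sextload of v4i8 can be implemented as a
    // single 32-bit scalar load followed by an in-register sign extension to
    // the legal v4i32 type.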
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
    }

    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-128-bit vectors.
      if (!VT.is128BitVector())
        continue;

      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v2i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v2i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v2i64);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    // As there is no 64-bit GPR available, we need to build a special custom
    // sequence to convert from v2i32 to v2f32.
    if (!Subtarget->is64Bit())
      setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);

    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    setOperationAction(ISD::VSELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v2i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4i32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8i16, Custom);
    // There is no BLENDI for byte vectors. We don't need to custom lower
    // some vselects for now.
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);

    // SSE41 brings specific instructions for doing vector sign extend even in
    // cases where we don't have SRA.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
    }

    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X.
    setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);

    setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);

    // i8 and i16 vectors are custom because the source register and source
    // memory operand types are not the same width. f32 vectors are custom
    // since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    // FIXME: these should be Legal, but that's only for the case where
    // the index is constant. For now custom expand to deal with that.
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }

  if (Subtarget->hasSSE2()) {
    setOperationAction(ISD::SRL, MVT::v8i16, Custom);
    setOperationAction(ISD::SRL, MVT::v16i8, Custom);

    setOperationAction(ISD::SHL, MVT::v8i16, Custom);
    setOperationAction(ISD::SHL, MVT::v16i8, Custom);

    setOperationAction(ISD::SRA, MVT::v8i16, Custom);
    setOperationAction(ISD::SRA, MVT::v16i8, Custom);

    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
    setOperationAction(ISD::SRL, MVT::v2i64, Custom);
    setOperationAction(ISD::SRL, MVT::v4i32, Custom);

    setOperationAction(ISD::SHL, MVT::v2i64, Custom);
    setOperationAction(ISD::SHL, MVT::v4i32, Custom);

    setOperationAction(ISD::SRA, MVT::v4i32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasFp256()) {
    addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64, &X86::VR256RegClass);

    setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i64, Legal);

    setOperationAction(ISD::FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v8f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v8f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
    setOperationAction(ISD::FABS, MVT::v8f32, Custom);

    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
    setOperationAction(ISD::FABS, MVT::v4f64, Custom);

    // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
    // even though v8i16 is a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);

    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);

    setOperationAction(ISD::SRL, MVT::v16i16, Custom);
    setOperationAction(ISD::SRL, MVT::v32i8, Custom);

    setOperationAction(ISD::SHL, MVT::v16i16, Custom);
    setOperationAction(ISD::SHL, MVT::v32i8, Custom);

    setOperationAction(ISD::SRA, MVT::v16i16, Custom);
    setOperationAction(ISD::SRA, MVT::v32i8, Custom);

    setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i64, Custom);

    setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::VSELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8i32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
1288 if (Subtarget->hasFMA() || Subtarget->hasFMA4()) {
1289 setOperationAction(ISD::FMA, MVT::v8f32, Legal);
1290 setOperationAction(ISD::FMA, MVT::v4f64, Legal);
1291 setOperationAction(ISD::FMA, MVT::v4f32, Legal);
1292 setOperationAction(ISD::FMA, MVT::v2f64, Legal);
1293 setOperationAction(ISD::FMA, MVT::f32, Legal);
1294 setOperationAction(ISD::FMA, MVT::f64, Legal);
1297 if (Subtarget->hasInt256()) {
1298 setOperationAction(ISD::ADD, MVT::v4i64, Legal);
1299 setOperationAction(ISD::ADD, MVT::v8i32, Legal);
1300 setOperationAction(ISD::ADD, MVT::v16i16, Legal);
1301 setOperationAction(ISD::ADD, MVT::v32i8, Legal);
1303 setOperationAction(ISD::SUB, MVT::v4i64, Legal);
1304 setOperationAction(ISD::SUB, MVT::v8i32, Legal);
1305 setOperationAction(ISD::SUB, MVT::v16i16, Legal);
1306 setOperationAction(ISD::SUB, MVT::v32i8, Legal);
1308 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1309 setOperationAction(ISD::MUL, MVT::v8i32, Legal);
1310 setOperationAction(ISD::MUL, MVT::v16i16, Legal);
1311 // Don't lower v32i8 because there is no 128-bit byte mul
1313 setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
1314 setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
1315 setOperationAction(ISD::MULHU, MVT::v16i16, Legal);
1316 setOperationAction(ISD::MULHS, MVT::v16i16, Legal);
1318 setOperationAction(ISD::VSELECT, MVT::v16i16, Custom);
1319 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1321 // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
1322 // when we have a 256-bit-wide blend with immediate.
1323 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
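// (Editorial sketch of the usual approach, hedged: each 32-bit lane is split
// into 16-bit halves, the halves are merged with float exponent bits via a
// blend-with-immediate, converted, and recombined with an FSUB/FADD of magic
// constants; see the custom lowering itself for the authoritative sequence.)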
1325 // Only provide customized ctpop vector bit twiddling for vector types we
1326 // know to perform better than using the popcnt instructions on each
1327 // vector element. If popcnt isn't supported, always provide the custom
1328 // version.
1329 if (!Subtarget->hasPOPCNT())
1330 setOperationAction(ISD::CTPOP, MVT::v4i64, Custom);
1332 // Custom CTPOP always performs better on natively supported v8i32
1333 setOperationAction(ISD::CTPOP, MVT::v8i32, Custom);
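// (Editorial sketch, assuming the classic SWAR popcount per lane; the custom
// bit-twiddling lowering follows this scheme:
//   v = v - ((v >> 1) & 0x55...);
//   v = (v & 0x33...) + ((v >> 2) & 0x33...);
//   v = (v + (v >> 4)) & 0x0F...;
//   count = (v * 0x01...) >> (lane bits - 8); )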
1335 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1336 setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1337 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1338 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1339 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1340 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1341 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1343 setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1344 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1345 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1346 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1347 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1348 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1349 } else {
1350 setOperationAction(ISD::ADD, MVT::v4i64, Custom);
1351 setOperationAction(ISD::ADD, MVT::v8i32, Custom);
1352 setOperationAction(ISD::ADD, MVT::v16i16, Custom);
1353 setOperationAction(ISD::ADD, MVT::v32i8, Custom);
1355 setOperationAction(ISD::SUB, MVT::v4i64, Custom);
1356 setOperationAction(ISD::SUB, MVT::v8i32, Custom);
1357 setOperationAction(ISD::SUB, MVT::v16i16, Custom);
1358 setOperationAction(ISD::SUB, MVT::v32i8, Custom);
1360 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1361 setOperationAction(ISD::MUL, MVT::v8i32, Custom);
1362 setOperationAction(ISD::MUL, MVT::v16i16, Custom);
1363 // Don't lower v32i8 because there is no 128-bit byte mul
1364 }
1366 // In the customized shift lowering, the legal cases in AVX2 will be
1367 // recognized.
1368 setOperationAction(ISD::SRL, MVT::v4i64, Custom);
1369 setOperationAction(ISD::SRL, MVT::v8i32, Custom);
1371 setOperationAction(ISD::SHL, MVT::v4i64, Custom);
1372 setOperationAction(ISD::SHL, MVT::v8i32, Custom);
1374 setOperationAction(ISD::SRA, MVT::v8i32, Custom);
1376 // Custom lower several nodes for 256-bit types.
1377 for (MVT VT : MVT::vector_valuetypes()) {
1378 if (VT.getScalarSizeInBits() >= 32) {
1379 setOperationAction(ISD::MLOAD, VT, Legal);
1380 setOperationAction(ISD::MSTORE, VT, Legal);
1381 }
1382 // Extract subvector is special because the value type
1383 // (result) is 128-bit but the source is 256-bit wide.
1384 if (VT.is128BitVector()) {
1385 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1386 }
1387 // Do not attempt to custom lower other non-256-bit vectors
1388 if (!VT.is256BitVector())
1389 continue;
1391 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1392 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1393 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1394 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1395 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1396 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1397 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1398 }
1400 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
1401 for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
1402 MVT VT = (MVT::SimpleValueType)i;
1404 // Do not attempt to promote non-256-bit vectors
1405 if (!VT.is256BitVector())
1406 continue;
1408 setOperationAction(ISD::AND, VT, Promote);
1409 AddPromotedToType (ISD::AND, VT, MVT::v4i64);
1410 setOperationAction(ISD::OR, VT, Promote);
1411 AddPromotedToType (ISD::OR, VT, MVT::v4i64);
1412 setOperationAction(ISD::XOR, VT, Promote);
1413 AddPromotedToType (ISD::XOR, VT, MVT::v4i64);
1414 setOperationAction(ISD::LOAD, VT, Promote);
1415 AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
1416 setOperationAction(ISD::SELECT, VT, Promote);
1417 AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
1418 }
1419 }
1421 if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512()) {
1422 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1423 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1424 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1425 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1427 addRegisterClass(MVT::i1, &X86::VK1RegClass);
1428 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1429 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1431 for (MVT VT : MVT::fp_vector_valuetypes())
1432 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
1434 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
1435 setOperationAction(ISD::SETCC, MVT::i1, Custom);
1436 setOperationAction(ISD::XOR, MVT::i1, Legal);
1437 setOperationAction(ISD::OR, MVT::i1, Legal);
1438 setOperationAction(ISD::AND, MVT::i1, Legal);
1439 setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
1440 setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
1441 setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
1442 setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
1443 setOperationAction(ISD::LOAD, MVT::v16i1, Legal);
1445 setOperationAction(ISD::FADD, MVT::v16f32, Legal);
1446 setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
1447 setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
1448 setOperationAction(ISD::FDIV, MVT::v16f32, Legal);
1449 setOperationAction(ISD::FSQRT, MVT::v16f32, Legal);
1450 setOperationAction(ISD::FNEG, MVT::v16f32, Custom);
1452 setOperationAction(ISD::FADD, MVT::v8f64, Legal);
1453 setOperationAction(ISD::FSUB, MVT::v8f64, Legal);
1454 setOperationAction(ISD::FMUL, MVT::v8f64, Legal);
1455 setOperationAction(ISD::FDIV, MVT::v8f64, Legal);
1456 setOperationAction(ISD::FSQRT, MVT::v8f64, Legal);
1457 setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
1458 setOperationAction(ISD::FMA, MVT::v8f64, Legal);
1459 setOperationAction(ISD::FMA, MVT::v16f32, Legal);
1461 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
1462 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
1463 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
1464 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
1465 if (Subtarget->is64Bit()) {
1466 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
1467 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
1468 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
1469 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
1470 }
1471 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1472 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1473 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1474 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1475 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1476 setOperationAction(ISD::SINT_TO_FP, MVT::v8i1, Custom);
1477 setOperationAction(ISD::SINT_TO_FP, MVT::v16i1, Custom);
1478 setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Promote);
1479 setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Promote);
1480 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1481 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1482 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1483 setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
1484 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
1486 setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
1487 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1488 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1489 setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
1490 setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
1491 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1492 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1493 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1494 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1495 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1496 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
1497 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
1498 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
1500 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
1501 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
1502 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
1503 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
1504 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
1505 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Legal);
1507 setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
1508 setOperationAction(ISD::SETCC, MVT::v8i1, Custom);
1510 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1512 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
1513 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
1514 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
1515 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
1516 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
1517 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
1518 setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
1519 setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
1520 setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
1522 setOperationAction(ISD::ADD, MVT::v8i64, Legal);
1523 setOperationAction(ISD::ADD, MVT::v16i32, Legal);
1525 setOperationAction(ISD::SUB, MVT::v8i64, Legal);
1526 setOperationAction(ISD::SUB, MVT::v16i32, Legal);
1528 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1530 setOperationAction(ISD::SRL, MVT::v8i64, Custom);
1531 setOperationAction(ISD::SRL, MVT::v16i32, Custom);
1533 setOperationAction(ISD::SHL, MVT::v8i64, Custom);
1534 setOperationAction(ISD::SHL, MVT::v16i32, Custom);
1536 setOperationAction(ISD::SRA, MVT::v8i64, Custom);
1537 setOperationAction(ISD::SRA, MVT::v16i32, Custom);
1539 setOperationAction(ISD::AND, MVT::v8i64, Legal);
1540 setOperationAction(ISD::OR, MVT::v8i64, Legal);
1541 setOperationAction(ISD::XOR, MVT::v8i64, Legal);
1542 setOperationAction(ISD::AND, MVT::v16i32, Legal);
1543 setOperationAction(ISD::OR, MVT::v16i32, Legal);
1544 setOperationAction(ISD::XOR, MVT::v16i32, Legal);
1546 if (Subtarget->hasCDI()) {
1547 setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
1548 setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
1549 }
1551 // Custom lower several nodes.
1552 for (MVT VT : MVT::vector_valuetypes()) {
1553 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1554 // Extract subvector is special because the value type
1555 // (result) is 256/128-bit but the source is 512-bit wide.
1556 if (VT.is128BitVector() || VT.is256BitVector()) {
1557 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1558 }
1559 if (VT.getVectorElementType() == MVT::i1)
1560 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1562 // Do not attempt to custom lower other non-512-bit vectors
1563 if (!VT.is512BitVector())
1564 continue;
1566 if (EltSize >= 32) {
1567 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1568 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1569 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1570 setOperationAction(ISD::VSELECT, VT, Legal);
1571 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1572 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1573 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1574 setOperationAction(ISD::MLOAD, VT, Legal);
1575 setOperationAction(ISD::MSTORE, VT, Legal);
1576 }
1577 }
1578 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1579 MVT VT = (MVT::SimpleValueType)i;
1581 // Do not attempt to promote non-512-bit vectors.
1582 if (!VT.is512BitVector())
1583 continue;
1585 setOperationAction(ISD::SELECT, VT, Promote);
1586 AddPromotedToType (ISD::SELECT, VT, MVT::v8i64);
1587 }
1588 }
1590 if (!TM.Options.UseSoftFloat && Subtarget->hasBWI()) {
1591 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1592 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1594 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1595 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1597 setOperationAction(ISD::LOAD, MVT::v32i16, Legal);
1598 setOperationAction(ISD::LOAD, MVT::v64i8, Legal);
1599 setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
1600 setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
1601 setOperationAction(ISD::ADD, MVT::v32i16, Legal);
1602 setOperationAction(ISD::ADD, MVT::v64i8, Legal);
1603 setOperationAction(ISD::SUB, MVT::v32i16, Legal);
1604 setOperationAction(ISD::SUB, MVT::v64i8, Legal);
1605 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1607 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1608 const MVT VT = (MVT::SimpleValueType)i;
1610 const unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1612 // Do not attempt to promote non-512-bit vectors.
1613 if (!VT.is512BitVector())
1614 continue;
1616 if (EltSize < 32) {
1617 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1618 setOperationAction(ISD::VSELECT, VT, Legal);
1619 }
1620 }
1621 }
1623 if (!TM.Options.UseSoftFloat && Subtarget->hasVLX()) {
1624 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1625 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1627 setOperationAction(ISD::SETCC, MVT::v4i1, Custom);
1628 setOperationAction(ISD::SETCC, MVT::v2i1, Custom);
1629 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Legal);
1631 setOperationAction(ISD::AND, MVT::v8i32, Legal);
1632 setOperationAction(ISD::OR, MVT::v8i32, Legal);
1633 setOperationAction(ISD::XOR, MVT::v8i32, Legal);
1634 setOperationAction(ISD::AND, MVT::v4i32, Legal);
1635 setOperationAction(ISD::OR, MVT::v4i32, Legal);
1636 setOperationAction(ISD::XOR, MVT::v4i32, Legal);
1637 }
1639 // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
1640 // of this type with custom code.
1641 for (MVT VT : MVT::vector_valuetypes())
1642 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
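// For example, for (sign_extend_inreg:v4i64 %v, ValueType:v4i8) the legalizer
// keys the action on the v4i8 extend type rather than on v4i64. (Editorial
// example of the rule stated above.)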
1644 // We want to custom lower some of our intrinsics.
1645 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1646 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1647 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1648 if (!Subtarget->is64Bit())
1649 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1651 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1652 // handle type legalization for these operations here.
1654 // FIXME: We really should do custom legalization for addition and
1655 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1656 // than generic legalization for 64-bit multiplication-with-overflow, though.
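// (Editorial note: the custom lowering typically maps e.g.
// @llvm.sadd.with.overflow.i32 onto an x86 ADD that also defines EFLAGS and
// reads the overflow bit back with a seto-style setcc.)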
1657 for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
1658 // Add/Sub/Mul with overflow operations are custom lowered.
1659 MVT VT = IntVTs[i];
1660 setOperationAction(ISD::SADDO, VT, Custom);
1661 setOperationAction(ISD::UADDO, VT, Custom);
1662 setOperationAction(ISD::SSUBO, VT, Custom);
1663 setOperationAction(ISD::USUBO, VT, Custom);
1664 setOperationAction(ISD::SMULO, VT, Custom);
1665 setOperationAction(ISD::UMULO, VT, Custom);
1666 }
1669 if (!Subtarget->is64Bit()) {
1670 // These libcalls are not available in 32-bit.
1671 setLibcallName(RTLIB::SHL_I128, nullptr);
1672 setLibcallName(RTLIB::SRL_I128, nullptr);
1673 setLibcallName(RTLIB::SRA_I128, nullptr);
1674 }
1676 // Combine sin / cos into one node or libcall if possible.
1677 if (Subtarget->hasSinCos()) {
1678 setLibcallName(RTLIB::SINCOS_F32, "sincosf");
1679 setLibcallName(RTLIB::SINCOS_F64, "sincos");
1680 if (Subtarget->isTargetDarwin()) {
1681 // For MacOSX, we don't want the normal expansion of a libcall to sincos.
1682 // We want to issue a libcall to __sincos_stret to avoid memory traffic.
1683 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1684 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1685 }
1686 }
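// (Editorial note: __sincos_stret returns the {sin, cos} pair directly in
// registers, e.g. XMM0/XMM1 for double on x86-64, rather than storing the
// results through pointer out-parameters the way sincos() does.)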
1688 if (Subtarget->isTargetWin64()) {
1689 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1690 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1691 setOperationAction(ISD::SREM, MVT::i128, Custom);
1692 setOperationAction(ISD::UREM, MVT::i128, Custom);
1693 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1694 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
1695 }
1697 // We have target-specific dag combine patterns for the following nodes:
1698 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1699 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1700 setTargetDAGCombine(ISD::VSELECT);
1701 setTargetDAGCombine(ISD::SELECT);
1702 setTargetDAGCombine(ISD::SHL);
1703 setTargetDAGCombine(ISD::SRA);
1704 setTargetDAGCombine(ISD::SRL);
1705 setTargetDAGCombine(ISD::OR);
1706 setTargetDAGCombine(ISD::AND);
1707 setTargetDAGCombine(ISD::ADD);
1708 setTargetDAGCombine(ISD::FADD);
1709 setTargetDAGCombine(ISD::FSUB);
1710 setTargetDAGCombine(ISD::FMA);
1711 setTargetDAGCombine(ISD::SUB);
1712 setTargetDAGCombine(ISD::LOAD);
1713 setTargetDAGCombine(ISD::MLOAD);
1714 setTargetDAGCombine(ISD::STORE);
1715 setTargetDAGCombine(ISD::MSTORE);
1716 setTargetDAGCombine(ISD::ZERO_EXTEND);
1717 setTargetDAGCombine(ISD::ANY_EXTEND);
1718 setTargetDAGCombine(ISD::SIGN_EXTEND);
1719 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1720 setTargetDAGCombine(ISD::TRUNCATE);
1721 setTargetDAGCombine(ISD::SINT_TO_FP);
1722 setTargetDAGCombine(ISD::SETCC);
1723 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1724 setTargetDAGCombine(ISD::BUILD_VECTOR);
1725 setTargetDAGCombine(ISD::MUL);
1726 setTargetDAGCombine(ISD::XOR);
1728 computeRegisterProperties();
1730 // On Darwin, -Os means optimize for size without hurting performance,
1731 // so do not reduce the limit.
1732 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
1733 MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
1734 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
1735 MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1736 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
1737 MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1738 setPrefLoopAlignment(4); // 2^4 bytes.
1740 // A predictable cmov doesn't hurt on Atom because it's in-order.
1741 PredictableSelectIsExpensive = !Subtarget->isAtom();
1742 EnableExtLdPromotion = true;
1743 setPrefFunctionAlignment(4); // 2^4 bytes.
1745 verifyIntrinsicTables();
1746 }
1748 // This has so far only been implemented for 64-bit MachO.
1749 bool X86TargetLowering::useLoadStackGuardNode() const {
1750 return Subtarget->isTargetMachO() && Subtarget->is64Bit();
1751 }
1753 TargetLoweringBase::LegalizeTypeAction
1754 X86TargetLowering::getPreferredVectorAction(EVT VT) const {
1755 if (ExperimentalVectorWideningLegalization &&
1756 VT.getVectorNumElements() != 1 &&
1757 VT.getVectorElementType().getSimpleVT() != MVT::i1)
1758 return TypeWidenVector;
1760 return TargetLoweringBase::getPreferredVectorAction(VT);
1761 }
1763 EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
1764 if (!VT.isVector())
1765 return Subtarget->hasAVX512() ? MVT::i1 : MVT::i8;
1767 const unsigned NumElts = VT.getVectorNumElements();
1768 const EVT EltVT = VT.getVectorElementType();
1769 if (VT.is512BitVector()) {
1770 if (Subtarget->hasAVX512())
1771 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1772 EltVT == MVT::f32 || EltVT == MVT::f64)
1773 switch (NumElts) {
1774 case 8: return MVT::v8i1;
1775 case 16: return MVT::v16i1;
1776 }
1777 if (Subtarget->hasBWI())
1778 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1779 switch (NumElts) {
1780 case 32: return MVT::v32i1;
1781 case 64: return MVT::v64i1;
1782 }
1783 }
1785 if (VT.is256BitVector() || VT.is128BitVector()) {
1786 if (Subtarget->hasVLX())
1787 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1788 EltVT == MVT::f32 || EltVT == MVT::f64)
1789 switch (NumElts) {
1790 case 2: return MVT::v2i1;
1791 case 4: return MVT::v4i1;
1792 case 8: return MVT::v8i1;
1793 }
1794 if (Subtarget->hasBWI() && Subtarget->hasVLX())
1795 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1796 switch (NumElts) {
1797 case 8: return MVT::v8i1;
1798 case 16: return MVT::v16i1;
1799 case 32: return MVT::v32i1;
1800 }
1801 }
1803 return VT.changeVectorElementTypeToInteger();
1804 }
1806 /// Helper for getByValTypeAlignment to determine
1807 /// the desired ByVal argument alignment.
1808 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
1809 if (MaxAlign == 16)
1810 return;
1811 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1812 if (VTy->getBitWidth() == 128)
1813 MaxAlign = 16;
1814 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1815 unsigned EltAlign = 0;
1816 getMaxByValAlign(ATy->getElementType(), EltAlign);
1817 if (EltAlign > MaxAlign)
1818 MaxAlign = EltAlign;
1819 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1820 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1821 unsigned EltAlign = 0;
1822 getMaxByValAlign(STy->getElementType(i), EltAlign);
1823 if (EltAlign > MaxAlign)
1824 MaxAlign = EltAlign;
1825 if (MaxAlign == 16)
1826 break;
1827 }
1828 }
1829 }
1831 /// Return the desired alignment for ByVal aggregate
1832 /// function arguments in the caller parameter area. For X86, aggregates
1833 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
1834 /// are at 4-byte boundaries.
1835 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
1836 if (Subtarget->is64Bit()) {
1837 // Max of 8 and alignment of type.
1838 unsigned TyAlign = TD->getABITypeAlignment(Ty);
1839 if (TyAlign > 8)
1840 return TyAlign;
1841 return 8;
1842 }
1844 unsigned Align = 4;
1845 if (Subtarget->hasSSE1())
1846 getMaxByValAlign(Ty, Align);
1847 return Align;
1848 }
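// For example, on 32-bit x86 with SSE1 a byval struct containing an __m128
// field is aligned to 16 bytes by the logic above, while a plain
// struct { int a, b; } keeps the default 4-byte alignment. (Editorial
// example.)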
1850 /// Returns the target specific optimal type for load
1851 /// and store operations as a result of memset, memcpy, and memmove
1852 /// lowering. If DstAlign is zero, the destination can satisfy any
1853 /// alignment constraint. Similarly, if SrcAlign is zero there is no need to
1854 /// check it against an alignment requirement, probably because the source
1855 /// does not need to be loaded. If 'IsMemset' is true, this is expanding a
1856 /// memset; if 'ZeroMemset' is also true, it is a memset of zero.
1857 /// 'MemcpyStrSrc' indicates whether the memcpy source is constant so it does
1858 /// not need to be loaded.
1859 /// It returns EVT::Other if the type should be determined using generic
1860 /// target-independent logic.
1861 EVT
1862 X86TargetLowering::getOptimalMemOpType(uint64_t Size,
1863 unsigned DstAlign, unsigned SrcAlign,
1864 bool IsMemset, bool ZeroMemset,
1865 bool MemcpyStrSrc,
1866 MachineFunction &MF) const {
1867 const Function *F = MF.getFunction();
1868 if ((!IsMemset || ZeroMemset) &&
1869 !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
1870 Attribute::NoImplicitFloat)) {
1871 if (Size >= 16 &&
1872 (Subtarget->isUnalignedMemAccessFast() ||
1873 ((DstAlign == 0 || DstAlign >= 16) &&
1874 (SrcAlign == 0 || SrcAlign >= 16)))) {
1875 if (Size >= 32) {
1876 if (Subtarget->hasInt256())
1877 return MVT::v8i32;
1878 if (Subtarget->hasFp256())
1879 return MVT::v8f32;
1880 }
1881 if (Subtarget->hasSSE2())
1882 return MVT::v4i32;
1883 if (Subtarget->hasSSE1())
1884 return MVT::v4f32;
1885 } else if (!MemcpyStrSrc && Size >= 8 &&
1886 !Subtarget->is64Bit() &&
1887 Subtarget->hasSSE2()) {
1888 // Do not use f64 to lower memcpy if source is string constant. It's
1889 // better to use i32 to avoid the loads.
1890 return MVT::f64;
1891 }
1892 }
1893 if (Subtarget->is64Bit() && Size >= 8)
1894 return MVT::i64;
1895 return MVT::i32;
1896 }
1898 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
1899 if (VT == MVT::f32)
1900 return X86ScalarSSEf32;
1901 else if (VT == MVT::f64)
1902 return X86ScalarSSEf64;
1903 return true;
1904 }
1906 bool
1907 X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
1908 unsigned,
1909 unsigned,
1910 bool *Fast) const {
1911 if (Fast)
1912 *Fast = Subtarget->isUnalignedMemAccessFast();
1913 return true;
1914 }
1916 /// Return the entry encoding for a jump table in the
1917 /// current function. The returned value is a member of the
1918 /// MachineJumpTableInfo::JTEntryKind enum.
1919 unsigned X86TargetLowering::getJumpTableEncoding() const {
1920 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
1921 // symbol.
1922 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1923 Subtarget->isPICStyleGOT())
1924 return MachineJumpTableInfo::EK_Custom32;
1926 // Otherwise, use the normal jump table encoding heuristics.
1927 return TargetLowering::getJumpTableEncoding();
1928 }
1930 const MCExpr *
1931 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1932 const MachineBasicBlock *MBB,
1933 unsigned uid, MCContext &Ctx) const {
1934 assert(MBB->getParent()->getTarget().getRelocationModel() == Reloc::PIC_ &&
1935 Subtarget->isPICStyleGOT());
1936 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
1937 // entries.
1938 return MCSymbolRefExpr::Create(MBB->getSymbol(),
1939 MCSymbolRefExpr::VK_GOTOFF, Ctx);
1940 }
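// (Editorial example: each resulting 32-bit entry is emitted roughly as
//   .long .LBB0_3@GOTOFF
// i.e. the basic block address relative to the GOT base.)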
1942 /// Returns relocation base for the given PIC jumptable.
1943 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1944 SelectionDAG &DAG) const {
1945 if (!Subtarget->is64Bit())
1946 // This doesn't have SDLoc associated with it, but is not really the
1947 // same as a Register.
1948 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy());
1949 return Table;
1950 }
1952 /// This returns the relocation base for the given PIC jumptable,
1953 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
1954 const MCExpr *X86TargetLowering::
1955 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1956 MCContext &Ctx) const {
1957 // X86-64 uses RIP relative addressing based on the jump table label.
1958 if (Subtarget->isPICStyleRIPRel())
1959 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1961 // Otherwise, the reference is relative to the PIC base.
1962 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
1963 }
1965 // FIXME: Why is this routine here? Move to RegInfo!
1966 std::pair<const TargetRegisterClass*, uint8_t>
1967 X86TargetLowering::findRepresentativeClass(MVT VT) const {
1968 const TargetRegisterClass *RRC = nullptr;
1969 uint8_t Cost = 1;
1970 switch (VT.SimpleTy) {
1971 default:
1972 return TargetLowering::findRepresentativeClass(VT);
1973 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1974 RRC = Subtarget->is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
1975 break;
1976 case MVT::x86mmx:
1977 RRC = &X86::VR64RegClass;
1978 break;
1979 case MVT::f32: case MVT::f64:
1980 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1981 case MVT::v4f32: case MVT::v2f64:
1982 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
1983 case MVT::v4f64:
1984 RRC = &X86::VR128RegClass;
1985 break;
1986 }
1987 return std::make_pair(RRC, Cost);
1988 }
1990 bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
1991 unsigned &Offset) const {
1992 if (!Subtarget->isTargetLinux())
1993 return false;
1995 if (Subtarget->is64Bit()) {
1996 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
1997 Offset = 0x28;
1998 if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
1999 AddressSpace = 256;
2000 else
2001 AddressSpace = 257;
2002 } else {
2003 // %gs:0x14 on i386.
2004 Offset = 0x14;
2005 AddressSpace = 256;
2006 }
2007 return true;
2008 }
2010 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
2011 unsigned DestAS) const {
2012 assert(SrcAS != DestAS && "Expected different address spaces!");
2014 return SrcAS < 256 && DestAS < 256;
2015 }
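// (Editorial note: on x86, address spaces 256 and 257 select the GS and FS
// segment bases, so only casts where both sides lie in the flat range below
// 256 can be treated as no-ops.)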
2017 //===----------------------------------------------------------------------===//
2018 // Return Value Calling Convention Implementation
2019 //===----------------------------------------------------------------------===//
2021 #include "X86GenCallingConv.inc"
2023 bool
2024 X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2025 MachineFunction &MF, bool isVarArg,
2026 const SmallVectorImpl<ISD::OutputArg> &Outs,
2027 LLVMContext &Context) const {
2028 SmallVector<CCValAssign, 16> RVLocs;
2029 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2030 return CCInfo.CheckReturn(Outs, RetCC_X86);
2031 }
2033 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2034 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2035 return ScratchRegs;
2036 }
2038 SDValue
2039 X86TargetLowering::LowerReturn(SDValue Chain,
2040 CallingConv::ID CallConv, bool isVarArg,
2041 const SmallVectorImpl<ISD::OutputArg> &Outs,
2042 const SmallVectorImpl<SDValue> &OutVals,
2043 SDLoc dl, SelectionDAG &DAG) const {
2044 MachineFunction &MF = DAG.getMachineFunction();
2045 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2047 SmallVector<CCValAssign, 16> RVLocs;
2048 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2049 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2051 SDValue Flag;
2052 SmallVector<SDValue, 6> RetOps;
2053 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2054 // Operand #1 = Bytes To Pop
2055 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
2056 MVT::i16));
2058 // Copy the result values into the output registers.
2059 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2060 CCValAssign &VA = RVLocs[i];
2061 assert(VA.isRegLoc() && "Can only return in registers!");
2062 SDValue ValToCopy = OutVals[i];
2063 EVT ValVT = ValToCopy.getValueType();
2065 // Promote values to the appropriate types.
2066 if (VA.getLocInfo() == CCValAssign::SExt)
2067 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2068 else if (VA.getLocInfo() == CCValAssign::ZExt)
2069 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2070 else if (VA.getLocInfo() == CCValAssign::AExt)
2071 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2072 else if (VA.getLocInfo() == CCValAssign::BCvt)
2073 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy);
2075 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2076 "Unexpected FP-extend for return value.");
2078 // If this is x86-64, and we disabled SSE, we can't return FP values,
2079 // or SSE or MMX vectors.
2080 if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
2081 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
2082 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
2083 report_fatal_error("SSE register return with SSE disabled");
2084 }
2085 // Likewise we can't return F64 values with SSE1 only. gcc does so, but
2086 // llvm-gcc has never done it right and no one has noticed, so this
2087 // should be OK for now.
2088 if (ValVT == MVT::f64 &&
2089 (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
2090 report_fatal_error("SSE2 register return with SSE2 disabled");
2092 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2093 // the RET instruction and handled by the FP Stackifier.
2094 if (VA.getLocReg() == X86::FP0 ||
2095 VA.getLocReg() == X86::FP1) {
2096 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2097 // change the value to the FP stack register class.
2098 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2099 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2100 RetOps.push_back(ValToCopy);
2101 // Don't emit a copytoreg.
2102 continue;
2103 }
2105 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2106 // which is returned in RAX / RDX.
2107 if (Subtarget->is64Bit()) {
2108 if (ValVT == MVT::x86mmx) {
2109 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2110 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy);
2111 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2112 ValToCopy);
2113 // If we don't have SSE2 available, convert to v4f32 so the generated
2114 // register is legal.
2115 if (!Subtarget->hasSSE2())
2116 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, ValToCopy);
2117 }
2118 }
2119 }
2121 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
2122 Flag = Chain.getValue(1);
2123 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2124 }
2126 // The x86-64 ABIs require that for returning structs by value we copy
2127 // the sret argument into %rax/%eax (depending on ABI) for the return.
2128 // Win32 requires us to put the sret argument to %eax as well.
2129 // We saved the argument into a virtual register in the entry block,
2130 // so now we copy the value out and into %rax/%eax.
2131 if (DAG.getMachineFunction().getFunction()->hasStructRetAttr() &&
2132 (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC())) {
2133 MachineFunction &MF = DAG.getMachineFunction();
2134 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2135 unsigned Reg = FuncInfo->getSRetReturnReg();
2137 "SRetReturnReg should have been set in LowerFormalArguments().");
2138 SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());
2140 unsigned RetValReg
2141 = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ?
2142 X86::RAX : X86::EAX;
2143 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2144 Flag = Chain.getValue(1);
2146 // RAX/EAX now acts like a return value.
2147 RetOps.push_back(DAG.getRegister(RetValReg, getPointerTy()));
2148 }
2150 RetOps[0] = Chain; // Update chain.
2152 // Add the flag if we have it.
2153 if (Flag.getNode())
2154 RetOps.push_back(Flag);
2156 return DAG.getNode(X86ISD::RET_FLAG, dl, MVT::Other, RetOps);
2157 }
2159 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2160 if (N->getNumValues() != 1)
2161 return false;
2162 if (!N->hasNUsesOfValue(1, 0))
2163 return false;
2165 SDValue TCChain = Chain;
2166 SDNode *Copy = *N->use_begin();
2167 if (Copy->getOpcode() == ISD::CopyToReg) {
2168 // If the copy has a glue operand, we conservatively assume it isn't safe to
2169 // perform a tail call.
2170 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2171 return false;
2172 TCChain = Copy->getOperand(0);
2173 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2174 return false;
2176 bool HasRet = false;
2177 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2178 UI != UE; ++UI) {
2179 if (UI->getOpcode() != X86ISD::RET_FLAG)
2180 return false;
2181 // If we are returning more than one value, we can definitely
2182 // not make a tail call; see PR19530.
2183 if (UI->getNumOperands() > 4)
2184 return false;
2185 if (UI->getNumOperands() == 4 &&
2186 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2187 return false;
2188 HasRet = true;
2189 }
2191 if (!HasRet)
2192 return false;
2194 Chain = TCChain;
2195 return true;
2196 }
2198 EVT
2199 X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
2200 ISD::NodeType ExtendKind) const {
2201 MVT ReturnMVT;
2202 // TODO: Is this also valid on 32-bit?
2203 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
2204 ReturnMVT = MVT::i8;
2205 else
2206 ReturnMVT = MVT::i32;
2208 EVT MinVT = getRegisterType(Context, ReturnMVT);
2209 return VT.bitsLT(MinVT) ? MinVT : VT;
2210 }
2212 /// Lower the result values of a call into the
2213 /// appropriate copies out of appropriate physical registers.
2215 SDValue
2216 X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
2217 CallingConv::ID CallConv, bool isVarArg,
2218 const SmallVectorImpl<ISD::InputArg> &Ins,
2219 SDLoc dl, SelectionDAG &DAG,
2220 SmallVectorImpl<SDValue> &InVals) const {
2222 // Assign locations to each value returned by this call.
2223 SmallVector<CCValAssign, 16> RVLocs;
2224 bool Is64Bit = Subtarget->is64Bit();
2225 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2226 *DAG.getContext());
2227 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2229 // Copy all of the result registers out of their specified physreg.
2230 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2231 CCValAssign &VA = RVLocs[i];
2232 EVT CopyVT = VA.getValVT();
2234 // If this is x86-64, and we disabled SSE, we can't return FP values
2235 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
2236 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
2237 report_fatal_error("SSE register return with SSE disabled");
2238 }
2240 // If we prefer to use the value in xmm registers, copy it out as f80 and
2241 // use a truncate to move it from fp stack reg to xmm reg.
2242 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
2243 isScalarFPTypeInSSEReg(VA.getValVT()))
2244 CopyVT = MVT::f80;
2246 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
2247 CopyVT, InFlag).getValue(1);
2248 SDValue Val = Chain.getValue(0);
2250 if (CopyVT != VA.getValVT())
2251 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
2252 // This truncation won't change the value.
2253 DAG.getIntPtrConstant(1));
2255 InFlag = Chain.getValue(2);
2256 InVals.push_back(Val);
2257 }
2259 return Chain;
2260 }
2262 //===----------------------------------------------------------------------===//
2263 // C & StdCall & Fast Calling Convention implementation
2264 //===----------------------------------------------------------------------===//
2265 // The StdCall calling convention is used by many Windows API routines. It
2266 // differs from the C calling convention only slightly: the callee cleans up
2267 // the stack instead of the caller, and symbols are decorated in a special
2268 // way. It doesn't support any vector arguments.
2269 // For info on the fast calling convention see Fast Calling Convention (tail call)
2270 // implementation LowerX86_32FastCCCallTo.
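// For example, on 32-bit Windows a function declared as
//   int __stdcall f(int a, int b);
// is typically emitted under the decorated symbol _f@8, where 8 is the number
// of argument bytes the callee pops. (Editorial example.)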
2272 /// Determines whether a call uses struct return semantics.
2274 enum StructReturnType {
2275 NotStructReturn,
2276 RegStructReturn,
2277 StackStructReturn
2278 };
2279 static StructReturnType
2280 callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
2281 if (Outs.empty())
2282 return NotStructReturn;
2284 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
2285 if (!Flags.isSRet())
2286 return NotStructReturn;
2287 if (Flags.isInReg())
2288 return RegStructReturn;
2289 return StackStructReturn;
2290 }
2292 /// Determines whether a function uses struct return semantics.
2293 static StructReturnType
2294 argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
2295 if (Ins.empty())
2296 return NotStructReturn;
2298 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
2299 if (!Flags.isSRet())
2300 return NotStructReturn;
2301 if (Flags.isInReg())
2302 return RegStructReturn;
2303 return StackStructReturn;
2304 }
2306 /// Make a copy of an aggregate at address specified by "Src" to address
2307 /// "Dst" with size and alignment information specified by the specific
2308 /// parameter attribute. The copy will be passed as a byval function parameter.
2309 static SDValue
2310 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
2311 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
2312 SDLoc dl) {
2313 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
2315 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2316 /*isVolatile*/false, /*AlwaysInline=*/true,
2317 MachinePointerInfo(), MachinePointerInfo());
2318 }
2320 /// Return true if the calling convention is one that
2321 /// supports tail call optimization.
2322 static bool IsTailCallConvention(CallingConv::ID CC) {
2323 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
2324 CC == CallingConv::HiPE);
2325 }
2327 /// \brief Return true if the calling convention is a C calling convention.
2328 static bool IsCCallConvention(CallingConv::ID CC) {
2329 return (CC == CallingConv::C || CC == CallingConv::X86_64_Win64 ||
2330 CC == CallingConv::X86_64_SysV);
2331 }
2333 bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
2334 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
2335 return false;
2337 CallSite CS(CI);
2338 CallingConv::ID CalleeCC = CS.getCallingConv();
2339 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
2340 return false;
2342 return true;
2343 }
2345 /// Return true if the function is being made into
2346 /// a tailcall target by changing its ABI.
2347 static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
2348 bool GuaranteedTailCallOpt) {
2349 return GuaranteedTailCallOpt && IsTailCallConvention(CC);
2350 }
2352 SDValue
2353 X86TargetLowering::LowerMemArgument(SDValue Chain,
2354 CallingConv::ID CallConv,
2355 const SmallVectorImpl<ISD::InputArg> &Ins,
2356 SDLoc dl, SelectionDAG &DAG,
2357 const CCValAssign &VA,
2358 MachineFrameInfo *MFI,
2359 unsigned i) const {
2360 // Create the nodes corresponding to a load from this parameter slot.
2361 ISD::ArgFlagsTy Flags = Ins[i].Flags;
2362 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(
2363 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
2364 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
2365 EVT ValVT;
2367 // If the value is passed by pointer, the address is passed instead of the
2368 // value itself.
2369 if (VA.getLocInfo() == CCValAssign::Indirect)
2370 ValVT = VA.getLocVT();
2372 ValVT = VA.getValVT();
2374 // FIXME: For now, all byval parameter objects are marked mutable. This can be
2375 // changed with more analysis.
2376 // In case of tail call optimization, mark all arguments mutable, since they
2377 // could be overwritten when lowering the arguments of a tail call.
2378 if (Flags.isByVal()) {
2379 unsigned Bytes = Flags.getByValSize();
2380 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
2381 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
2382 return DAG.getFrameIndex(FI, getPointerTy());
2383 } else {
2384 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
2385 VA.getLocMemOffset(), isImmutable);
2386 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2387 return DAG.getLoad(ValVT, dl, Chain, FIN,
2388 MachinePointerInfo::getFixedStack(FI),
2389 false, false, false, 0);
2390 }
2391 }
2393 // FIXME: Get this from tablegen.
2394 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
2395 const X86Subtarget *Subtarget) {
2396 assert(Subtarget->is64Bit());
2398 if (Subtarget->isCallingConvWin64(CallConv)) {
2399 static const MCPhysReg GPR64ArgRegsWin64[] = {
2400 X86::RCX, X86::RDX, X86::R8, X86::R9
2401 };
2402 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
2403 }
2405 static const MCPhysReg GPR64ArgRegs64Bit[] = {
2406 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
2407 };
2408 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
2409 }
2411 // FIXME: Get this from tablegen.
2412 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
2413 CallingConv::ID CallConv,
2414 const X86Subtarget *Subtarget) {
2415 assert(Subtarget->is64Bit());
2416 if (Subtarget->isCallingConvWin64(CallConv)) {
2417 // The XMM registers which might contain var arg parameters are shadowed
2418 // in their paired GPRs, so we only need to save the GPRs to their home
2419 // slots.
2420 // TODO: __vectorcall will change this.
2421 return None;
2422 }
2424 const Function *Fn = MF.getFunction();
2425 bool NoImplicitFloatOps = Fn->getAttributes().
2426 hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat);
2427 assert(!(MF.getTarget().Options.UseSoftFloat && NoImplicitFloatOps) &&
2428 "SSE register cannot be used when SSE is disabled!");
2429 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps ||
2430 !Subtarget->hasSSE1())
2431 // Kernel mode asks for SSE to be disabled, so there are no XMM argument
2432 // registers.
2433 return None;
2435 static const MCPhysReg XMMArgRegs64Bit[] = {
2436 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2437 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2438 };
2439 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
2440 }
2442 SDValue
2443 X86TargetLowering::LowerFormalArguments(SDValue Chain,
2444 CallingConv::ID CallConv,
2445 bool isVarArg,
2446 const SmallVectorImpl<ISD::InputArg> &Ins,
2447 SDLoc dl,
2448 SelectionDAG &DAG,
2449 SmallVectorImpl<SDValue> &InVals)
2450 const {
2451 MachineFunction &MF = DAG.getMachineFunction();
2452 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2454 const Function* Fn = MF.getFunction();
2455 if (Fn->hasExternalLinkage() &&
2456 Subtarget->isTargetCygMing() &&
2457 Fn->getName() == "main")
2458 FuncInfo->setForceFramePointer(true);
2460 MachineFrameInfo *MFI = MF.getFrameInfo();
2461 bool Is64Bit = Subtarget->is64Bit();
2462 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2464 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2465 "Var args not supported with calling convention fastcc, ghc or hipe");
2467 // Assign locations to all of the incoming arguments.
2468 SmallVector<CCValAssign, 16> ArgLocs;
2469 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2471 // Allocate shadow area for Win64
2472 if (IsWin64)
2473 CCInfo.AllocateStack(32, 8);
2475 CCInfo.AnalyzeFormalArguments(Ins, CC_X86);
2477 unsigned LastVal = ~0U;
2478 SDValue ArgValue;
2479 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2480 CCValAssign &VA = ArgLocs[i];
2481 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
2482 // places.
2483 assert(VA.getValNo() != LastVal &&
2484 "Don't support value assigned to multiple locs yet");
2486 LastVal = VA.getValNo();
2488 if (VA.isRegLoc()) {
2489 EVT RegVT = VA.getLocVT();
2490 const TargetRegisterClass *RC;
2491 if (RegVT == MVT::i32)
2492 RC = &X86::GR32RegClass;
2493 else if (Is64Bit && RegVT == MVT::i64)
2494 RC = &X86::GR64RegClass;
2495 else if (RegVT == MVT::f32)
2496 RC = &X86::FR32RegClass;
2497 else if (RegVT == MVT::f64)
2498 RC = &X86::FR64RegClass;
2499 else if (RegVT.is512BitVector())
2500 RC = &X86::VR512RegClass;
2501 else if (RegVT.is256BitVector())
2502 RC = &X86::VR256RegClass;
2503 else if (RegVT.is128BitVector())
2504 RC = &X86::VR128RegClass;
2505 else if (RegVT == MVT::x86mmx)
2506 RC = &X86::VR64RegClass;
2507 else if (RegVT == MVT::i1)
2508 RC = &X86::VK1RegClass;
2509 else if (RegVT == MVT::v8i1)
2510 RC = &X86::VK8RegClass;
2511 else if (RegVT == MVT::v16i1)
2512 RC = &X86::VK16RegClass;
2513 else if (RegVT == MVT::v32i1)
2514 RC = &X86::VK32RegClass;
2515 else if (RegVT == MVT::v64i1)
2516 RC = &X86::VK64RegClass;
2518 llvm_unreachable("Unknown argument type!");
2520 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2521 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
2523 // If this is an 8 or 16-bit value, it is really passed promoted to 32
2524 // bits. Insert an assert[sz]ext to capture this, then truncate to the
2525 // right size.
2526 if (VA.getLocInfo() == CCValAssign::SExt)
2527 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2528 DAG.getValueType(VA.getValVT()));
2529 else if (VA.getLocInfo() == CCValAssign::ZExt)
2530 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2531 DAG.getValueType(VA.getValVT()));
2532 else if (VA.getLocInfo() == CCValAssign::BCvt)
2533 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
2535 if (VA.isExtInLoc()) {
2536 // Handle MMX values passed in XMM regs.
2537 if (RegVT.isVector())
2538 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
2539 else
2540 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2541 }
2542 } else {
2543 assert(VA.isMemLoc());
2544 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i);
2545 }
2547 // If value is passed via pointer - do a load.
2548 if (VA.getLocInfo() == CCValAssign::Indirect)
2549 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
2550 MachinePointerInfo(), false, false, false, 0);
2552 InVals.push_back(ArgValue);
2553 }
2555 if (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) {
2556 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2557 // The x86-64 ABIs require that for returning structs by value we copy
2558 // the sret argument into %rax/%eax (depending on ABI) for the return.
2559 // Win32 requires us to put the sret argument to %eax as well.
2560 // Save the argument into a virtual register so that we can access it
2561 // from the return points.
2562 if (Ins[i].Flags.isSRet()) {
2563 unsigned Reg = FuncInfo->getSRetReturnReg();
2564 if (!Reg) {
2565 MVT PtrTy = getPointerTy();
2566 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
2567 FuncInfo->setSRetReturnReg(Reg);
2568 }
2569 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[i]);
2570 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
2571 break;
2572 }
2573 }
2574 }
2576 unsigned StackSize = CCInfo.getNextStackOffset();
2577 // Align stack specially for tail calls.
2578 if (FuncIsMadeTailCallSafe(CallConv,
2579 MF.getTarget().Options.GuaranteedTailCallOpt))
2580 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
2582 // If the function takes variable number of arguments, make a frame index for
2583 // the start of the first vararg value... for expansion of llvm.va_start. We
2584 // can skip this if there are no va_start calls.
2585 if (MFI->hasVAStart() &&
2586 (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
2587 CallConv != CallingConv::X86_ThisCall))) {
2588 FuncInfo->setVarArgsFrameIndex(
2589 MFI->CreateFixedObject(1, StackSize, true));
2590 }
2592 // Figure out if XMM registers are in use.
2593 assert(!(MF.getTarget().Options.UseSoftFloat &&
2594 Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
2595 Attribute::NoImplicitFloat)) &&
2596 "SSE register cannot be used when SSE is disabled!");
2598 // 64-bit calling conventions support varargs and register parameters, so we
2599 // have to do extra work to spill them in the prologue.
2600 if (Is64Bit && isVarArg && MFI->hasVAStart()) {
2601 // Find the first unallocated argument registers.
2602 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
2603 ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
2604 unsigned NumIntRegs =
2605 CCInfo.getFirstUnallocated(ArgGPRs.data(), ArgGPRs.size());
2606 unsigned NumXMMRegs =
2607 CCInfo.getFirstUnallocated(ArgXMMs.data(), ArgXMMs.size());
2608 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
2609 "SSE register cannot be used when SSE is disabled!");
2611 // Gather all the live in physical registers.
2612 SmallVector<SDValue, 6> LiveGPRs;
2613 SmallVector<SDValue, 8> LiveXMMRegs;
2614 SDValue ALVal;
2615 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
2616 unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
2617 LiveGPRs.push_back(
2618 DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
2619 }
2620 if (!ArgXMMs.empty()) {
2621 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2622 ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
2623 for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
2624 unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
2625 LiveXMMRegs.push_back(
2626 DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
2627 }
2628 }
2630 if (IsWin64) {
2631 const TargetFrameLowering &TFI = *MF.getSubtarget().getFrameLowering();
2632 // Get to the caller-allocated home save location. Add 8 to account
2633 // for the return address.
2634 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
2635 FuncInfo->setRegSaveFrameIndex(
2636 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
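// (Editorial note on the Win64 ABI: the caller reserves a 32-byte shadow
// area immediately above the return address, so at function entry the home
// slots for RCX, RDX, R8 and R9 sit at [RSP+8], [RSP+16], [RSP+24] and
// [RSP+32] respectively.)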
2637 // Fixup to set vararg frame on shadow area (4 x i64).
2638 if (NumIntRegs < 4)
2639 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
2640 } else {
2641 // For X86-64, if there are vararg parameters that are passed via
2642 // registers, then we must store them to their spots on the stack so
2643 // they may be loaded by dereferencing the result of va_next.
2644 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
2645 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
2646 FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
2647 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
2648 }
2650 // Store the integer parameter registers.
2651 SmallVector<SDValue, 8> MemOps;
2652 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
2653 getPointerTy());
2654 unsigned Offset = FuncInfo->getVarArgsGPOffset();
2655 for (SDValue Val : LiveGPRs) {
2656 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
2657 DAG.getIntPtrConstant(Offset));
2658 SDValue Store =
2659 DAG.getStore(Val.getValue(1), dl, Val, FIN,
2660 MachinePointerInfo::getFixedStack(
2661 FuncInfo->getRegSaveFrameIndex(), Offset),
2662 false, false, 0);
2663 MemOps.push_back(Store);
2664 Offset += 8;
2665 }
2667 if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
2668 // Now store the XMM (fp + vector) parameter registers.
2669 SmallVector<SDValue, 12> SaveXMMOps;
2670 SaveXMMOps.push_back(Chain);
2671 SaveXMMOps.push_back(ALVal);
2672 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2673 FuncInfo->getRegSaveFrameIndex()));
2674 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2675 FuncInfo->getVarArgsFPOffset()));
2676 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
2677 LiveXMMRegs.end());
2678 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
2679 MVT::Other, SaveXMMOps));
2680 }
2682 if (!MemOps.empty())
2683 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
2684 }
2686 if (isVarArg && MFI->hasMustTailInVarArgFunc()) {
2687 // Find the largest legal vector type.
2688 MVT VecVT = MVT::Other;
2689 // FIXME: Only some x86_32 calling conventions support AVX512.
2690 if (Subtarget->hasAVX512() &&
2691 (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
2692 CallConv == CallingConv::Intel_OCL_BI)))
2693 VecVT = MVT::v16f32;
2694 else if (Subtarget->hasAVX())
2695 VecVT = MVT::v8f32;
2696 else if (Subtarget->hasSSE2())
2697 VecVT = MVT::v4f32;
2699 // We forward some GPRs and some vector types.
2700 SmallVector<MVT, 2> RegParmTypes;
2701 MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
2702 RegParmTypes.push_back(IntVT);
2703 if (VecVT != MVT::Other)
2704 RegParmTypes.push_back(VecVT);
2706 // Compute the set of forwarded registers. The rest are scratch.
2707 SmallVectorImpl<ForwardedRegister> &Forwards =
2708 FuncInfo->getForwardedMustTailRegParms();
2709 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
2711 // Conservatively forward AL on x86_64, since it might be used for varargs.
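// (Editorial note: in the SysV x86-64 ABI, AL carries an upper bound on the
// number of XMM registers used by a varargs call, which is why it must be
// preserved for a musttail forwarding thunk.)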
2712 if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
2713 unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2714 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
2715 }
2717 // Copy all forwards from physical to virtual registers.
2718 for (ForwardedRegister &F : Forwards) {
2719 // FIXME: Can we use a less constrained schedule?
2720 SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
2721 F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
2722 Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
2723 }
2724 }
2726 // Some CCs need callee pop.
2727 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
2728 MF.getTarget().Options.GuaranteedTailCallOpt)) {
2729 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
2730 } else {
2731 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
2732 // If this is an sret function, the return should pop the hidden pointer.
2733 if (!Is64Bit && !IsTailCallConvention(CallConv) &&
2734 !Subtarget->getTargetTriple().isOSMSVCRT() &&
2735 argsAreStructReturn(Ins) == StackStructReturn)
2736 FuncInfo->setBytesToPopOnReturn(4);
2737 }
2739 if (!Is64Bit) {
2740 // RegSaveFrameIndex is X86-64 only.
2741 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
2742 if (CallConv == CallingConv::X86_FastCall ||
2743 CallConv == CallingConv::X86_ThisCall)
2744 // fastcc functions can't have varargs.
2745 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
2746 }
2748 FuncInfo->setArgumentStackSize(StackSize);
2750 return Chain;
2751 }
2753 SDValue
2754 X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
2755 SDValue StackPtr, SDValue Arg,
2756 SDLoc dl, SelectionDAG &DAG,
2757 const CCValAssign &VA,
2758 ISD::ArgFlagsTy Flags) const {
2759 unsigned LocMemOffset = VA.getLocMemOffset();
2760 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
2761 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
2762 if (Flags.isByVal())
2763 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
2765 return DAG.getStore(Chain, dl, Arg, PtrOff,
2766 MachinePointerInfo::getStack(LocMemOffset),
2767 false, false, 0);
2768 }
2770 /// Emit a load of return address if tail call
2771 /// optimization is performed and it is required.
2772 SDValue
2773 X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
2774 SDValue &OutRetAddr, SDValue Chain,
2775 bool IsTailCall, bool Is64Bit,
2776 int FPDiff, SDLoc dl) const {
2777 // Adjust the Return address stack slot.
2778 EVT VT = getPointerTy();
2779 OutRetAddr = getReturnAddressFrameIndex(DAG);
2781 // Load the "old" Return address.
2782 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
2783 false, false, false, 0);
2784 return SDValue(OutRetAddr.getNode(), 1);
2785 }
2787 /// Emit a store of the return address if tail call
2788 /// optimization is performed and it is required (FPDiff!=0).
2789 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
2790 SDValue Chain, SDValue RetAddrFrIdx,
2791 EVT PtrVT, unsigned SlotSize,
2792 int FPDiff, SDLoc dl) {
2793 // Store the return address to the appropriate stack slot.
2794 if (!FPDiff) return Chain;
2795 // Calculate the new stack slot for the return address.
2796 int NewReturnAddrFI =
2797 MF.getFrameInfo()->CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
2798 false);
2799 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
2800 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
2801 MachinePointerInfo::getFixedStack(NewReturnAddrFI),
2802 false, false, 0);
2803 return Chain;
2804 }
2806 SDValue
2807 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2808 SmallVectorImpl<SDValue> &InVals) const {
2809 SelectionDAG &DAG = CLI.DAG;
2810 SDLoc &dl = CLI.DL;
2811 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2812 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2813 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2814 SDValue Chain = CLI.Chain;
2815 SDValue Callee = CLI.Callee;
2816 CallingConv::ID CallConv = CLI.CallConv;
2817 bool &isTailCall = CLI.IsTailCall;
2818 bool isVarArg = CLI.IsVarArg;
2820 MachineFunction &MF = DAG.getMachineFunction();
2821 bool Is64Bit = Subtarget->is64Bit();
2822 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2823 StructReturnType SR = callIsStructReturn(Outs);
2824 bool IsSibcall = false;
2825 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
2827 if (MF.getTarget().Options.DisableTailCalls)
2828 isTailCall = false;
2830 bool IsMustTail = CLI.CS && CLI.CS->isMustTailCall();
2831 if (IsMustTail) {
2832 // Force this to be a tail call. The verifier rules are enough to ensure
2833 // that we can lower this successfully without moving the return address
2834 // around.
2835 isTailCall = true;
2836 } else if (isTailCall) {
2837 // Check if it's really possible to do a tail call.
2838 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
2839 isVarArg, SR != NotStructReturn,
2840 MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
2841 Outs, OutVals, Ins, DAG);
2843     // Sibcalls are automatically detected tailcalls which do not require
2844     // ABI changes.
2845     if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
2846       IsSibcall = true;
2852 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2853 "Var args not supported with calling convention fastcc, ghc or hipe");
2855 // Analyze operands of the call, assigning locations to each operand.
2856 SmallVector<CCValAssign, 16> ArgLocs;
2857 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2859 // Allocate shadow area for Win64
2860   if (IsWin64)
2861     CCInfo.AllocateStack(32, 8);
2863 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2865 // Get a count of how many bytes are to be pushed on the stack.
2866 unsigned NumBytes = CCInfo.getNextStackOffset();
2867   if (IsSibcall)
2868     // This is a sibcall. The memory operands are available in the caller's
2869     // incoming argument area, i.e. in its own caller's stack frame.
2870     NumBytes = 0;
2871   else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
2872 IsTailCallConvention(CallConv))
2873 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
2874   int FPDiff = 0;
2876   if (isTailCall && !IsSibcall && !IsMustTail) {
2877 // Lower arguments at fp - stackoffset + fpdiff.
2878 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
2880 FPDiff = NumBytesCallerPushed - NumBytes;
2882 // Set the delta of movement of the returnaddr stackslot.
2883 // But only set if delta is greater than previous delta.
2884 if (FPDiff < X86Info->getTCReturnAddrDelta())
2885 X86Info->setTCReturnAddrDelta(FPDiff);
2888 unsigned NumBytesToPush = NumBytes;
2889 unsigned NumBytesToPop = NumBytes;
2891   // If we have an inalloca argument, all stack space has already been allocated
2892   // for us and will be right at the top of the stack. We don't support multiple
2893   // arguments passed in memory when using inalloca.
2894   if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
2895     NumBytesToPush = 0;
2896     if (!ArgLocs.back().isMemLoc())
2897       report_fatal_error("cannot use inalloca attribute on a register "
2898                          "parameter");
2899 if (ArgLocs.back().getLocMemOffset() != 0)
2900 report_fatal_error("any parameter with the inalloca attribute must be "
2901 "the only memory argument");
2905 Chain = DAG.getCALLSEQ_START(
2906 Chain, DAG.getIntPtrConstant(NumBytesToPush, true), dl);
2908 SDValue RetAddrFrIdx;
2909 // Load return address for tail calls.
2910 if (isTailCall && FPDiff)
2911 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2912 Is64Bit, FPDiff, dl);
2914 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2915   SmallVector<SDValue, 8> MemOpChains;
2916   SDValue StackPtr;
2918   // Walk the register/memloc assignments, inserting copies/loads. In the case
2919   // of tail call optimization, arguments are handled later.
2920 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
2921 DAG.getSubtarget().getRegisterInfo());
2922 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2923 // Skip inalloca arguments, they have already been written.
2924 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2925     if (Flags.isInAlloca())
2926       continue;
2928 CCValAssign &VA = ArgLocs[i];
2929 EVT RegVT = VA.getLocVT();
2930 SDValue Arg = OutVals[i];
2931 bool isByVal = Flags.isByVal();
2933 // Promote the value if needed.
2934 switch (VA.getLocInfo()) {
2935 default: llvm_unreachable("Unknown loc info!");
2936 case CCValAssign::Full: break;
2937     case CCValAssign::SExt:
2938       Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2939       break;
2940     case CCValAssign::ZExt:
2941       Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2942       break;
2943     case CCValAssign::AExt:
2944       if (RegVT.is128BitVector()) {
2945         // Special case: passing MMX values in XMM registers.
2946         Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
2947         Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
2948         Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
2949       } else
2950         Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
2951       break;
2952     case CCValAssign::BCvt:
2953       Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
2954       break;
2955 case CCValAssign::Indirect: {
2956 // Store the argument.
2957 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2958 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2959 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
2960                            MachinePointerInfo::getFixedStack(FI),
2961                            false, false, 0);
2962       Arg = SpillSlot;
2963       break;
2964     }
2965     }
2967 if (VA.isRegLoc()) {
2968 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2969 if (isVarArg && IsWin64) {
2970       // The Win64 ABI requires an XMM register argument to also be copied to
2971       // its corresponding integer shadow register if the callee is varargs.
2972 unsigned ShadowReg = 0;
2973 switch (VA.getLocReg()) {
2974 case X86::XMM0: ShadowReg = X86::RCX; break;
2975 case X86::XMM1: ShadowReg = X86::RDX; break;
2976 case X86::XMM2: ShadowReg = X86::R8; break;
2977 case X86::XMM3: ShadowReg = X86::R9; break;
2978         }
2979         if (ShadowReg)
2980           RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2982 } else if (!IsSibcall && (!isTailCall || isByVal)) {
2983 assert(VA.isMemLoc());
2984 if (!StackPtr.getNode())
2985         StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
2986                                       getPointerTy());
2987       MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2988                                              dl, DAG, VA, Flags));
2992 if (!MemOpChains.empty())
2993 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2995 if (Subtarget->isPICStyleGOT()) {
2996     // ELF / PIC requires the GOT pointer to be in the EBX register before
2997     // function calls via the PLT.
2998     if (!isTailCall) {
2999       RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
3000                DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy())));
3001     } else {
3002 // If we are tail calling and generating PIC/GOT style code load the
3003 // address of the callee into ECX. The value in ecx is used as target of
3004 // the tail jump. This is done to circumvent the ebx/callee-saved problem
3005 // for tail calls on PIC/GOT architectures. Normally we would just put the
3006 // address of GOT into ebx and then call target@PLT. But for tail calls
3007       // ebx would be restored (since ebx is callee saved) before jumping to the
3008       // callee.
3010 // Note: The actual moving to ECX is done further down.
3011 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
3012 if (G && !G->getGlobal()->hasHiddenVisibility() &&
3013 !G->getGlobal()->hasProtectedVisibility())
3014 Callee = LowerGlobalAddress(Callee, DAG);
3015 else if (isa<ExternalSymbolSDNode>(Callee))
3016 Callee = LowerExternalSymbol(Callee, DAG);
3020 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
3021 // From AMD64 ABI document:
3022 // For calls that may call functions that use varargs or stdargs
3023 // (prototype-less calls or calls to functions containing ellipsis (...) in
3024     // the declaration) %al is used as a hidden argument to specify the number
3025     // of SSE registers used. The contents of %al do not need to match exactly
3026     // the number of registers, but must be an upper bound on the number of SSE
3027     // registers used and is in the range 0 - 8 inclusive.
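    // For illustration (SysV AMD64 convention): a variadic call such as
    // printf("%f", x) that passes one argument in an XMM register is lowered
    // so that 'movb $1, %al' immediately precedes the call instruction.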
3029 // Count the number of XMM registers allocated.
3030 static const MCPhysReg XMMArgRegs[] = {
3031 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3032       X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3033     };
3034 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
3035 assert((Subtarget->hasSSE1() || !NumXMMRegs)
3036 && "SSE registers cannot be used when SSE is disabled");
3038 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
3039 DAG.getConstant(NumXMMRegs, MVT::i8)));
3042 if (isVarArg && IsMustTail) {
3043 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
3044 for (const auto &F : Forwards) {
3045 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3046 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
3050 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
3051 // don't need this because the eligibility check rejects calls that require
3052 // shuffling arguments passed in memory.
3053 if (!IsSibcall && isTailCall) {
3054 // Force all the incoming stack arguments to be loaded from the stack
3055 // before any new outgoing arguments are stored to the stack, because the
3056 // outgoing stack slots may alias the incoming argument stack slots, and
3057 // the alias isn't otherwise explicit. This is slightly more conservative
3058 // than necessary, because it means that each store effectively depends
3059 // on every argument instead of just those arguments it would clobber.
3060 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
3062     SmallVector<SDValue, 8> MemOpChains2;
3063     SDValue FIN;
3064     int FI = 0;
3065 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3066       CCValAssign &VA = ArgLocs[i];
3067       if (VA.isRegLoc())
3068         continue;
3069       assert(VA.isMemLoc());
3070 SDValue Arg = OutVals[i];
3071 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3072 // Skip inalloca arguments. They don't require any work.
3073       if (Flags.isInAlloca())
3074         continue;
3075 // Create frame index.
3076 int32_t Offset = VA.getLocMemOffset()+FPDiff;
3077 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
3078 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
3079 FIN = DAG.getFrameIndex(FI, getPointerTy());
3081 if (Flags.isByVal()) {
3082 // Copy relative to framepointer.
3083 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
3084 if (!StackPtr.getNode())
3085 StackPtr = DAG.getCopyFromReg(Chain, dl,
3086                                         RegInfo->getStackRegister(),
3087                                         getPointerTy());
3088 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
3090         MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
3091                                                          ArgChain,
3092                                                          Flags, DAG, dl));
3093       } else {
3094         // Store relative to framepointer.
3095 MemOpChains2.push_back(
3096 DAG.getStore(ArgChain, dl, Arg, FIN,
3097                          MachinePointerInfo::getFixedStack(FI),
3098                          false, false, 0));
3099       }
3100     }
3102 if (!MemOpChains2.empty())
3103 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
3105 // Store the return address to the appropriate stack slot.
3106 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
3107                                      getPointerTy(), RegInfo->getSlotSize(),
3108                                      FPDiff, dl);
3111 // Build a sequence of copy-to-reg nodes chained together with token chain
3112 // and flag operands which copy the outgoing args into registers.
3113   SDValue InFlag;
3114   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3115 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3116 RegsToPass[i].second, InFlag);
3117 InFlag = Chain.getValue(1);
3120 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
3121 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
3122 // In the 64-bit large code model, we have to make all calls
3123 // through a register, since the call instruction's 32-bit
3124     // pc-relative offset may not be large enough to hold the whole
3125     // address.
3126 } else if (Callee->getOpcode() == ISD::GlobalAddress) {
3127 // If the callee is a GlobalAddress node (quite common, every direct call
3128     // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
3129     // it.
3130 GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee);
3132     // We should use an extra load for direct calls to dllimported functions.
3134 const GlobalValue *GV = G->getGlobal();
3135 if (!GV->hasDLLImportStorageClass()) {
3136 unsigned char OpFlags = 0;
3137 bool ExtraLoad = false;
3138 unsigned WrapperKind = ISD::DELETED_NODE;
3140 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
3141       // external symbols must go through the PLT in PIC mode. If the symbol
3142 // has hidden or protected visibility, or if it is static or local, then
3143 // we don't need to use the PLT - we can directly call it.
3144 if (Subtarget->isTargetELF() &&
3145 DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
3146 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
3147 OpFlags = X86II::MO_PLT;
3148 } else if (Subtarget->isPICStyleStubAny() &&
3149 (GV->isDeclaration() || GV->isWeakForLinker()) &&
3150 (!Subtarget->getTargetTriple().isMacOSX() ||
3151 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3152 // PC-relative references to external symbols should go through $stub,
3153 // unless we're building with the leopard linker or later, which
3154 // automatically synthesizes these stubs.
3155 OpFlags = X86II::MO_DARWIN_STUB;
3156 } else if (Subtarget->isPICStyleRIPRel() &&
3157 isa<Function>(GV) &&
3158 cast<Function>(GV)->getAttributes().
3159 hasAttribute(AttributeSet::FunctionIndex,
3160 Attribute::NonLazyBind)) {
3161 // If the function is marked as non-lazy, generate an indirect call
3162 // which loads from the GOT directly. This avoids runtime overhead
3163 // at the cost of eager binding (and one extra byte of encoding).
3164 OpFlags = X86II::MO_GOTPCREL;
3165 WrapperKind = X86ISD::WrapperRIP;
3169 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
3170 G->getOffset(), OpFlags);
3172 // Add a wrapper if needed.
3173 if (WrapperKind != ISD::DELETED_NODE)
3174 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee);
3175 // Add extra indirection if needed.
3176       if (ExtraLoad)
3177         Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
3178 MachinePointerInfo::getGOT(),
3179 false, false, false, 0);
3181 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3182 unsigned char OpFlags = 0;
3184 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
3185 // external symbols should go through the PLT.
3186 if (Subtarget->isTargetELF() &&
3187 DAG.getTarget().getRelocationModel() == Reloc::PIC_) {
3188 OpFlags = X86II::MO_PLT;
3189 } else if (Subtarget->isPICStyleStubAny() &&
3190 (!Subtarget->getTargetTriple().isMacOSX() ||
3191 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3192 // PC-relative references to external symbols should go through $stub,
3193 // unless we're building with the leopard linker or later, which
3194 // automatically synthesizes these stubs.
3195 OpFlags = X86II::MO_DARWIN_STUB;
3198     Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
3199                                          OpFlags);
3200 } else if (Subtarget->isTarget64BitILP32() && Callee->getValueType(0) == MVT::i32) {
3201     // Zero-extend the 32-bit Callee address to 64 bits, as the x32 ABI requires.
3202 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
3205 // Returns a chain & a flag for retval copy to use.
3206 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3207 SmallVector<SDValue, 8> Ops;
3209 if (!IsSibcall && isTailCall) {
3210 Chain = DAG.getCALLSEQ_END(Chain,
3211 DAG.getIntPtrConstant(NumBytesToPop, true),
3212 DAG.getIntPtrConstant(0, true), InFlag, dl);
3213 InFlag = Chain.getValue(1);
3216 Ops.push_back(Chain);
3217 Ops.push_back(Callee);
3219   if (isTailCall)
3220     Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
3222   // Add argument registers to the end of the list so that they are known live
3223   // into the call.
3224 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3225 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
3226 RegsToPass[i].second.getValueType()));
3228 // Add a register mask operand representing the call-preserved registers.
3229 const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
3230 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
3231 assert(Mask && "Missing call preserved mask for calling convention");
3232 Ops.push_back(DAG.getRegisterMask(Mask));
3234 if (InFlag.getNode())
3235 Ops.push_back(InFlag);
3237   if (isTailCall) {
3238     // We used to do:
3239     //// If this is the first return lowered for this function, add the regs
3240     //// to the liveout set for the function.
3241     // This isn't right, although it's probably harmless on x86; liveouts
3242     // should be computed from returns not tail calls. Consider a void
3243     // function making a tail call to a function returning int.
3244     return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
3245   }
3247 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
3248 InFlag = Chain.getValue(1);
3250 // Create the CALLSEQ_END node.
3251 unsigned NumBytesForCalleeToPop;
3252 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3253 DAG.getTarget().Options.GuaranteedTailCallOpt))
3254 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
3255 else if (!Is64Bit && !IsTailCallConvention(CallConv) &&
3256 !Subtarget->getTargetTriple().isOSMSVCRT() &&
3257 SR == StackStructReturn)
3258 // If this is a call to a struct-return function, the callee
3259 // pops the hidden struct pointer, so we have to push it back.
3260 // This is common for Darwin/X86, Linux & Mingw32 targets.
3261 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
3262 NumBytesForCalleeToPop = 4;
3264 NumBytesForCalleeToPop = 0; // Callee pops nothing.
3266   // Returns a flag for retval copy to use.
3267   if (!IsSibcall) {
3268     Chain = DAG.getCALLSEQ_END(Chain,
3269                                DAG.getIntPtrConstant(NumBytesToPop, true),
3270                                DAG.getIntPtrConstant(NumBytesForCalleeToPop,
3271                                                      true),
3272                                InFlag, dl);
3273     InFlag = Chain.getValue(1);
3274   }
3276   // Handle result values, copying them out of physregs into vregs that we
3277   // return.
3278   return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
3279                          Ins, dl, DAG, InVals);
3280 }
3282 //===----------------------------------------------------------------------===//
3283 // Fast Calling Convention (tail call) implementation
3284 //===----------------------------------------------------------------------===//
3286 //  Like stdcall, the callee cleans up the arguments, except that ECX is
3287 //  reserved for storing the address of the tail-called function. Only 2
3288 //  registers are free for argument passing (inreg). Tail call optimization
3289 //  is performed provided:
3290 //                * tailcallopt is enabled
3291 //                * caller/callee are fastcc
3292 //  On X86_64, with GOT-style position-independent code, only local
3293 //  (within-module) calls are supported at the moment.
3294 //  To keep the stack aligned per the platform ABI, GetAlignedArgumentStackSize
3295 //  ensures that the argument delta is always a multiple of the stack
3296 //  alignment. (Dynamic linkers need this - darwin's dyld, for example.)
3297 //  If the tail-called function has more arguments than the caller, the caller
3298 //  needs to make sure that there is room to move the RETADDR to. This is
3299 //  achieved by reserving an area the size of the argument delta right after
3300 //  the original RETADDR, but before the saved frame pointer or the spilled
3301 //  registers, e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4).
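//  An illustrative stack layout for that case (a sketch, not normative):
//    arg1
//    arg2
//    RETADDR
//    [ new RETADDR
//      move area ]
//    (possible EBP)
//    callee-saved registers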
3313 /// GetAlignedArgumentStackSize - Round the stack size up to the form 16n + 12
3314 /// (stack-alignment multiple minus slot size) for a 16-byte align requirement.
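/// For illustration, assuming a 16-byte stack alignment and a 4-byte slot
/// size: StackSize = 20 has (20 & 15) = 4 <= 12, so it becomes
/// 20 + (12 - 4) = 28 = 16*1 + 12; StackSize = 30 has (30 & 15) = 14 > 12,
/// so it becomes (30 & ~15) + 16 + 12 = 44 = 16*2 + 12.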
3315 unsigned
3316 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
3317 SelectionDAG& DAG) const {
3318 MachineFunction &MF = DAG.getMachineFunction();
3319 const TargetMachine &TM = MF.getTarget();
3320 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
3321 TM.getSubtargetImpl()->getRegisterInfo());
3322 const TargetFrameLowering &TFI = *TM.getSubtargetImpl()->getFrameLowering();
3323 unsigned StackAlignment = TFI.getStackAlignment();
3324 uint64_t AlignMask = StackAlignment - 1;
3325 int64_t Offset = StackSize;
3326 unsigned SlotSize = RegInfo->getSlotSize();
3327   if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
3328     // The misalignment is at most StackAlignment - SlotSize; just add the difference.
3329     Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
3330   } else {
3331     // Mask out the lower bits and add the stack alignment once plus (StackAlignment - SlotSize).
3332     Offset = ((~AlignMask) & Offset) + StackAlignment +
3333              (StackAlignment-SlotSize);
3334   }
3335 
3336   return Offset;
3337 }
3338 /// MatchingStackOffset - Return true if the given stack call argument is
3339 /// already available in the same position (relatively) of the caller's
3340 /// incoming argument stack.
3342 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
3343 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
3344 const X86InstrInfo *TII) {
3345   unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
3346   int FI = INT_MAX;
3347 if (Arg.getOpcode() == ISD::CopyFromReg) {
3348 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
3349     if (!TargetRegisterInfo::isVirtualRegister(VR))
3350       return false;
3351 MachineInstr *Def = MRI->getVRegDef(VR);
3354 if (!Flags.isByVal()) {
3355       if (!TII->isLoadFromStackSlot(Def, FI))
3356         return false;
3357     } else {
3358       unsigned Opcode = Def->getOpcode();
3359 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
3360 Opcode == X86::LEA64_32r) &&
3361 Def->getOperand(1).isFI()) {
3362 FI = Def->getOperand(1).getIndex();
3363 Bytes = Flags.getByValSize();
3367 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
3368 if (Flags.isByVal())
3369 // ByVal argument is passed in as a pointer but it's now being
3370 // dereferenced. e.g.
3371 // define @foo(%struct.X* %A) {
3372       //   tail call @bar(%struct.X* byval %A)
3373       // }
3374       return false;
3375 SDValue Ptr = Ld->getBasePtr();
3376     FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
3377     if (!FINode)
3378       return false;
3379 FI = FINode->getIndex();
3380 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
3381 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
3382 FI = FINode->getIndex();
3383 Bytes = Flags.getByValSize();
3387 assert(FI != INT_MAX);
3388   if (!MFI->isFixedObjectIndex(FI))
3389     return false;
3390   return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
3391 }
3393 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3394 /// for tail call optimization. Targets which want to do tail call
3395 /// optimization should implement this function.
3397 X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
3398 CallingConv::ID CalleeCC,
3400 bool isCalleeStructRet,
3401 bool isCallerStructRet,
3403 const SmallVectorImpl<ISD::OutputArg> &Outs,
3404 const SmallVectorImpl<SDValue> &OutVals,
3405 const SmallVectorImpl<ISD::InputArg> &Ins,
3406 SelectionDAG &DAG) const {
3407   if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
3408     return false;
3410 // If -tailcallopt is specified, make fastcc functions tail-callable.
3411 const MachineFunction &MF = DAG.getMachineFunction();
3412 const Function *CallerF = MF.getFunction();
3414 // If the function return type is x86_fp80 and the callee return type is not,
3415 // then the FP_EXTEND of the call result is not a nop. It's not safe to
3416 // perform a tailcall optimization here.
3417   if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
3418     return false;
3420 CallingConv::ID CallerCC = CallerF->getCallingConv();
3421 bool CCMatch = CallerCC == CalleeCC;
3422 bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
3423 bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
3425   if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
3426     if (IsTailCallConvention(CalleeCC) && CCMatch)
3427       return true;
3428     return false;
3429   }
3431 // Look for obvious safe cases to perform tail call optimization that do not
3432 // require ABI changes. This is what gcc calls sibcall.
3434 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
3435 // emit a special epilogue.
3436 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
3437 DAG.getSubtarget().getRegisterInfo());
3438   if (RegInfo->needsStackRealignment(MF))
3439     return false;
3441 // Also avoid sibcall optimization if either caller or callee uses struct
3442 // return semantics.
3443   if (isCalleeStructRet || isCallerStructRet)
3444     return false;
3445 
3446   // A stdcall/thiscall caller is expected to clean up its arguments; the
3447 // callee isn't going to do that.
3448 // FIXME: this is more restrictive than needed. We could produce a tailcall
3449 // when the stack adjustment matches. For example, with a thiscall that takes
3450 // only one argument.
3451   if (!CCMatch && (CallerCC == CallingConv::X86_StdCall ||
3452                    CallerCC == CallingConv::X86_ThisCall))
3453     return false;
3454 
3455   // Do not sibcall optimize vararg calls unless all arguments are passed via
3456   // registers.
3457 if (isVarArg && !Outs.empty()) {
3459 // Optimizing for varargs on Win64 is unlikely to be safe without
3460 // additional testing.
3461     if (IsCalleeWin64 || IsCallerWin64)
3462       return false;
3464 SmallVector<CCValAssign, 16> ArgLocs;
3465     CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3466                    *DAG.getContext());
3468 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3469 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
3470       if (!ArgLocs[i].isRegLoc())
3471         return false;
3472   }
3474 // If the call result is in ST0 / ST1, it needs to be popped off the x87
3475 // stack. Therefore, if it's not used by the call it is not safe to optimize
3476 // this into a sibcall.
3477 bool Unused = false;
3478   for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3479     if (!Ins[i].Used) {
3480       Unused = true;
3481       break;
3482     }
3483   }
3484   if (Unused) {
3485 SmallVector<CCValAssign, 16> RVLocs;
3486     CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), RVLocs,
3487                    *DAG.getContext());
3488 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3489 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
3490 CCValAssign &VA = RVLocs[i];
3491       if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
3492         return false;
3493     }
3494   }
3496 // If the calling conventions do not match, then we'd better make sure the
3497 // results are returned in the same way as what the caller expects.
3498   if (!CCMatch) {
3499   SmallVector<CCValAssign, 16> RVLocs1;
3500   CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
3501                   *DAG.getContext());
3502 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
3504 SmallVector<CCValAssign, 16> RVLocs2;
3505   CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
3506                   *DAG.getContext());
3507 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
3509   if (RVLocs1.size() != RVLocs2.size())
3510     return false;
3511   for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
3512     if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
3513       return false;
3514     if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
3515       return false;
3516     if (RVLocs1[i].isRegLoc()) {
3517       if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
3518         return false;
3519     } else {
3520       if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
3521         return false;
3522     }
3523   }
3524   }
3526   // If the callee takes no arguments then go on to check the results of the
3527   // call.
3528 if (!Outs.empty()) {
3529 // Check if stack adjustment is needed. For now, do not do this if any
3530 // argument is passed on the stack.
3531 SmallVector<CCValAssign, 16> ArgLocs;
3532     CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3533                    *DAG.getContext());
3535 // Allocate shadow area for Win64
3536     if (IsCalleeWin64)
3537       CCInfo.AllocateStack(32, 8);
3539 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3540 if (CCInfo.getNextStackOffset()) {
3541 MachineFunction &MF = DAG.getMachineFunction();
3542       if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
3543         return false;
3545 // Check if the arguments are already laid out in the right way as
3546 // the caller's fixed stack objects.
3547 MachineFrameInfo *MFI = MF.getFrameInfo();
3548 const MachineRegisterInfo *MRI = &MF.getRegInfo();
3549 const X86InstrInfo *TII =
3550 static_cast<const X86InstrInfo *>(DAG.getSubtarget().getInstrInfo());
3551 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3552 CCValAssign &VA = ArgLocs[i];
3553 SDValue Arg = OutVals[i];
3554 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3555         if (VA.getLocInfo() == CCValAssign::Indirect)
3556           return false;
3557 if (!VA.isRegLoc()) {
3558         if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
3559                                  MFI, MRI, TII))
3560           return false;
3561       }
3562     }
3563   }
3565 // If the tailcall address may be in a register, then make sure it's
3566 // possible to register allocate for it. In 32-bit, the call address can
3567 // only target EAX, EDX, or ECX since the tail call must be scheduled after
3568 // callee-saved registers are restored. These happen to be the same
3569 // registers used to pass 'inreg' arguments so watch out for those.
3570 if (!Subtarget->is64Bit() &&
3571 ((!isa<GlobalAddressSDNode>(Callee) &&
3572 !isa<ExternalSymbolSDNode>(Callee)) ||
3573 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
3574 unsigned NumInRegs = 0;
3575     // In PIC we need an extra register to formulate the address computation
3576     // for the callee.
3577 unsigned MaxInRegs =
3578 (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
3580 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3581       CCValAssign &VA = ArgLocs[i];
3582       if (!VA.isRegLoc())
3583         continue;
3584       unsigned Reg = VA.getLocReg();
3585       switch (Reg) {
3586       default: break;
3587       case X86::EAX: case X86::EDX: case X86::ECX:
3588         if (++NumInRegs == MaxInRegs)
3589           return false;
3590         break;
3591       }
3592     }
3593   }
3594 
3595   return true;
3596 }
3599 FastISel *
3600 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
3601 const TargetLibraryInfo *libInfo) const {
3602 return X86::createFastISel(funcInfo, libInfo);
3605 //===----------------------------------------------------------------------===//
3606 // Other Lowering Hooks
3607 //===----------------------------------------------------------------------===//
3609 static bool MayFoldLoad(SDValue Op) {
3610 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
3613 static bool MayFoldIntoStore(SDValue Op) {
3614 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
3617 static bool isTargetShuffle(unsigned Opcode) {
3618   switch (Opcode) {
3619   default: return false;
3620 case X86ISD::BLENDI:
3621 case X86ISD::PSHUFB:
3622 case X86ISD::PSHUFD:
3623 case X86ISD::PSHUFHW:
3624 case X86ISD::PSHUFLW:
3626 case X86ISD::PALIGNR:
3627 case X86ISD::MOVLHPS:
3628 case X86ISD::MOVLHPD:
3629 case X86ISD::MOVHLPS:
3630 case X86ISD::MOVLPS:
3631 case X86ISD::MOVLPD:
3632 case X86ISD::MOVSHDUP:
3633 case X86ISD::MOVSLDUP:
3634 case X86ISD::MOVDDUP:
3637 case X86ISD::UNPCKL:
3638 case X86ISD::UNPCKH:
3639 case X86ISD::VPERMILPI:
3640 case X86ISD::VPERM2X128:
3641   case X86ISD::VPERMI:
3642     return true;
3643   }
3644 }
3646 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3647 SDValue V1, SelectionDAG &DAG) {
3649 default: llvm_unreachable("Unknown x86 shuffle node");
3650 case X86ISD::MOVSHDUP:
3651 case X86ISD::MOVSLDUP:
3652 case X86ISD::MOVDDUP:
3653 return DAG.getNode(Opc, dl, VT, V1);
3657 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3658 SDValue V1, unsigned TargetMask,
3659 SelectionDAG &DAG) {
3660   switch(Opc) {
3661   default: llvm_unreachable("Unknown x86 shuffle node");
3662 case X86ISD::PSHUFD:
3663 case X86ISD::PSHUFHW:
3664 case X86ISD::PSHUFLW:
3665 case X86ISD::VPERMILPI:
3666 case X86ISD::VPERMI:
3667 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
3671 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3672 SDValue V1, SDValue V2, unsigned TargetMask,
3673 SelectionDAG &DAG) {
3674   switch(Opc) {
3675   default: llvm_unreachable("Unknown x86 shuffle node");
3676 case X86ISD::PALIGNR:
3677 case X86ISD::VALIGN:
3679 case X86ISD::VPERM2X128:
3680 return DAG.getNode(Opc, dl, VT, V1, V2,
3681 DAG.getConstant(TargetMask, MVT::i8));
3685 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3686 SDValue V1, SDValue V2, SelectionDAG &DAG) {
3687   switch(Opc) {
3688   default: llvm_unreachable("Unknown x86 shuffle node");
3689 case X86ISD::MOVLHPS:
3690 case X86ISD::MOVLHPD:
3691 case X86ISD::MOVHLPS:
3692 case X86ISD::MOVLPS:
3693 case X86ISD::MOVLPD:
3696 case X86ISD::UNPCKL:
3697 case X86ISD::UNPCKH:
3698 return DAG.getNode(Opc, dl, VT, V1, V2);
3702 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
3703 MachineFunction &MF = DAG.getMachineFunction();
3704 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
3705 DAG.getSubtarget().getRegisterInfo());
3706 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3707 int ReturnAddrIndex = FuncInfo->getRAIndex();
3709 if (ReturnAddrIndex == 0) {
3710 // Set up a frame object for the return address.
3711 unsigned SlotSize = RegInfo->getSlotSize();
3712     ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize,
3713                                                            -(int64_t)SlotSize,
3714                                                            false);
3715 FuncInfo->setRAIndex(ReturnAddrIndex);
3718 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
3721 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
3722 bool hasSymbolicDisplacement) {
3723 // Offset should fit into 32 bit immediate field.
3724   if (!isInt<32>(Offset))
3725     return false;
3727   // If we don't have a symbolic displacement - we don't have any extra
3728   // restrictions.
3729   if (!hasSymbolicDisplacement)
3730     return true;
3732 // FIXME: Some tweaks might be needed for medium code model.
3733   if (M != CodeModel::Small && M != CodeModel::Kernel)
3734     return false;
3736   // For the small code model, we assume that the last object is 16MB before
3737   // the end of the 31-bit boundary. We may also accept pretty large negative
3738   // constants, knowing that all objects are in the positive half of the address space.
3739   if (M == CodeModel::Small && Offset < 16*1024*1024)
3740     return true;
3742   // For the kernel code model we know that all objects reside in the negative
3743   // half of the 32-bit address space. Negative offsets may point outside it
3744   // and must be rejected, while pretty large positive ones are fine.
3745   if (M == CodeModel::Kernel && Offset >= 0)
3746     return true;
3747 
3748   return false;
3749 }
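// For example: with a symbolic displacement, the small code model accepts an
// offset of 15*1024*1024 (just under 16MB), while the kernel code model
// rejects -8 because a negative offset could fall outside the negative 2GB
// where kernel objects live.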
3751 /// isCalleePop - Determines whether the callee is required to pop its
3752 /// own arguments. Callee pop is necessary to support tail calls.
3753 bool X86::isCalleePop(CallingConv::ID CallingConv,
3754 bool is64Bit, bool IsVarArg, bool TailCallOpt) {
3755   switch (CallingConv) {
3756   default:
3757     return false;
3758   case CallingConv::X86_StdCall:
3759   case CallingConv::X86_FastCall:
3760   case CallingConv::X86_ThisCall:
3761     return !is64Bit;
3762   case CallingConv::Fast:
3763   case CallingConv::GHC:
3764   case CallingConv::HiPE:
3765     if (IsVarArg)
3766       return false;
3767     return TailCallOpt;
3768   }
3769 }
3771 /// \brief Return true if the condition is an unsigned comparison operation.
3772 static bool isX86CCUnsigned(unsigned X86CC) {
3774 default: llvm_unreachable("Invalid integer condition!");
3775 case X86::COND_E: return true;
3776 case X86::COND_G: return false;
3777 case X86::COND_GE: return false;
3778 case X86::COND_L: return false;
3779 case X86::COND_LE: return false;
3780 case X86::COND_NE: return true;
3781 case X86::COND_B: return true;
3782 case X86::COND_A: return true;
3783 case X86::COND_BE: return true;
3784 case X86::COND_AE: return true;
3786 llvm_unreachable("covered switch fell through?!");
3789 /// TranslateX86CC - do a one to one translation of a ISD::CondCode to the X86
3790 /// specific condition code, returning the condition code and the LHS/RHS of the
3791 /// comparison to make.
3792 static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
3793 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
3794   if (!isFP) {
3795     if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
3796 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
3797 // X > -1 -> X == 0, jump !sign.
3798 RHS = DAG.getConstant(0, RHS.getValueType());
3799 return X86::COND_NS;
3801       if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
3802         // X < 0   -> X == 0, jump on sign.
3803         return X86::COND_S;
3804       }
3805       if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
3806         // X < 1   -> X <= 0
3807         RHS = DAG.getConstant(0, RHS.getValueType());
3808         return X86::COND_LE;
3809       }
3810     }
3812 switch (SetCCOpcode) {
3813 default: llvm_unreachable("Invalid integer condition!");
3814 case ISD::SETEQ: return X86::COND_E;
3815 case ISD::SETGT: return X86::COND_G;
3816 case ISD::SETGE: return X86::COND_GE;
3817 case ISD::SETLT: return X86::COND_L;
3818 case ISD::SETLE: return X86::COND_LE;
3819 case ISD::SETNE: return X86::COND_NE;
3820 case ISD::SETULT: return X86::COND_B;
3821 case ISD::SETUGT: return X86::COND_A;
3822 case ISD::SETULE: return X86::COND_BE;
3823     case ISD::SETUGE: return X86::COND_AE;
3824     }
3825   }
3827 // First determine if it is required or is profitable to flip the operands.
3829 // If LHS is a foldable load, but RHS is not, flip the condition.
3830 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
3831 !ISD::isNON_EXTLoad(RHS.getNode())) {
3832 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
3833     std::swap(LHS, RHS);
3834   }
3835 
3836   switch (SetCCOpcode) {
3837   default: break;
3838   case ISD::SETOLT:
3839   case ISD::SETOLE:
3840   case ISD::SETUGT:
3841   case ISD::SETUGE:
3842     std::swap(LHS, RHS);
3846   // On a floating point condition, the flags are set as follows:
3847   //  ZF  PF  CF   op
3848   //   0 | 0 | 0 | X > Y
3849   //   0 | 0 | 1 | X < Y
3850   //   1 | 0 | 0 | X == Y
3851   //   1 | 1 | 1 | unordered
3852 switch (SetCCOpcode) {
3853 default: llvm_unreachable("Condcode should be pre-legalized away");
3855 case ISD::SETEQ: return X86::COND_E;
3856 case ISD::SETOLT: // flipped
3858 case ISD::SETGT: return X86::COND_A;
3859 case ISD::SETOLE: // flipped
3861 case ISD::SETGE: return X86::COND_AE;
3862 case ISD::SETUGT: // flipped
3864 case ISD::SETLT: return X86::COND_B;
3865 case ISD::SETUGE: // flipped
3867 case ISD::SETLE: return X86::COND_BE;
3869 case ISD::SETNE: return X86::COND_NE;
3870 case ISD::SETUO: return X86::COND_P;
3871 case ISD::SETO: return X86::COND_NP;
3873   case ISD::SETUNE: return X86::COND_INVALID;
3874   }
3875 }
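// For example: lowering 'setcc x, y, setolt' first swaps the operands (the
// '// flipped' cases above), so X < Y is evaluated as Y > X and maps to
// COND_A, i.e. CF == 0 and ZF == 0 after the unsigned-style FP compare.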
3877 /// hasFPCMov - is there a floating-point cmov for the specific X86 condition
3878 /// code? Current x86 isa includes the following FP cmov instructions:
3879 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
3880 static bool hasFPCMov(unsigned X86CC) {
3881   switch (X86CC) {
3882   default:
3883     return false;
3884   case X86::COND_B:
3885   case X86::COND_BE:
3886   case X86::COND_E:
3887   case X86::COND_P:
3888   case X86::COND_A:
3889   case X86::COND_AE:
3890   case X86::COND_NE:
3891   case X86::COND_NP:
3892     return true;
3893   }
3894 }
3896 /// isFPImmLegal - Returns true if the target can instruction select the
3897 /// specified FP immediate natively. If false, the legalizer will
3898 /// materialize the FP immediate as a load from a constant pool.
3899 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
3900 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
3901     if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
3902       return true;
3903   }
3904   return false;
3905 }
3907 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
3908                                                ISD::LoadExtType ExtTy,
3909                                                EVT NewVT) const {
3910 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
3911 // relocation target a movq or addq instruction: don't let the load shrink.
3912 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
3913 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
3914 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
3915       return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
3916   return true;
3917 }
3919 /// \brief Returns true if it is beneficial to convert a load of a constant
3920 /// to just the constant itself.
3921 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
3922                                                           Type *Ty) const {
3923 assert(Ty->isIntegerTy());
3925 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3926   if (BitSize == 0 || BitSize > 64)
3927     return false;
3928 
3929   return true;
3930 }
3931 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
3932 unsigned Index) const {
3933   if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
3934     return false;
3935 
3936   return (Index == 0 || Index == ResVT.getVectorNumElements());
3937 }
3939 bool X86TargetLowering::isCheapToSpeculateCttz() const {
3940 // Speculate cttz only if we can directly use TZCNT.
3941 return Subtarget->hasBMI();
3944 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
3945 // Speculate ctlz only if we can directly use LZCNT.
3946 return Subtarget->hasLZCNT();
3949 /// isUndefOrInRange - Return true if Val is undef or if its value falls within
3950 /// the specified range [Low, Hi).
3951 static bool isUndefOrInRange(int Val, int Low, int Hi) {
3952   return (Val < 0) || (Val >= Low && Val < Hi);
3953 }
3955 /// isUndefOrEqual - Val is either less than zero (undef) or equal to the
3956 /// specified value.
3957 static bool isUndefOrEqual(int Val, int CmpVal) {
3958 return (Val < 0 || Val == CmpVal);
3961 /// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
3962 /// from position Pos and ending in Pos+Size, falls within the specified
3963 /// sequential range [Low, Low+Size), or is undef.
3964 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
3965 unsigned Pos, unsigned Size, int Low) {
3966   for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
3967     if (!isUndefOrEqual(Mask[i], Low))
3968       return false;
3969   return true;
3970 }
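// For example, Mask = <4, 5, -1, 7> with Pos = 0, Size = 4 and Low = 4
// returns true: each defined element equals its expected sequential value,
// and the -1 (undef) entry is accepted anywhere.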
3972 /// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
3973 /// is suitable for input to PSHUFD; that is, it references only one operand.
3974 /// By default it matches against the first operand.
3975 static bool isPSHUFDMask(ArrayRef<int> Mask, MVT VT,
3976 bool TestSecondOperand = false) {
3977 if (VT != MVT::v4f32 && VT != MVT::v4i32 &&
3978       VT != MVT::v2f64 && VT != MVT::v2i64)
3979     return false;
3981 unsigned NumElems = VT.getVectorNumElements();
3982 unsigned Lo = TestSecondOperand ? NumElems : 0;
3983 unsigned Hi = Lo + NumElems;
3985   for (unsigned i = 0; i < NumElems; ++i)
3986     if (!isUndefOrInRange(Mask[i], (int)Lo, (int)Hi))
3987       return false;
3988 
3989   return true;
3990 }
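// For example, for v4i32 the mask <2, 3, 0, 1> only references the first
// operand and is a valid PSHUFD mask (it would encode as imm8 = 0x4E).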
3992 /// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
3993 /// is suitable for input to PSHUFHW.
3994 static bool isPSHUFHWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3995   if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3996     return false;
3998 // Lower quadword copied in order or undef.
3999   if (!isSequentialOrUndefInRange(Mask, 0, 4, 0))
4000     return false;
4002 // Upper quadword shuffled.
4003 for (unsigned i = 4; i != 8; ++i)
4004     if (!isUndefOrInRange(Mask[i], 4, 8))
4005       return false;
4007 if (VT == MVT::v16i16) {
4008 // Lower quadword copied in order or undef.
4009     if (!isSequentialOrUndefInRange(Mask, 8, 4, 8))
4010       return false;
4012 // Upper quadword shuffled.
4013 for (unsigned i = 12; i != 16; ++i)
4014       if (!isUndefOrInRange(Mask[i], 12, 16))
4015         return false;
4016   }
4017 
4018   return true;
4019 }
4021 /// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
4022 /// is suitable for input to PSHUFLW.
4023 static bool isPSHUFLWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4024   if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
4025     return false;
4027 // Upper quadword copied in order.
4028   if (!isSequentialOrUndefInRange(Mask, 4, 4, 4))
4029     return false;
4031 // Lower quadword shuffled.
4032 for (unsigned i = 0; i != 4; ++i)
4033     if (!isUndefOrInRange(Mask[i], 0, 4))
4034       return false;
4036 if (VT == MVT::v16i16) {
4037 // Upper quadword copied in order.
4038     if (!isSequentialOrUndefInRange(Mask, 12, 4, 12))
4039       return false;
4041 // Lower quadword shuffled.
4042 for (unsigned i = 8; i != 12; ++i)
4043       if (!isUndefOrInRange(Mask[i], 8, 12))
4044         return false;
4045   }
4046 
4047   return true;
4048 }
4050 /// \brief Return true if the mask specifies a shuffle of elements that is
4051 /// suitable for input to intralane (palignr) or interlane (valign) vector
4052 /// shift.
4053 static bool isAlignrMask(ArrayRef<int> Mask, MVT VT, bool InterLane) {
4054 unsigned NumElts = VT.getVectorNumElements();
4055 unsigned NumLanes = InterLane ? 1: VT.getSizeInBits()/128;
4056 unsigned NumLaneElts = NumElts/NumLanes;
4058 // Do not handle 64-bit element shuffles with palignr.
4059   if (NumLaneElts == 2)
4060     return false;
4062 for (unsigned l = 0; l != NumElts; l+=NumLaneElts) {
4063     unsigned i;
4064     for (i = 0; i != NumLaneElts; ++i) {
4065       if (Mask[i+l] >= 0)
4066         break;
4067     }
4069 // Lane is all undef, go to next lane
4070     if (i == NumLaneElts)
4071       continue;
4073 int Start = Mask[i+l];
4075 // Make sure its in this lane in one of the sources
4076 if (!isUndefOrInRange(Start, l, l+NumLaneElts) &&
4077         !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts))
4078       return false;
4080 // If not lane 0, then we must match lane 0
4081     if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l))
4082       return false;
4084 // Correct second source to be contiguous with first source
4085 if (Start >= (int)NumElts)
4086 Start -= NumElts - NumLaneElts;
4088 // Make sure we're shifting in the right direction.
4089     if (Start <= (int)(i+l))
4090       return false;
4094 // Check the rest of the elements to see if they are consecutive.
4095 for (++i; i != NumLaneElts; ++i) {
4096 int Idx = Mask[i+l];
4098 // Make sure its in this lane
4099 if (!isUndefOrInRange(Idx, l, l+NumLaneElts) &&
4100           !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts))
4101         return false;
4103 // If not lane 0, then we must match lane 0
4104       if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l))
4105         return false;
4107 if (Idx >= (int)NumElts)
4108 Idx -= NumElts - NumLaneElts;
4110       if (!isUndefOrEqual(Idx, Start+i))
4111         return false;
4112     }
4113   }
4114 
4115   return true;
4116 }
4119 /// \brief Return true if the node specifies a shuffle of elements that is
4120 /// suitable for input to PALIGNR.
4121 static bool isPALIGNRMask(ArrayRef<int> Mask, MVT VT,
4122 const X86Subtarget *Subtarget) {
4123 if ((VT.is128BitVector() && !Subtarget->hasSSSE3()) ||
4124 (VT.is256BitVector() && !Subtarget->hasInt256()) ||
4125 VT.is512BitVector())
4126     // FIXME: Add AVX512BW.
4127     return false;
4129   return isAlignrMask(Mask, VT, false);
4130 }
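// For example, for v8i16 the mask <2, 3, 4, 5, 6, 7, 8, 9> is accepted: it
// selects a window that is contiguous across the concatenation of the two
// sources, which is exactly what PALIGNR computes (here with a byte shift
// of 4).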
4132 /// \brief Return true if the node specifies a shuffle of elements that is
4133 /// suitable for input to VALIGN.
4134 static bool isVALIGNMask(ArrayRef<int> Mask, MVT VT,
4135 const X86Subtarget *Subtarget) {
4136 // FIXME: Add AVX512VL.
4137   if (!VT.is512BitVector() || !Subtarget->hasAVX512())
4138     return false;
4139 return isAlignrMask(Mask, VT, true);
4142 /// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
4143 /// the two vector operands have swapped position.
4144 static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
4145 unsigned NumElems) {
4146   for (unsigned i = 0; i != NumElems; ++i) {
4147     int idx = Mask[i];
4148     if (idx < 0)
4149       continue;
4150     else if (idx < (int)NumElems)
4151       Mask[i] = idx + NumElems;
4152     else
4153       Mask[i] = idx - NumElems;
4154   }
4155 }
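// For example, with NumElems = 4 the mask <0, 1, 4, 5> becomes <4, 5, 0, 1>:
// references to the first operand now name the second and vice versa, while
// undef (negative) entries are left untouched.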
4157 /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
4158 /// specifies a shuffle of elements that is suitable for input to 128/256-bit
4159 /// SHUFPS and SHUFPD. If Commuted is true, then it checks for sources to be
4160 /// reverse of what x86 shuffles want.
4161 static bool isSHUFPMask(ArrayRef<int> Mask, MVT VT, bool Commuted = false) {
4163 unsigned NumElems = VT.getVectorNumElements();
4164 unsigned NumLanes = VT.getSizeInBits()/128;
4165 unsigned NumLaneElems = NumElems/NumLanes;
4167   if (NumLaneElems != 2 && NumLaneElems != 4)
4168     return false;
4170 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4171 bool symetricMaskRequired =
4172 (VT.getSizeInBits() >= 256) && (EltSize == 32);
4174 // VSHUFPSY divides the resulting vector into 4 chunks.
4175   // The sources are also split into 4 chunks, and each destination
4176 // chunk must come from a different source chunk.
4178 // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0
4179   //  SRC2 =>  Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
4181 // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4,
4182 // Y3..Y0, Y3..Y0, X3..X0, X3..X0
4184 // VSHUFPDY divides the resulting vector into 4 chunks.
4185   // The sources are also split into 4 chunks, and each destination
4186 // chunk must come from a different source chunk.
4188 // SRC1 => X3 X2 X1 X0
4189 // SRC2 => Y3 Y2 Y1 Y0
4191 // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0
4193 SmallVector<int, 4> MaskVal(NumLaneElems, -1);
4194 unsigned HalfLaneElems = NumLaneElems/2;
4195 for (unsigned l = 0; l != NumElems; l += NumLaneElems) {
4196 for (unsigned i = 0; i != NumLaneElems; ++i) {
4197 int Idx = Mask[i+l];
4198 unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0);
4199       if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems))
4200         return false;
4201 // For VSHUFPSY, the mask of the second half must be the same as the
4202 // first but with the appropriate offsets. This works in the same way as
4203 // VPERMILPS works with masks.
4204       if (!symetricMaskRequired || Idx < 0)
4205         continue;
4206       if (MaskVal[i] < 0) {
4207         MaskVal[i] = Idx - l;
4208         continue;
4209       }
4210       if ((signed)(Idx - l) != MaskVal[i])
4211         return false;
4212     }
4213   }
4214 
4215   return true;
4216 }
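// For example, for v4f32 the mask <2, 3, 4, 7> is a valid SHUFPS mask: the
// low half of the result selects from V1 (range [0,4)) and the high half
// from V2 (range [4,8)).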
4218 /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
4219 /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
4220 static bool isMOVHLPSMask(ArrayRef<int> Mask, MVT VT) {
4221   if (!VT.is128BitVector())
4222     return false;
4223 
4224   unsigned NumElems = VT.getVectorNumElements();
4225 
4226   if (NumElems != 4)
4227     return false;
4228 
4229 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
4230 return isUndefOrEqual(Mask[0], 6) &&
4231 isUndefOrEqual(Mask[1], 7) &&
4232 isUndefOrEqual(Mask[2], 2) &&
4233 isUndefOrEqual(Mask[3], 3);
4236 /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
4237 /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
4239 static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, MVT VT) {
4240   if (!VT.is128BitVector())
4241     return false;
4242 
4243   unsigned NumElems = VT.getVectorNumElements();
4244 
4245   if (NumElems != 4)
4246     return false;
4247 
4248 return isUndefOrEqual(Mask[0], 2) &&
4249 isUndefOrEqual(Mask[1], 3) &&
4250 isUndefOrEqual(Mask[2], 2) &&
4251 isUndefOrEqual(Mask[3], 3);
4254 /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
4255 /// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
4256 static bool isMOVLPMask(ArrayRef<int> Mask, MVT VT) {
4257   if (!VT.is128BitVector())
4258     return false;
4260 unsigned NumElems = VT.getVectorNumElements();
4262   if (NumElems != 2 && NumElems != 4)
4263     return false;
4265 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4266     if (!isUndefOrEqual(Mask[i], i + NumElems))
4267       return false;
4269 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
4270     if (!isUndefOrEqual(Mask[i], i))
4271       return false;
4272 
4273   return true;
4274 }
4276 /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
4277 /// specifies a shuffle of elements that is suitable for input to MOVLHPS.
4278 static bool isMOVLHPSMask(ArrayRef<int> Mask, MVT VT) {
4279   if (!VT.is128BitVector())
4280     return false;
4282 unsigned NumElems = VT.getVectorNumElements();
4284   if (NumElems != 2 && NumElems != 4)
4285     return false;
4287 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4288     if (!isUndefOrEqual(Mask[i], i))
4289       return false;
4291 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4292     if (!isUndefOrEqual(Mask[i + e], i + NumElems))
4293       return false;
4294 
4295   return true;
4296 }
4298 /// isINSERTPSMask - Return true if the specified VECTOR_SHUFFLE operand
4299 /// specifies a shuffle of elements that is suitable for input to INSERTPS.
4300 /// i. e: If all but one element come from the same vector.
4301 static bool isINSERTPSMask(ArrayRef<int> Mask, MVT VT) {
4302 // TODO: Deal with AVX's VINSERTPS
4303   if (!VT.is128BitVector() || (VT != MVT::v4f32 && VT != MVT::v4i32))
4304     return false;
4306 unsigned CorrectPosV1 = 0;
4307 unsigned CorrectPosV2 = 0;
4308 for (int i = 0, e = (int)VT.getVectorNumElements(); i != e; ++i) {
4309     if (Mask[i] == -1) {
4310       ++CorrectPosV1;
4311       ++CorrectPosV2;
4312       continue;
4313     }
4314 
4315     if (Mask[i] == i)
4316       ++CorrectPosV1;
4317     else if (Mask[i] == i + 4)
4318       ++CorrectPosV2;
4319   }
4320 
4321 if (CorrectPosV1 == 3 || CorrectPosV2 == 3)
4322 // We have 3 elements (undefs count as elements from any vector) from one
4323     // vector, and one from another.
4324     return true;
4325 
4326   return false;
4327 }
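// For example, <0, 1, 2, 7> has three elements in place from V1 and one
// (Mask[3] == 7 == 3 + 4) taken from V2, so it can be lowered to a single
// INSERTPS.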
4330 // Some special combinations that can be optimized.
4333 SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp,
4334 SelectionDAG &DAG) {
4335   MVT VT = SVOp->getSimpleValueType(0);
4336   SDLoc dl(SVOp);
4337 
4338   if (VT != MVT::v8i32 && VT != MVT::v8f32)
4339     return SDValue();
4340 
4341 ArrayRef<int> Mask = SVOp->getMask();
4343 // These are the special masks that may be optimized.
4344 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14};
4345 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15};
4346 bool MatchEvenMask = true;
4347 bool MatchOddMask = true;
4348 for (int i=0; i<8; ++i) {
4349 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i]))
4350 MatchEvenMask = false;
4351 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i]))
4352 MatchOddMask = false;
4355 if (!MatchEvenMask && !MatchOddMask)
4358 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT);
4360 SDValue Op0 = SVOp->getOperand(0);
4361 SDValue Op1 = SVOp->getOperand(1);
4363 if (MatchEvenMask) {
4364     // Shift the second operand right by 32 bits.
4365     static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 };
4366     Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask);
4367   } else {
4368     // Shift the first operand left by 32 bits.
4369     static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 };
4370     Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask);
4371   }
4372 static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15};
4373 return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask);
4376 /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
4377 /// specifies a shuffle of elements that is suitable for input to UNPCKL.
4378 static bool isUNPCKLMask(ArrayRef<int> Mask, MVT VT,
4379 bool HasInt256, bool V2IsSplat = false) {
4381 assert(VT.getSizeInBits() >= 128 &&
4382 "Unsupported vector type for unpckl");
4384 unsigned NumElts = VT.getVectorNumElements();
4385 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4386       (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4387     return false;
4389 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4390 "Unsupported vector type for unpckh");
4392 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4393 unsigned NumLanes = VT.getSizeInBits()/128;
4394 unsigned NumLaneElts = NumElts/NumLanes;
4396 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4397 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4398 int BitI = Mask[l+i];
4399 int BitI1 = Mask[l+i+1];
4400       if (!isUndefOrEqual(BitI, j))
4401         return false;
4402       if (V2IsSplat) {
4403         if (!isUndefOrEqual(BitI1, NumElts))
4404           return false;
4405       } else {
4406         if (!isUndefOrEqual(BitI1, j + NumElts))
4407           return false;
4408       }
4409     }
4410   }
4411 
4412   return true;
4413 }
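// For example, for v4i32 the canonical UNPCKL mask is <0, 4, 1, 5>: the
// low-half elements of the two sources are interleaved.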
4415 /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
4416 /// specifies a shuffle of elements that is suitable for input to UNPCKH.
4417 static bool isUNPCKHMask(ArrayRef<int> Mask, MVT VT,
4418 bool HasInt256, bool V2IsSplat = false) {
4419 assert(VT.getSizeInBits() >= 128 &&
4420 "Unsupported vector type for unpckh");
4422 unsigned NumElts = VT.getVectorNumElements();
4423 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4424       (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4425     return false;
4427 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4428 "Unsupported vector type for unpckh");
4430 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4431 unsigned NumLanes = VT.getSizeInBits()/128;
4432 unsigned NumLaneElts = NumElts/NumLanes;
4434 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4435 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4436 int BitI = Mask[l+i];
4437 int BitI1 = Mask[l+i+1];
4438       if (!isUndefOrEqual(BitI, j))
4439         return false;
4440       if (V2IsSplat) {
4441         if (!isUndefOrEqual(BitI1, NumElts))
4442           return false;
4443       } else {
4444         if (!isUndefOrEqual(BitI1, j+NumElts))
4445           return false;
4446       }
4447     }
4448   }
4449 
4450   return true;
4451 }
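// For example, for v4i32 the canonical UNPCKH mask is <2, 6, 3, 7>: the
// high-half elements of the two sources are interleaved.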
4452 /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
4453 /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
4455 static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4456 unsigned NumElts = VT.getVectorNumElements();
4457 bool Is256BitVec = VT.is256BitVector();
4459   if (VT.is512BitVector())
4460     return false;
4461 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4462 "Unsupported vector type for unpckh");
4464 if (Is256BitVec && NumElts != 4 && NumElts != 8 &&
4465       (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4466     return false;
4468 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern
4469 // FIXME: Need a better way to get rid of this, there's no latency difference
4470 // between UNPCKLPD and MOVDDUP, the later should always be checked first and
4471 // the former later. We should also remove the "_undef" special mask.
4472   if (NumElts == 4 && Is256BitVec)
4473     return false;
4475 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4476 // independently on 128-bit lanes.
4477 unsigned NumLanes = VT.getSizeInBits()/128;
4478 unsigned NumLaneElts = NumElts/NumLanes;
4480 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4481 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4482 int BitI = Mask[l+i];
4483 int BitI1 = Mask[l+i+1];
4485       if (!isUndefOrEqual(BitI, j))
4486         return false;
4487       if (!isUndefOrEqual(BitI1, j))
4488         return false;
4489     }
4490   }
4491 
4492   return true;
4493 }
4495 /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
4496 /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
4498 static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4499 unsigned NumElts = VT.getVectorNumElements();
4501   if (VT.is512BitVector())
4502     return false;
4504 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4505 "Unsupported vector type for unpckh");
4507 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4508       (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4509     return false;
4511 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4512 // independently on 128-bit lanes.
4513 unsigned NumLanes = VT.getSizeInBits()/128;
4514 unsigned NumLaneElts = NumElts/NumLanes;
4516 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4517 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4518 int BitI = Mask[l+i];
4519 int BitI1 = Mask[l+i+1];
4520       if (!isUndefOrEqual(BitI, j))
4521         return false;
4522       if (!isUndefOrEqual(BitI1, j))
4523         return false;
4524     }
4525   }
4526   return true;
4527 }
4529 // Match for the INSERTI64x4/INSERTF64x4 instructions, which build the result
4530 // from 256-bit halves of the sources: (src0[0], src1[0]) or (src1[0], src0[1]).
4531 static bool isINSERT64x4Mask(ArrayRef<int> Mask, MVT VT, unsigned int *Imm) {
4532   if (!VT.is512BitVector())
4533     return false;
4535 unsigned NumElts = VT.getVectorNumElements();
4536 unsigned HalfSize = NumElts/2;
4537   if (isSequentialOrUndefInRange(Mask, 0, HalfSize, 0)) {
4538     if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, NumElts)) {
4539       *Imm = 1;
4540       return true;
4541     }
4542   }
4543   if (isSequentialOrUndefInRange(Mask, 0, HalfSize, NumElts)) {
4544     if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, HalfSize)) {
4545       *Imm = 0;
4546       return true;
4547     }
4548   }
4549   return false;
4550 }
4552 /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
4553 /// specifies a shuffle of elements that is suitable for input to MOVSS,
4554 /// MOVSD, and MOVD, i.e. setting the lowest element.
4555 static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
4556   if (VT.getVectorElementType().getSizeInBits() < 32)
4557     return false;
4558   if (!VT.is128BitVector())
4559     return false;
4560 
4561 unsigned NumElts = VT.getVectorNumElements();
4563   if (!isUndefOrEqual(Mask[0], NumElts))
4564     return false;
4566 for (unsigned i = 1; i != NumElts; ++i)
4567     if (!isUndefOrEqual(Mask[i], i))
4568       return false;
4569 
4570   return true;
4571 }
4573 /// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered
4574 /// as permutations between 128-bit chunks or halves. As an example: this
4575 /// shuffle below:
4576 ///   vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
4577 /// The first half comes from the second half of V1 and the second half from
4578 /// the second half of V2.
4579 static bool isVPERM2X128Mask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4580   if (!HasFp256 || !VT.is256BitVector())
4581     return false;
4583 // The shuffle result is divided into half A and half B. In total the two
4584 // sources have 4 halves, namely: C, D, E, F. The final values of A and
4585 // B must come from C, D, E or F.
4586 unsigned HalfSize = VT.getVectorNumElements()/2;
4587 bool MatchA = false, MatchB = false;
4589 // Check if A comes from one of C, D, E, F.
4590 for (unsigned Half = 0; Half != 4; ++Half) {
4591     if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
4592       MatchA = true;
4593       break;
4594     }
4595   }
4596 
4597 // Check if B comes from one of C, D, E, F.
4598 for (unsigned Half = 0; Half != 4; ++Half) {
4599     if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
4600       MatchB = true;
4601       break;
4602     }
4603   }
4604 
4605 return MatchA && MatchB;
4608 /// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
4609 /// the specified VECTOR_SHUFFLE mask with VPERM2F128/VPERM2I128 instructions.
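/// For example, with the v8i32 mask <4, 5, 6, 7, 12, 13, 14, 15> the low half
/// of the result selects source half 1 (the upper half of V1) and the high
/// half selects source half 3 (the upper half of V2), giving the immediate
/// 0x31 (1 | (3 << 4)).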
4610 static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
4611 MVT VT = SVOp->getSimpleValueType(0);
4613 unsigned HalfSize = VT.getVectorNumElements()/2;
4615 unsigned FstHalf = 0, SndHalf = 0;
4616 for (unsigned i = 0; i < HalfSize; ++i) {
4617 if (SVOp->getMaskElt(i) > 0) {
4618 FstHalf = SVOp->getMaskElt(i)/HalfSize;
4622 for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
4623 if (SVOp->getMaskElt(i) > 0) {
4624 SndHalf = SVOp->getMaskElt(i)/HalfSize;
4629 return (FstHalf | (SndHalf << 4));
4632 // Symmetric in-lane mask. Each lane has 4 elements (for imm8).
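// For example, the v8f32 mask <1, 0, 3, 2, 5, 4, 7, 6> repeats the in-lane
// pattern 1,0,3,2 in both lanes and encodes as Imm8 = 0xB1.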
4633 static bool isPermImmMask(ArrayRef<int> Mask, MVT VT, unsigned& Imm8) {
4634 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4638 unsigned NumElts = VT.getVectorNumElements();
4640 if (VT.is128BitVector() || (VT.is256BitVector() && EltSize == 64)) {
4641 for (unsigned i = 0; i != NumElts; ++i) {
4644 Imm8 |= Mask[i] << (i*2);
4649 unsigned LaneSize = 4;
4650 SmallVector<int, 4> MaskVal(LaneSize, -1);
4652 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4653 for (unsigned i = 0; i != LaneSize; ++i) {
4654 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4658 if (MaskVal[i] < 0) {
4659 MaskVal[i] = Mask[i+l] - l;
4660 Imm8 |= MaskVal[i] << (i*2);
4663 if (Mask[i+l] != (signed)(MaskVal[i]+l))
4670 /// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
4671 /// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
4672 /// Note that VPERMIL mask matching is different depending on whether the
4673 /// underlying type is 32- or 64-bit. For VPERMILPS the high half of the mask
4674 /// should point to the same elements as the low half, but in the higher half
4675 /// of the source. In VPERMILPD the two lanes can be shuffled independently of
4676 /// each other, with the restriction that lanes can't be crossed. Also handles PSHUFDY.
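/// For example, <1, 0, 3, 2, 5, 4, 7, 6> is a valid v8f32 VPERMILPS mask,
/// and <1, 0, 3, 2> is a valid v4f64 VPERMILPD mask.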
4677 static bool isVPERMILPMask(ArrayRef<int> Mask, MVT VT) {
4678 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4679 if (VT.getSizeInBits() < 256 || EltSize < 32)
4681 bool symmetricMaskRequired = (EltSize == 32);
4682 unsigned NumElts = VT.getVectorNumElements();
4684 unsigned NumLanes = VT.getSizeInBits()/128;
4685 unsigned LaneSize = NumElts/NumLanes;
4686 // 2 or 4 elements in one lane
4688 SmallVector<int, 4> ExpectedMaskVal(LaneSize, -1);
4689 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4690 for (unsigned i = 0; i != LaneSize; ++i) {
4691 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4693 if (symmetricMaskRequired) {
4694 if (ExpectedMaskVal[i] < 0 && Mask[i+l] >= 0) {
4695 ExpectedMaskVal[i] = Mask[i+l] - l;
4698 if (!isUndefOrEqual(Mask[i+l], ExpectedMaskVal[i]+l))
4706 /// isCommutedMOVLMask - Returns true if the shuffle mask is the reverse of
4707 /// what X86 MOVSS wants: the lowest element must be the lowest element of
4708 /// vector 2, and the other elements must come from vector 1 in order.
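/// For example, the v4i32 mask <0, 5, 6, 7> takes the lowest element from V1
/// and the remaining elements from V2, the commuted form of isMOVLMask.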
4709 static bool isCommutedMOVLMask(ArrayRef<int> Mask, MVT VT,
4710 bool V2IsSplat = false, bool V2IsUndef = false) {
4711 if (!VT.is128BitVector())
4714 unsigned NumOps = VT.getVectorNumElements();
4715 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
4718 if (!isUndefOrEqual(Mask[0], 0))
4721 for (unsigned i = 1; i != NumOps; ++i)
4722 if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
4723 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
4724 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
4730 /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4731 /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
4732 /// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
4733 static bool isMOVSHDUPMask(ArrayRef<int> Mask, MVT VT,
4734 const X86Subtarget *Subtarget) {
4735 if (!Subtarget->hasSSE3())
4738 unsigned NumElems = VT.getVectorNumElements();
4740 if ((VT.is128BitVector() && NumElems != 4) ||
4741 (VT.is256BitVector() && NumElems != 8) ||
4742 (VT.is512BitVector() && NumElems != 16))
4745 // "i+1" is the value the indexed mask element must have
4746 for (unsigned i = 0; i != NumElems; i += 2)
4747 if (!isUndefOrEqual(Mask[i], i+1) ||
4748 !isUndefOrEqual(Mask[i+1], i+1))
4754 /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4755 /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
4756 /// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
4757 static bool isMOVSLDUPMask(ArrayRef<int> Mask, MVT VT,
4758 const X86Subtarget *Subtarget) {
4759 if (!Subtarget->hasSSE3())
4762 unsigned NumElems = VT.getVectorNumElements();
4764 if ((VT.is128BitVector() && NumElems != 4) ||
4765 (VT.is256BitVector() && NumElems != 8) ||
4766 (VT.is512BitVector() && NumElems != 16))
4769 // "i" is the value the indexed mask element must have
4770 for (unsigned i = 0; i != NumElems; i += 2)
4771 if (!isUndefOrEqual(Mask[i], i) ||
4772 !isUndefOrEqual(Mask[i+1], i))
4778 /// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand
4779 /// specifies a shuffle of elements that is suitable for input to the 256-bit
4780 /// version of MOVDDUP.
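/// Mask to match for v4f64: <0, 0, 2, 2>.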
4781 static bool isMOVDDUPYMask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4782 if (!HasFp256 || !VT.is256BitVector())
4785 unsigned NumElts = VT.getVectorNumElements();
4789 for (unsigned i = 0; i != NumElts/2; ++i)
4790 if (!isUndefOrEqual(Mask[i], 0))
4792 for (unsigned i = NumElts/2; i != NumElts; ++i)
4793 if (!isUndefOrEqual(Mask[i], NumElts/2))
4798 /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4799 /// specifies a shuffle of elements that is suitable for input to the 128-bit
4800 /// version of MOVDDUP.
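/// Masks to match: <0, 0> for v2f64 or <0, 1, 0, 1> for v4f32.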
4801 static bool isMOVDDUPMask(ArrayRef<int> Mask, MVT VT) {
4802 if (!VT.is128BitVector())
4805 unsigned e = VT.getVectorNumElements() / 2;
4806 for (unsigned i = 0; i != e; ++i)
4807 if (!isUndefOrEqual(Mask[i], i))
4809 for (unsigned i = 0; i != e; ++i)
4810 if (!isUndefOrEqual(Mask[e+i], i))
4815 /// isVEXTRACTIndex - Return true if the specified
4816 /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
4817 /// suitable for instructions that extract 128- or 256-bit vectors.
4818 static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
4819 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4820 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4823 // The index should be aligned on a vecWidth-bit boundary.
4825 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4827 MVT VT = N->getSimpleValueType(0);
4828 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4829 bool Result = (Index * ElSize) % vecWidth == 0;
4834 /// isVINSERTIndex - Return true if the specified INSERT_SUBVECTOR
4835 /// operand specifies a subvector insert that is suitable for the
4836 /// insertion of 128- or 256-bit subvectors.
4837 static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) {
4838 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4839 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4841 // The index should be aligned on a vecWidth-bit boundary.
4843 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4845 MVT VT = N->getSimpleValueType(0);
4846 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4847 bool Result = (Index * ElSize) % vecWidth == 0;
4852 bool X86::isVINSERT128Index(SDNode *N) {
4853 return isVINSERTIndex(N, 128);
4856 bool X86::isVINSERT256Index(SDNode *N) {
4857 return isVINSERTIndex(N, 256);
4860 bool X86::isVEXTRACT128Index(SDNode *N) {
4861 return isVEXTRACTIndex(N, 128);
4864 bool X86::isVEXTRACT256Index(SDNode *N) {
4865 return isVEXTRACTIndex(N, 256);
4868 /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
4869 /// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
4870 /// Handles 128-bit and 256-bit.
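/// For example, the v4i32 mask <3, 2, 1, 0> is encoded two bits per element,
/// lowest element first, giving the immediate 0x1B (0b00011011).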
4871 static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
4872 MVT VT = N->getSimpleValueType(0);
4874 assert((VT.getSizeInBits() >= 128) &&
4875 "Unsupported vector type for PSHUF/SHUFP");
4877 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate
4878 // independently on 128-bit lanes.
4879 unsigned NumElts = VT.getVectorNumElements();
4880 unsigned NumLanes = VT.getSizeInBits()/128;
4881 unsigned NumLaneElts = NumElts/NumLanes;
4883 assert((NumLaneElts == 2 || NumLaneElts == 4 || NumLaneElts == 8) &&
4884 "Only supports 2, 4 or 8 elements per lane");
4886 unsigned Shift = (NumLaneElts >= 4) ? 1 : 0;
4888 for (unsigned i = 0; i != NumElts; ++i) {
4889 int Elt = N->getMaskElt(i);
4890 if (Elt < 0) continue;
4891 Elt &= NumLaneElts - 1;
4892 unsigned ShAmt = (i << Shift) % 8;
4893 Mask |= Elt << ShAmt;
4899 /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
4900 /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
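/// For example, the v8i16 mask <0, 1, 2, 3, 7, 6, 5, 4> leaves the low half
/// alone and reverses the high half, giving the immediate 0x1B.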
4901 static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) {
4902 MVT VT = N->getSimpleValueType(0);
4904 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4905 "Unsupported vector type for PSHUFHW");
4907 unsigned NumElts = VT.getVectorNumElements();
4910 for (unsigned l = 0; l != NumElts; l += 8) {
4911 // 8 nodes per lane, but we only care about the last 4.
4912 for (unsigned i = 0; i < 4; ++i) {
4913 int Elt = N->getMaskElt(l+i+4);
4914 if (Elt < 0) continue;
4915 Elt &= 0x3; // only 2-bits.
4916 Mask |= Elt << (i * 2);
4923 /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
4924 /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
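/// For example, the v8i16 mask <3, 2, 1, 0, 4, 5, 6, 7> reverses the low half
/// and leaves the high half alone, also giving the immediate 0x1B.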
4925 static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) {
4926 MVT VT = N->getSimpleValueType(0);
4928 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4929 "Unsupported vector type for PSHUFHW");
4931 unsigned NumElts = VT.getVectorNumElements();
4934 for (unsigned l = 0; l != NumElts; l += 8) {
4935 // 8 nodes per lane, but we only care about the first 4.
4936 for (unsigned i = 0; i < 4; ++i) {
4937 int Elt = N->getMaskElt(l+i);
4938 if (Elt < 0) continue;
4939 Elt &= 0x3; // only 2-bits
4940 Mask |= Elt << (i * 2);
4947 /// \brief Return the appropriate immediate to shuffle the specified
4948 /// VECTOR_SHUFFLE mask with the PALIGNR (if InterLane is false) or with
4949 /// VALIGN (if InterLane is true) instructions.
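/// For example, the v8i16 mask <1, 2, 3, 4, 5, 6, 7, 8> starts reading the
/// concatenated sources at element 1, giving a byte immediate of 2 (one
/// i16 element of 2 bytes).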
4950 static unsigned getShuffleAlignrImmediate(ShuffleVectorSDNode *SVOp,
4952 MVT VT = SVOp->getSimpleValueType(0);
4953 unsigned EltSize = InterLane ? 1 :
4954 VT.getVectorElementType().getSizeInBits() >> 3;
4956 unsigned NumElts = VT.getVectorNumElements();
4957 unsigned NumLanes = VT.is512BitVector() ? 1 : VT.getSizeInBits()/128;
4958 unsigned NumLaneElts = NumElts/NumLanes;
4962 for (i = 0; i != NumElts; ++i) {
4963 Val = SVOp->getMaskElt(i);
4967 if (Val >= (int)NumElts)
4968 Val -= NumElts - NumLaneElts;
4970 assert(Val - i > 0 && "PALIGNR imm should be positive");
4971 return (Val - i) * EltSize;
4974 /// \brief Return the appropriate immediate to shuffle the specified
4975 /// VECTOR_SHUFFLE mask with the PALIGNR instruction.
4976 static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) {
4977 return getShuffleAlignrImmediate(SVOp, false);
4980 /// \brief Return the appropriate immediate to shuffle the specified
4981 /// VECTOR_SHUFFLE mask with the VALIGN instruction.
4982 static unsigned getShuffleVALIGNImmediate(ShuffleVectorSDNode *SVOp) {
4983 return getShuffleAlignrImmediate(SVOp, true);
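/// getExtractVEXTRACTImmediate - Return the chunk index for a VEXTRACT of
/// vecWidth bits, e.g. extracting the upper half of a v8i32 (element index 4)
/// with vecWidth == 128 yields the immediate 1.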
4987 static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) {
4988 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4989 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4990 llvm_unreachable("Illegal extract subvector for VEXTRACT");
4993 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4995 MVT VecVT = N->getOperand(0).getSimpleValueType();
4996 MVT ElVT = VecVT.getVectorElementType();
4998 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4999 return Index / NumElemsPerChunk;
5002 static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) {
5003 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
5004 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
5005 llvm_unreachable("Illegal insert subvector for VINSERT");
5008 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
5010 MVT VecVT = N->getSimpleValueType(0);
5011 MVT ElVT = VecVT.getVectorElementType();
5013 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
5014 return Index / NumElemsPerChunk;
5017 /// getExtractVEXTRACT128Immediate - Return the appropriate immediate
5018 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128
5019 /// and VEXTRACTI128 instructions.
5020 unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) {
5021 return getExtractVEXTRACTImmediate(N, 128);
5024 /// getExtractVEXTRACT256Immediate - Return the appropriate immediate
5025 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF64x4
5026 /// and VEXTRACTI64x4 instructions.
5027 unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) {
5028 return getExtractVEXTRACTImmediate(N, 256);
5031 /// getInsertVINSERT128Immediate - Return the appropriate immediate
5032 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
5033 /// and VINSERTI128 instructions.
5034 unsigned X86::getInsertVINSERT128Immediate(SDNode *N) {
5035 return getInsertVINSERTImmediate(N, 128);
5038 /// getInsertVINSERT256Immediate - Return the appropriate immediate
5039 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF64x4
5040 /// and VINSERTI64x4 instructions.
5041 unsigned X86::getInsertVINSERT256Immediate(SDNode *N) {
5042 return getInsertVINSERTImmediate(N, 256);
5045 /// isZero - Returns true if V is a constant integer zero.
5046 static bool isZero(SDValue V) {
5047 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
5048 return C && C->isNullValue();
5051 /// isZeroNode - Returns true if Elt is a constant zero or a floating point
5052 /// constant +0.0.
5053 bool X86::isZeroNode(SDValue Elt) {
5056 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Elt))
5057 return CFP->getValueAPF().isPosZero();
5061 /// ShouldXformToMOVHLPS - Return true if the node should be transformed to
5062 /// match movhlps. The lower half elements should come from the upper half of
5063 /// V1 (and in order), and the upper half elements should come from the upper
5064 /// half of V2 (and in order).
5065 static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, MVT VT) {
5066 if (!VT.is128BitVector())
5068 if (VT.getVectorNumElements() != 4)
5070 for (unsigned i = 0, e = 2; i != e; ++i)
5071 if (!isUndefOrEqual(Mask[i], i+2))
5073 for (unsigned i = 2; i != 4; ++i)
5074 if (!isUndefOrEqual(Mask[i], i+4))
5079 /// isScalarLoadToVector - Returns true if the node is a scalar load that
5080 /// is promoted to a vector. It also returns the LoadSDNode by reference if
5081 /// required.
5082 static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = nullptr) {
5083 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
5085 N = N->getOperand(0).getNode();
5086 if (!ISD::isNON_EXTLoad(N))
5089 *LD = cast<LoadSDNode>(N);
5093 // Test whether the given value is a vector value which will be legalized
5094 // into a load.
5095 static bool WillBeConstantPoolLoad(SDNode *N) {
5096 if (N->getOpcode() != ISD::BUILD_VECTOR)
5099 // Check for any non-constant elements.
5100 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
5101 switch (N->getOperand(i).getNode()->getOpcode()) {
5103 case ISD::ConstantFP:
5110 // Vectors of all-zeros and all-ones are materialized with special
5111 // instructions rather than being loaded.
5112 return !ISD::isBuildVectorAllZeros(N) &&
5113 !ISD::isBuildVectorAllOnes(N);
5116 /// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
5117 /// match movlp{s|d}. The lower half elements should come from the lower half of
5118 /// V1 (and in order), and the upper half elements should come from the upper
5119 /// half of V2 (and in order). And since V1 will become the source of the
5120 /// MOVLP, it must be either a vector load or a scalar load to vector.
5121 static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
5122 ArrayRef<int> Mask, MVT VT) {
5123 if (!VT.is128BitVector())
5126 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
5128 // If V2 is a vector load, don't do this transformation; we will try to fold
5129 // the load into a shufps op instead.
5130 if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2))
5133 unsigned NumElems = VT.getVectorNumElements();
5135 if (NumElems != 2 && NumElems != 4)
5137 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
5138 if (!isUndefOrEqual(Mask[i], i))
5140 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
5141 if (!isUndefOrEqual(Mask[i], i+NumElems))
5146 /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
5147 /// to a zero vector.
5148 /// FIXME: move to dag combiner / method on ShuffleVectorSDNode
5149 static bool isZeroShuffle(ShuffleVectorSDNode *N) {
5150 SDValue V1 = N->getOperand(0);
5151 SDValue V2 = N->getOperand(1);
5152 unsigned NumElems = N->getValueType(0).getVectorNumElements();
5153 for (unsigned i = 0; i != NumElems; ++i) {
5154 int Idx = N->getMaskElt(i);
5155 if (Idx >= (int)NumElems) {
5156 unsigned Opc = V2.getOpcode();
5157 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
5159 if (Opc != ISD::BUILD_VECTOR ||
5160 !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
5162 } else if (Idx >= 0) {
5163 unsigned Opc = V1.getOpcode();
5164 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
5166 if (Opc != ISD::BUILD_VECTOR ||
5167 !X86::isZeroNode(V1.getOperand(Idx)))
5174 /// getZeroVector - Returns a vector of specified type with all zero elements.
5176 static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
5177 SelectionDAG &DAG, SDLoc dl) {
5178 assert(VT.isVector() && "Expected a vector type");
5180 // Always build SSE zero vectors as <4 x i32> bitcasted
5181 // to their dest type. This ensures they get CSE'd.
5183 if (VT.is128BitVector()) { // SSE
5184 if (Subtarget->hasSSE2()) { // SSE2
5185 SDValue Cst = DAG.getConstant(0, MVT::i32);
5186 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5188 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5189 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
5191 } else if (VT.is256BitVector()) { // AVX
5192 if (Subtarget->hasInt256()) { // AVX2
5193 SDValue Cst = DAG.getConstant(0, MVT::i32);
5194 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5195 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5197 // 256-bit logic and arithmetic instructions in AVX are all
5198 // floating-point; there is no support for integer ops. Emit fp zeroed vectors.
5199 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5200 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5201 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops);
5203 } else if (VT.is512BitVector()) { // AVX-512
5204 SDValue Cst = DAG.getConstant(0, MVT::i32);
5205 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
5206 Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5207 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
5208 } else if (VT.getScalarType() == MVT::i1) {
5209 assert(VT.getVectorNumElements() <= 16 && "Unexpected vector type");
5210 SDValue Cst = DAG.getConstant(0, MVT::i1);
5211 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
5212 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
5214 llvm_unreachable("Unexpected vector type");
5216 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5219 /// getOnesVector - Returns a vector of specified type with all bits set.
5220 /// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
5221 /// no AVX2 support, use two <4 x i32> vectors inserted into an <8 x i32> appropriately.
5222 /// Then bitcast to their original type, ensuring they get CSE'd.
5223 static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG,
5225 assert(VT.isVector() && "Expected a vector type");
5227 SDValue Cst = DAG.getConstant(~0U, MVT::i32);
5229 if (VT.is256BitVector()) {
5230 if (HasInt256) { // AVX2
5231 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5232 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5234 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5235 Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
5237 } else if (VT.is128BitVector()) {
5238 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5240 llvm_unreachable("Unexpected vector type");
5242 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5245 /// NormalizeMask - V2 is a splat; modify the mask (if needed) so all elements
5246 /// that point to V2 point to its first element.
5247 static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) {
5248 for (unsigned i = 0; i != NumElems; ++i) {
5249 if (Mask[i] > (int)NumElems) {
5255 /// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd
5256 /// operation of specified width.
5257 static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
5259 unsigned NumElems = VT.getVectorNumElements();
5260 SmallVector<int, 8> Mask;
5261 Mask.push_back(NumElems);
5262 for (unsigned i = 1; i != NumElems; ++i)
5264 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5267 /// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
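/// For v4i32 this builds the mask <0, 4, 1, 5>.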
5268 static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5270 unsigned NumElems = VT.getVectorNumElements();
5271 SmallVector<int, 8> Mask;
5272 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
5274 Mask.push_back(i + NumElems);
5276 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5279 /// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
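/// For v4i32 this builds the mask <2, 6, 3, 7>.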
5280 static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5282 unsigned NumElems = VT.getVectorNumElements();
5283 SmallVector<int, 8> Mask;
5284 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
5285 Mask.push_back(i + Half);
5286 Mask.push_back(i + NumElems + Half);
5288 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5291 // PromoteSplati8i16 - All i16 and i8 vector types can't be used directly by
5292 // a generic shuffle instruction because the target has no such instructions.
5293 // Generate shuffles which repeat i16 and i8 several times until they can be
5294 // represented by v4f32 and then be manipulated by target supported shuffles.
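// For example, to splat element 5 of a v8i16, one unpackh pass duplicates
// each high-half element; viewed as four 32-bit elements the value then
// lives in element 1, where a v4f32 splat can finish the job.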
5295 static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
5296 MVT VT = V.getSimpleValueType();
5297 int NumElems = VT.getVectorNumElements();
5300 while (NumElems > 4) {
5301 if (EltNo < NumElems/2) {
5302 V = getUnpackl(DAG, dl, VT, V, V);
5304 V = getUnpackh(DAG, dl, VT, V, V);
5305 EltNo -= NumElems/2;
5312 /// getLegalSplat - Generate a legal splat with supported x86 shuffles
5313 static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
5314 MVT VT = V.getSimpleValueType();
5317 if (VT.is128BitVector()) {
5318 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
5319 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
5320 V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32),
5322 } else if (VT.is256BitVector()) {
5323 // To use VPERMILPS to splat scalars, the second half of indices must
5324 // refer to the higher part, which is a duplication of the lower one,
5325 // because VPERMILPS can only handle in-lane permutations.
5326 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo,
5327 EltNo+4, EltNo+4, EltNo+4, EltNo+4 };
5329 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V);
5330 V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32),
5333 llvm_unreachable("Vector size not supported");
5335 return DAG.getNode(ISD::BITCAST, dl, VT, V);
5338 /// PromoteSplat - Promote a splat to target-supported vector shuffles.
5339 static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
5340 MVT SrcVT = SV->getSimpleValueType(0);
5341 SDValue V1 = SV->getOperand(0);
5344 int EltNo = SV->getSplatIndex();
5345 int NumElems = SrcVT.getVectorNumElements();
5346 bool Is256BitVec = SrcVT.is256BitVector();
5348 assert(((SrcVT.is128BitVector() && NumElems > 4) || Is256BitVec) &&
5349 "Unknown how to promote splat for type");
5351 // Extract the 128-bit part containing the splat element and update
5352 // the splat element index when it refers to the higher register.
5354 V1 = Extract128BitVector(V1, EltNo, DAG, dl);
5355 if (EltNo >= NumElems/2)
5356 EltNo -= NumElems/2;
5359 // All i16 and i8 vector types can't be used directly by a generic shuffle
5360 // instruction because the target has no such instruction. Generate shuffles
5361 // which repeat i16 and i8 several times until they fit in i32, and then can
5362 // be manipulated by target supported shuffles.
5363 MVT EltVT = SrcVT.getVectorElementType();
5364 if (EltVT == MVT::i8 || EltVT == MVT::i16)
5365 V1 = PromoteSplati8i16(V1, DAG, EltNo);
5367 // Recreate the 256-bit vector and place the same 128-bit vector
5368 // into the low and high part. This is necessary because we want
5369 // to use VPERM* to shuffle the vectors
5371 V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1);
5374 return getLegalSplat(DAG, V1, EltNo);
5377 /// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
5378 /// vector and a zero or undef vector. This produces a shuffle where the low
5379 /// element of V2 is swizzled into the zero/undef vector, landing at element
5380 /// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
5381 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
5383 const X86Subtarget *Subtarget,
5384 SelectionDAG &DAG) {
5385 MVT VT = V2.getSimpleValueType();
5387 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
5388 unsigned NumElems = VT.getVectorNumElements();
5389 SmallVector<int, 16> MaskVec;
5390 for (unsigned i = 0; i != NumElems; ++i)
5391 // If this is the insertion idx, put the low elt of V2 here.
5392 MaskVec.push_back(i == Idx ? NumElems : i);
5393 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]);
5396 /// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
5397 /// target specific opcode. Returns true if the Mask could be calculated. Sets
5398 /// IsUnary to true if it only uses one source. Note that this will set IsUnary for
5399 /// shuffles which use a single input multiple times, and in those cases it will
5400 /// adjust the mask to only have indices within that single input.
5401 static bool getTargetShuffleMask(SDNode *N, MVT VT,
5402 SmallVectorImpl<int> &Mask, bool &IsUnary) {
5403 unsigned NumElems = VT.getVectorNumElements();
5407 bool IsFakeUnary = false;
5408 switch(N->getOpcode()) {
5409 case X86ISD::BLENDI:
5410 ImmN = N->getOperand(N->getNumOperands()-1);
5411 DecodeBLENDMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5414 ImmN = N->getOperand(N->getNumOperands()-1);
5415 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5416 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5418 case X86ISD::UNPCKH:
5419 DecodeUNPCKHMask(VT, Mask);
5420 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5422 case X86ISD::UNPCKL:
5423 DecodeUNPCKLMask(VT, Mask);
5424 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5426 case X86ISD::MOVHLPS:
5427 DecodeMOVHLPSMask(NumElems, Mask);
5428 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5430 case X86ISD::MOVLHPS:
5431 DecodeMOVLHPSMask(NumElems, Mask);
5432 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5434 case X86ISD::PALIGNR:
5435 ImmN = N->getOperand(N->getNumOperands()-1);
5436 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5438 case X86ISD::PSHUFD:
5439 case X86ISD::VPERMILPI:
5440 ImmN = N->getOperand(N->getNumOperands()-1);
5441 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5444 case X86ISD::PSHUFHW:
5445 ImmN = N->getOperand(N->getNumOperands()-1);
5446 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5449 case X86ISD::PSHUFLW:
5450 ImmN = N->getOperand(N->getNumOperands()-1);
5451 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5454 case X86ISD::PSHUFB: {
5456 SDValue MaskNode = N->getOperand(1);
5457 while (MaskNode->getOpcode() == ISD::BITCAST)
5458 MaskNode = MaskNode->getOperand(0);
5460 if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
5461 // If we have a build-vector, then things are easy.
5462 EVT VT = MaskNode.getValueType();
5463 assert(VT.isVector() &&
5464 "Can't produce a non-vector with a build_vector!");
5465 if (!VT.isInteger())
5468 int NumBytesPerElement = VT.getVectorElementType().getSizeInBits() / 8;
5470 SmallVector<uint64_t, 32> RawMask;
5471 for (int i = 0, e = MaskNode->getNumOperands(); i < e; ++i) {
5472 SDValue Op = MaskNode->getOperand(i);
5473 if (Op->getOpcode() == ISD::UNDEF) {
5474 RawMask.push_back((uint64_t)SM_SentinelUndef);
5477 auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
5480 APInt MaskElement = CN->getAPIntValue();
5482 // We now have to decode the element which could be any integer size and
5483 // extract each byte of it.
5484 for (int j = 0; j < NumBytesPerElement; ++j) {
5485 // Note that this is x86 and so always little endian: the low byte is
5486 // the first byte of the mask.
5487 RawMask.push_back(MaskElement.getLoBits(8).getZExtValue());
5488 MaskElement = MaskElement.lshr(8);
5491 DecodePSHUFBMask(RawMask, Mask);
5495 auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
5499 SDValue Ptr = MaskLoad->getBasePtr();
5500 if (Ptr->getOpcode() == X86ISD::Wrapper)
5501 Ptr = Ptr->getOperand(0);
5503 auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
5504 if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
5507 if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
5508 DecodePSHUFBMask(C, Mask);
5514 case X86ISD::VPERMI:
5515 ImmN = N->getOperand(N->getNumOperands()-1);
5516 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5520 case X86ISD::MOVSD: {
5521 // The index 0 always comes from the first element of the second source;
5522 // this is why MOVSS and MOVSD are used in the first place. The other
5523 // elements come from the other positions of the first source vector
5524 Mask.push_back(NumElems);
5525 for (unsigned i = 1; i != NumElems; ++i) {
5530 case X86ISD::VPERM2X128:
5531 ImmN = N->getOperand(N->getNumOperands()-1);
5532 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5533 if (Mask.empty()) return false;
5535 case X86ISD::MOVSLDUP:
5536 DecodeMOVSLDUPMask(VT, Mask);
5539 case X86ISD::MOVSHDUP:
5540 DecodeMOVSHDUPMask(VT, Mask);
5543 case X86ISD::MOVDDUP:
5544 DecodeMOVDDUPMask(VT, Mask);
5547 case X86ISD::MOVLHPD:
5548 case X86ISD::MOVLPD:
5549 case X86ISD::MOVLPS:
5550 // Not yet implemented
5552 default: llvm_unreachable("unknown target shuffle node");
5555 // If we have a fake unary shuffle, the shuffle mask is spread across two
5556 // inputs that are actually the same node. Re-map the mask to always point
5557 // into the first input.
5560 if (M >= (int)Mask.size())
5566 /// getShuffleScalarElt - Returns the scalar element that will make up the ith
5567 /// element of the result of the vector shuffle.
5568 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
5571 return SDValue(); // Limit search depth.
5573 SDValue V = SDValue(N, 0);
5574 EVT VT = V.getValueType();
5575 unsigned Opcode = V.getOpcode();
5577 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
5578 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
5579 int Elt = SV->getMaskElt(Index);
5582 return DAG.getUNDEF(VT.getVectorElementType());
5584 unsigned NumElems = VT.getVectorNumElements();
5585 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
5586 : SV->getOperand(1);
5587 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
5590 // Recurse into target specific vector shuffles to find scalars.
5591 if (isTargetShuffle(Opcode)) {
5592 MVT ShufVT = V.getSimpleValueType();
5593 unsigned NumElems = ShufVT.getVectorNumElements();
5594 SmallVector<int, 16> ShuffleMask;
5597 if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
5600 int Elt = ShuffleMask[Index];
5602 return DAG.getUNDEF(ShufVT.getVectorElementType());
5604 SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
5606 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
5610 // Actual nodes that may contain scalar elements
5611 if (Opcode == ISD::BITCAST) {
5612 V = V.getOperand(0);
5613 EVT SrcVT = V.getValueType();
5614 unsigned NumElems = VT.getVectorNumElements();
5616 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
5620 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
5621 return (Index == 0) ? V.getOperand(0)
5622 : DAG.getUNDEF(VT.getVectorElementType());
5624 if (V.getOpcode() == ISD::BUILD_VECTOR)
5625 return V.getOperand(Index);
5630 /// getNumOfConsecutiveZeros - Return the number of elements of a vector
5631 /// shuffle operation which come consecutively from zero. The
5632 /// search can start in two different directions, from left or right.
5633 /// We count undefs as zeros until PreferredNum is reached.
5634 static unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp,
5635 unsigned NumElems, bool ZerosFromLeft,
5637 unsigned PreferredNum = -1U) {
5638 unsigned NumZeros = 0;
5639 for (unsigned i = 0; i != NumElems; ++i) {
5640 unsigned Index = ZerosFromLeft ? i : NumElems - i - 1;
5641 SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
5645 if (X86::isZeroNode(Elt))
5647 else if (Elt.getOpcode() == ISD::UNDEF) // Undef as zero up to PreferredNum.
5648 NumZeros = std::min(NumZeros + 1, PreferredNum);
5656 /// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
5657 /// correspond consecutively to elements from one of the vector operands,
5658 /// starting from its index OpIdx. Also sets OpNum to the source vector operand.
5660 bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
5661 unsigned MaskI, unsigned MaskE, unsigned OpIdx,
5662 unsigned NumElems, unsigned &OpNum) {
5663 bool SeenV1 = false;
5664 bool SeenV2 = false;
5666 for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
5667 int Idx = SVOp->getMaskElt(i);
5668 // Ignore undef indices
5672 if (Idx < (int)NumElems)
5677 // Only accept consecutive elements from the same vector
5678 if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
5682 OpNum = SeenV1 ? 0 : 1;
5686 /// isVectorShiftRight - Returns true if the shuffle can be implemented as a
5687 /// logical right shift of a vector.
5688 static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5689 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5691 SVOp->getSimpleValueType(0).getVectorNumElements();
5692 unsigned NumZeros = getNumOfConsecutiveZeros(
5693 SVOp, NumElems, false /* check zeros from right */, DAG,
5694 SVOp->getMaskElt(0));
5700 // Considering the elements in the mask that are not consecutive zeros,
5701 // check if they consecutively come from only one of the source vectors.
5703 // V1 = {X, A, B, C} 0
5705 // vector_shuffle V1, V2 <1, 2, 3, X>
5707 if (!isShuffleMaskConsecutive(SVOp,
5708 0, // Mask Start Index
5709 NumElems-NumZeros, // Mask End Index(exclusive)
5710 NumZeros, // Where to start looking in the src vector
5711 NumElems, // Number of elements in vector
5712 OpSrc)) // Which source operand ?
5717 ShVal = SVOp->getOperand(OpSrc);
5721 /// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
5722 /// logical left shift of a vector.
5723 static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5724 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5726 SVOp->getSimpleValueType(0).getVectorNumElements();
5727 unsigned NumZeros = getNumOfConsecutiveZeros(
5728 SVOp, NumElems, true /* check zeros from left */, DAG,
5729 NumElems - SVOp->getMaskElt(NumElems - 1) - 1);
5735 // Considering the elements in the mask that are not consecutive zeros,
5736 // check if they consecutively come from only one of the source vectors.
5738 // 0 { A, B, X, X } = V2
5740 // vector_shuffle V1, V2 <X, X, 4, 5>
5742 if (!isShuffleMaskConsecutive(SVOp,
5743 NumZeros, // Mask Start Index
5744 NumElems, // Mask End Index(exclusive)
5745 0, // Where to start looking in the src vector
5746 NumElems, // Number of elements in vector
5747 OpSrc)) // Which source operand ?
5752 ShVal = SVOp->getOperand(OpSrc);
5756 /// isVectorShift - Returns true if the shuffle can be implemented as a
5757 /// logical left or right shift of a vector.
5758 static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5759 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5760 // Although the logic below supports any bitwidth, there are no
5761 // shift instructions which handle more than 128-bit vectors.
5762 if (!SVOp->getSimpleValueType(0).is128BitVector())
5765 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
5766 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt))
5772 /// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
5774 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
5775 unsigned NumNonZero, unsigned NumZero,
5777 const X86Subtarget* Subtarget,
5778 const TargetLowering &TLI) {
5785 for (unsigned i = 0; i < 16; ++i) {
5786 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
5787 if (ThisIsNonZero && First) {
5789 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5791 V = DAG.getUNDEF(MVT::v8i16);
5796 SDValue ThisElt, LastElt;
5797 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
5798 if (LastIsNonZero) {
5799 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
5800 MVT::i16, Op.getOperand(i-1));
5802 if (ThisIsNonZero) {
5803 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
5804 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
5805 ThisElt, DAG.getConstant(8, MVT::i8));
5807 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
5811 if (ThisElt.getNode())
5812 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
5813 DAG.getIntPtrConstant(i/2));
5817 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
5820 /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
5822 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
5823 unsigned NumNonZero, unsigned NumZero,
5825 const X86Subtarget* Subtarget,
5826 const TargetLowering &TLI) {
5833 for (unsigned i = 0; i < 8; ++i) {
5834 bool isNonZero = (NonZeros & (1 << i)) != 0;
5838 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5840 V = DAG.getUNDEF(MVT::v8i16);
5843 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
5844 MVT::v8i16, V, Op.getOperand(i),
5845 DAG.getIntPtrConstant(i));
5852 /// LowerBuildVectorv4x32 - Custom lower build_vector of v4i32 or v4f32.
5853 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
5854 const X86Subtarget *Subtarget,
5855 const TargetLowering &TLI) {
5856 // Find all zeroable elements.
5858 for (int i=0; i < 4; ++i) {
5859 SDValue Elt = Op->getOperand(i);
5860 Zeroable[i] = (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt));
5862 assert(std::count_if(&Zeroable[0], &Zeroable[4],
5863 [](bool M) { return !M; }) > 1 &&
5864 "We expect at least two non-zero elements!");
5866 // We only know how to deal with build_vector nodes where elements are either
5867 // zeroable or extract_vector_elt with constant index.
5868 SDValue FirstNonZero;
5869 unsigned FirstNonZeroIdx;
5870 for (unsigned i=0; i < 4; ++i) {
5873 SDValue Elt = Op->getOperand(i);
5874 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5875 !isa<ConstantSDNode>(Elt.getOperand(1)))
5877 // Make sure that this node is extracting from a 128-bit vector.
5878 MVT VT = Elt.getOperand(0).getSimpleValueType();
5879 if (!VT.is128BitVector())
5881 if (!FirstNonZero.getNode()) {
5883 FirstNonZeroIdx = i;
5887 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
5888 SDValue V1 = FirstNonZero.getOperand(0);
5889 MVT VT = V1.getSimpleValueType();
5891 // See if this build_vector can be lowered as a blend with zero.
5893 unsigned EltMaskIdx, EltIdx;
5895 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
5896 if (Zeroable[EltIdx]) {
5897 // The zero vector will be on the right hand side.
5898 Mask[EltIdx] = EltIdx+4;
5902 Elt = Op->getOperand(EltIdx);
5903 // By construction, Elt is an EXTRACT_VECTOR_ELT with constant index.
5904 EltMaskIdx = cast<ConstantSDNode>(Elt.getOperand(1))->getZExtValue();
5905 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
5907 Mask[EltIdx] = EltIdx;
5911 // Let the shuffle legalizer deal with blend operations.
5912 SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
5913 if (V1.getSimpleValueType() != VT)
5914 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), VT, V1);
5915 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, &Mask[0]);
5918 // See if we can lower this build_vector to an INSERTPS.
5919 if (!Subtarget->hasSSE41())
5922 SDValue V2 = Elt.getOperand(0);
5923 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
5926 bool CanFold = true;
5927 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
5931 SDValue Current = Op->getOperand(i);
5932 SDValue SrcVector = Current->getOperand(0);
5935 CanFold = SrcVector == V1 &&
5936 cast<ConstantSDNode>(Current.getOperand(1))->getZExtValue() == i;
5942 assert(V1.getNode() && "Expected at least two non-zero elements!");
5943 if (V1.getSimpleValueType() != MVT::v4f32)
5944 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), MVT::v4f32, V1);
5945 if (V2.getSimpleValueType() != MVT::v4f32)
5946 V2 = DAG.getNode(ISD::BITCAST, SDLoc(V2), MVT::v4f32, V2);
5948 // Ok, we can emit an INSERTPS instruction.
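// The INSERTPS immediate encodes the source element in bits [7:6], the
// destination element in bits [5:4], and the zero mask in bits [3:0].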
5950 for (int i = 0; i < 4; ++i)
5954 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
5955 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
5956 SDValue Result = DAG.getNode(X86ISD::INSERTPS, SDLoc(Op), MVT::v4f32, V1, V2,
5957 DAG.getIntPtrConstant(InsertPSMask));
5958 return DAG.getNode(ISD::BITCAST, SDLoc(Op), VT, Result);
5961 /// getVShift - Return a vector logical shift node.
5963 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
5964 unsigned NumBits, SelectionDAG &DAG,
5965 const TargetLowering &TLI, SDLoc dl) {
5966 assert(VT.is128BitVector() && "Unknown type for VShift");
5967 EVT ShVT = MVT::v2i64;
5968 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
5969 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
5970 return DAG.getNode(ISD::BITCAST, dl, VT,
5971 DAG.getNode(Opc, dl, ShVT, SrcOp,
5972 DAG.getConstant(NumBits,
5973 TLI.getScalarShiftAmountTy(SrcOp.getValueType()))));
5977 LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
5979 // Check if the scalar load can be widened into a vector load. And if
5980 // the address is "base + cst" see if the cst can be "absorbed" into
5981 // the shuffle mask.
5982 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
5983 SDValue Ptr = LD->getBasePtr();
5984 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
5986 EVT PVT = LD->getValueType(0);
5987 if (PVT != MVT::i32 && PVT != MVT::f32)
5992 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
5993 FI = FINode->getIndex();
5995 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
5996 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
5997 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5998 Offset = Ptr.getConstantOperandVal(1);
5999 Ptr = Ptr.getOperand(0);
6004 // FIXME: 256-bit vector instructions don't require a strict alignment,
6005 // improve this code to support it better.
6006 unsigned RequiredAlign = VT.getSizeInBits()/8;
6007 SDValue Chain = LD->getChain();
6008 // Make sure the stack object alignment is at least 16 or 32.
6009 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
6010 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
6011 if (MFI->isFixedObjectIndex(FI)) {
6012 // Can't change the alignment. FIXME: It's possible to compute
6013 // the exact stack offset and reference FI + adjust offset instead.
6014 // If someone *really* cares about this. That's the way to implement it.
6017 MFI->setObjectAlignment(FI, RequiredAlign);
6021 // (Offset % 16 or 32) must be a multiple of 4. The address is then
6022 // Ptr + (Offset & ~15).
6025 if ((Offset % RequiredAlign) & 3)
6027 int64_t StartOffset = Offset & ~(RequiredAlign-1);
6029 Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(),
6030 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType()));
6032 int EltNo = (Offset - StartOffset) >> 2;
6033 unsigned NumElems = VT.getVectorNumElements();
6035 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
6036 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
6037 LD->getPointerInfo().getWithOffset(StartOffset),
6038 false, false, false, 0);
6040 SmallVector<int, 8> Mask;
6041 for (unsigned i = 0; i != NumElems; ++i)
6042 Mask.push_back(EltNo);
6044 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
6050 /// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a
6051 /// vector of type 'VT', see if the elements can be replaced by a single large
6052 /// load which has the same value as a build_vector whose operands are 'elts'.
6054 /// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
6056 /// FIXME: we'd also like to handle the case where the last elements are zero
6057 /// rather than undef via VZEXT_LOAD, but we do not detect that case today.
6058 /// There's even a handy isZeroNode for that purpose.
6059 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
6060 SDLoc &DL, SelectionDAG &DAG,
6061 bool isAfterLegalize) {
6062 EVT EltVT = VT.getVectorElementType();
6063 unsigned NumElems = Elts.size();
6065 LoadSDNode *LDBase = nullptr;
6066 unsigned LastLoadedElt = -1U;
6068 // For each element in the initializer, see if we've found a load or an undef.
6069 // If we don't find an initial load element, or later load elements are
6070 // non-consecutive, bail out.
6071 for (unsigned i = 0; i < NumElems; ++i) {
6072 SDValue Elt = Elts[i];
6074 if (!Elt.getNode() ||
6075 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
6078 if (Elt.getNode()->getOpcode() == ISD::UNDEF)
6080 LDBase = cast<LoadSDNode>(Elt.getNode());
6084 if (Elt.getOpcode() == ISD::UNDEF)
6087 LoadSDNode *LD = cast<LoadSDNode>(Elt);
6088 if (!DAG.isConsecutiveLoad(LD, LDBase, EltVT.getSizeInBits()/8, i))
6093 // If we have found an entire vector of loads and undefs, then return a large
6094 // load of the entire vector width starting at the base pointer. If we found
6095 // consecutive loads for the low half, generate a vzext_load node.
6096 if (LastLoadedElt == NumElems - 1) {
6098 if (isAfterLegalize &&
6099 !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
6102 SDValue NewLd = SDValue();
6104 NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
6105 LDBase->getPointerInfo(), LDBase->isVolatile(),
6106 LDBase->isNonTemporal(), LDBase->isInvariant(),
6107 LDBase->getAlignment());
6109 if (LDBase->hasAnyUseOfValue(1)) {
6110 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6112 SDValue(NewLd.getNode(), 1));
6113 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6114 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6115 SDValue(NewLd.getNode(), 1));
6121 // TODO: The code below fires only for loading the low v2i32 / v2f32
6122 //of a v4i32 / v4f32. It's probably worth generalizing.
6123 if (NumElems == 4 && LastLoadedElt == 1 && (EltVT.getSizeInBits() == 32) &&
6124 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
6125 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
6126 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
6128 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::i64,
6129 LDBase->getPointerInfo(),
6130 LDBase->getAlignment(),
6131 false/*isVolatile*/, true/*ReadMem*/,
6134 // Make sure the newly-created LOAD is in the same position as LDBase in
6135 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and
6136 // update uses of LDBase's output chain to use the TokenFactor.
6137 if (LDBase->hasAnyUseOfValue(1)) {
6138 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6139 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1));
6140 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6141 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6142 SDValue(ResNode.getNode(), 1));
6145 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
6150 /// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
6151 /// to generate a splat value for the following cases:
6152 /// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
6153 /// 2. A splat shuffle which uses a scalar_to_vector node which comes from
6154 /// a scalar load, or a constant.
6155 /// The VBROADCAST node is returned when a pattern is found,
6156 /// or SDValue() otherwise.
6157 static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
6158 SelectionDAG &DAG) {
6159 // VBROADCAST requires AVX.
6160 // TODO: Splats could be generated for non-AVX CPUs using SSE
6161 // instructions, but there's less potential gain for only 128-bit vectors.
6162 if (!Subtarget->hasAVX())
6165 MVT VT = Op.getSimpleValueType();
6168 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
6169 "Unsupported vector type for broadcast.");
6174 switch (Op.getOpcode()) {
6176 // Unknown pattern found.
6179 case ISD::BUILD_VECTOR: {
6180 auto *BVOp = cast<BuildVectorSDNode>(Op.getNode());
6181 BitVector UndefElements;
6182 SDValue Splat = BVOp->getSplatValue(&UndefElements);
6184 // We need a splat of a single value to use broadcast, and it doesn't
6185 // make any sense if the value is only in one element of the vector.
6186 if (!Splat || (VT.getVectorNumElements() - UndefElements.count()) <= 1)
6190 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6191 Ld.getOpcode() == ISD::ConstantFP);
6193 // Make sure that all of the users of a non-constant load are from the
6194 // BUILD_VECTOR node.
6195 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
6200 case ISD::VECTOR_SHUFFLE: {
6201 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
6203 // Shuffles must have a splat mask where the first element is
6204 // the one being splatted, i.e. mask element 0 must be 0.
6205 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0)
6208 SDValue Sc = Op.getOperand(0);
6209 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
6210 Sc.getOpcode() != ISD::BUILD_VECTOR) {
6212 if (!Subtarget->hasInt256())
6215 // Use the register form of the broadcast instruction available on AVX2.
6216 if (VT.getSizeInBits() >= 256)
6217 Sc = Extract128BitVector(Sc, 0, DAG, dl);
6218 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
6221 Ld = Sc.getOperand(0);
6222 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6223 Ld.getOpcode() == ISD::ConstantFP);
6225 // The scalar_to_vector node and the suspected
6226 // load node must have exactly one user.
6227 // Constants may have multiple users.
6229 // AVX-512 has a register version of the broadcast.
6230 bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() &&
6231 Ld.getValueType().getSizeInBits() >= 32;
6232 if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
6239 unsigned ScalarSize = Ld.getValueType().getSizeInBits();
6240 bool IsGE256 = (VT.getSizeInBits() >= 256);
6242 // When optimizing for size, generate up to 5 extra bytes for a broadcast
6243 // instruction to save 8 or more bytes of constant pool data.
6244 // TODO: If multiple splats are generated to load the same constant,
6245 // it may be detrimental to overall size. There needs to be a way to detect
6246 // that condition to know if this is truly a size win.
6247 const Function *F = DAG.getMachineFunction().getFunction();
6248 bool OptForSize = F->getAttributes().
6249 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
6251 // Handle broadcasting a single constant scalar from the constant pool
6252 // into a vector.
6253 // On Sandybridge (no AVX2), it is still better to load a constant vector
6254 // from the constant pool and not to broadcast it from a scalar.
6255 // But override that restriction when optimizing for size.
6256 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
6257 if (ConstSplatVal && (Subtarget->hasAVX2() || OptForSize)) {
6258 EVT CVT = Ld.getValueType();
6259 assert(!CVT.isVector() && "Must not broadcast a vector type");
6261 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
6262 // For size optimization, also splat v2f64 and v2i64, and for size opt
6263 // with AVX2, also splat i8 and i16.
6264 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
6265 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6266 (OptForSize && (ScalarSize == 64 || Subtarget->hasAVX2()))) {
6267 const Constant *C = nullptr;
6268 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
6269 C = CI->getConstantIntValue();
6270 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
6271 C = CF->getConstantFPValue();
6273 assert(C && "Invalid constant type");
6275 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6276 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
6277 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
6278 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP,
6279 MachinePointerInfo::getConstantPool(),
6280 false, false, false, Alignment);
6282 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6286 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
6288 // Handle AVX2 in-register broadcasts.
6289 if (!IsLoad && Subtarget->hasInt256() &&
6290 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
6291 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6293 // The scalar source must be a normal load.
6297 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6298 (Subtarget->hasVLX() && ScalarSize == 64))
6299 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6301 // The integer check is needed for the 64-bit into 128-bit case so it doesn't
6302 // match double, since there is no vbroadcastsd xmm.
6303 if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) {
6304 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
6305 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6308 // Unsupported broadcast.
6312 /// \brief For an EXTRACT_VECTOR_ELT with a constant index return the real
6313 /// underlying vector and index.
6315 /// Modifies \p ExtractedFromVec to the real vector and returns the real
6316 /// index.
6317 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
6319 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
6320 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
6323 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
6324 // lowered this:
6325 // (extract_vector_elt (v8f32 %vreg1), Constant<6>)
6326 // to:
6327 // (extract_vector_elt (vector_shuffle<2,u,u,u>
6328 // (extract_subvector (v8f32 %vreg0), Constant<4>),
6331 // In this case the vector is the extract_subvector expression and the index
6332 // is 2, as specified by the shuffle.
6333 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
6334 SDValue ShuffleVec = SVOp->getOperand(0);
6335 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
6336 assert(ShuffleVecVT.getVectorElementType() ==
6337 ExtractedFromVec.getSimpleValueType().getVectorElementType());
6339 int ShuffleIdx = SVOp->getMaskElt(Idx);
6340 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
6341 ExtractedFromVec = ShuffleVec;
6347 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
6348 MVT VT = Op.getSimpleValueType();
6350 // Skip if insert_vec_elt is not supported.
6351 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6352 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
6356 unsigned NumElems = Op.getNumOperands();
6360 SmallVector<unsigned, 4> InsertIndices;
6361 SmallVector<int, 8> Mask(NumElems, -1);
6363 for (unsigned i = 0; i != NumElems; ++i) {
6364 unsigned Opc = Op.getOperand(i).getOpcode();
6366 if (Opc == ISD::UNDEF)
6369 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
6370 // Quit if more than 1 element needs inserting.
6371 if (InsertIndices.size() > 1)
6374 InsertIndices.push_back(i);
6378 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
6379 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
6380 // Quit if non-constant index.
6381 if (!isa<ConstantSDNode>(ExtIdx))
6383 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
6385 // Quit if extracted from vector of different type.
6386 if (ExtractedFromVec.getValueType() != VT)
      return SDValue();
6389 if (!VecIn1.getNode())
6390 VecIn1 = ExtractedFromVec;
6391 else if (VecIn1 != ExtractedFromVec) {
6392 if (!VecIn2.getNode())
6393 VecIn2 = ExtractedFromVec;
6394 else if (VecIn2 != ExtractedFromVec)
6395 // Quit if there are more than two vectors to shuffle.
        return SDValue();
    }
6399 if (ExtractedFromVec == VecIn1)
      Mask[i] = Idx;
6401 else if (ExtractedFromVec == VecIn2)
6402 Mask[i] = Idx + NumElems;
  }
6405 if (!VecIn1.getNode())
    return SDValue();
6408 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
6409 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]);
6410 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
6411 unsigned Idx = InsertIndices[i];
6412 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
6413 DAG.getIntPtrConstant(Idx));
  }

  return NV;
}
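// For illustration (values chosen for exposition): a v4i32 build_vector of
//   (extract_vector_elt %A, 1), (extract_vector_elt %A, 0),
//   (extract_vector_elt %A, 3), %s
// is rebuilt above as (vector_shuffle<1,0,3,u> %A, undef) followed by a
// single insert_vector_elt of %s at index 3.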
6419 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
SDValue
6421 X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
6423 MVT VT = Op.getSimpleValueType();
6424 assert((VT.getVectorElementType() == MVT::i1) && (VT.getSizeInBits() <= 16) &&
6425 "Unexpected type in LowerBUILD_VECTORvXi1!");
6428 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6429 SDValue Cst = DAG.getTargetConstant(0, MVT::i1);
6430 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6431 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
  }
6434 if (ISD::isBuildVectorAllOnes(Op.getNode())) {
6435 SDValue Cst = DAG.getTargetConstant(1, MVT::i1);
6436 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6437 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
  }
6440 bool AllConstants = true;
6441 uint64_t Immediate = 0;
6442 int NonConstIdx = -1;
6443 bool IsSplat = true;
6444 unsigned NumNonConsts = 0;
6445 unsigned NumConsts = 0;
6446 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
6447 SDValue In = Op.getOperand(idx);
6448 if (In.getOpcode() == ISD::UNDEF)
      continue;
6450 if (!isa<ConstantSDNode>(In)) {
6451 AllConstants = false;
      NonConstIdx = idx;
      ++NumNonConsts;
      continue;
    }
    ++NumConsts;
6456 if (cast<ConstantSDNode>(In)->getZExtValue())
6457 Immediate |= (1ULL << idx);
6459 if (In != Op.getOperand(0))
      IsSplat = false;
  }

  if (AllConstants) {
6464 SDValue FullMask = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1,
6465 DAG.getConstant(Immediate, MVT::i16));
6466 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, FullMask,
6467 DAG.getIntPtrConstant(0));
  }
6470 if (NumNonConsts == 1 && NonConstIdx != 0) {
    SDValue DstVec;
    if (NumConsts) {
6473 SDValue VecAsImm = DAG.getConstant(Immediate,
6474 MVT::getIntegerVT(VT.getSizeInBits()));
6475 DstVec = DAG.getNode(ISD::BITCAST, dl, VT, VecAsImm);
    } else
6478 DstVec = DAG.getUNDEF(VT);
6479 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
6480 Op.getOperand(NonConstIdx),
6481 DAG.getIntPtrConstant(NonConstIdx));
  }
6483 if (!IsSplat && (NonConstIdx != 0))
6484 llvm_unreachable("Unsupported BUILD_VECTOR operation");
6485 MVT SelectVT = (VT == MVT::v16i1)? MVT::i16 : MVT::i8;
  SDValue Select;
  if (IsSplat)
6488 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6489 DAG.getConstant(-1, SelectVT),
6490 DAG.getConstant(0, SelectVT));
  else
6492 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6493 DAG.getConstant((Immediate | 1), SelectVT),
6494 DAG.getConstant(Immediate, SelectVT));
6495 return DAG.getNode(ISD::BITCAST, dl, VT, Select);
}
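// For example, the all-constant v8i1 <1,0,1,1,0,0,0,0> takes the
// all-constant path above: Immediate becomes 0b00001101, which is emitted as
// an i16 constant, bitcast to v16i1, and narrowed back to v8i1 with an
// extract_subvector at index 0.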
6498 /// \brief Return true if \p N implements a horizontal binop and return the
6499 /// operands for the horizontal binop into V0 and V1.
6501 /// This is a helper function of PerformBUILD_VECTORCombine.
6502 /// This function checks whether the build_vector \p N implements a
6503 /// horizontal operation. Parameter \p Opcode defines the kind of horizontal
6504 /// operation to match.
6505 /// For example, if \p Opcode is equal to ISD::ADD, then this function
6506 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
6507 /// is equal to ISD::SUB, then this function checks if this is a horizontal
/// arithmetic sub.
6510 /// This function only analyzes elements of \p N whose indices are
6511 /// in range [BaseIdx, LastIdx).
6512 static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
                              SelectionDAG &DAG,
6514 unsigned BaseIdx, unsigned LastIdx,
6515 SDValue &V0, SDValue &V1) {
6516 EVT VT = N->getValueType(0);
6518 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
6519 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
6520 "Invalid Vector in input!");
6522 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
6523 bool CanFold = true;
6524 unsigned ExpectedVExtractIdx = BaseIdx;
6525 unsigned NumElts = LastIdx - BaseIdx;
6526 V0 = DAG.getUNDEF(VT);
6527 V1 = DAG.getUNDEF(VT);
6529 // Check if N implements a horizontal binop.
6530 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
6531 SDValue Op = N->getOperand(i + BaseIdx);
6534 if (Op->getOpcode() == ISD::UNDEF) {
6535 // Update the expected vector extract index.
6536 if (i * 2 == NumElts)
6537 ExpectedVExtractIdx = BaseIdx;
6538 ExpectedVExtractIdx += 2;
      continue;
    }
6542 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
    if (!CanFold)
      break;
6547 SDValue Op0 = Op.getOperand(0);
6548 SDValue Op1 = Op.getOperand(1);
6550 // Try to match the following pattern:
6551 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
6552 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6553 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6554 Op0.getOperand(0) == Op1.getOperand(0) &&
6555 isa<ConstantSDNode>(Op0.getOperand(1)) &&
6556 isa<ConstantSDNode>(Op1.getOperand(1)));
    if (!CanFold)
      break;
6560 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6561 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
6563 if (i * 2 < NumElts) {
6564 if (V0.getOpcode() == ISD::UNDEF)
6565 V0 = Op0.getOperand(0);
    } else {
6567 if (V1.getOpcode() == ISD::UNDEF)
6568 V1 = Op0.getOperand(0);
6569 if (i * 2 == NumElts)
6570 ExpectedVExtractIdx = BaseIdx;
    }
6573 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
6574 if (I0 == ExpectedVExtractIdx)
6575 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
6576 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
6577 // Try to match the following dag sequence:
6578 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
6579 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
    } else
      CanFold = false;
6583 ExpectedVExtractIdx += 2;
  }

  return CanFold;
}
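// For illustration, with BaseIdx = 0 and LastIdx = 4, a v4f32 build_vector of
//   (fadd (extract A, 0), (extract A, 1)), (fadd (extract A, 2), (extract A, 3)),
//   (fadd (extract B, 0), (extract B, 1)), (fadd (extract B, 2), (extract B, 3))
// matches with V0 = A and V1 = B; the caller can then emit X86ISD::FHADD,
// which typically selects to haddps.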
6589 /// \brief Emit a sequence of two 128-bit horizontal add/sub followed by
6590 /// a concat_vector.
6592 /// This is a helper function of PerformBUILD_VECTORCombine.
6593 /// This function expects two 256-bit vectors called V0 and V1.
6594 /// At first, each vector is split into two separate 128-bit vectors.
6595 /// Then, the resulting 128-bit vectors are used to implement two
6596 /// horizontal binary operations.
6598 /// The kind of horizontal binary operation is defined by \p X86Opcode.
6600 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed as inputs
6601 /// to the two new horizontal binops.
6602 /// When Mode is set, the first horizontal binop dag node would take as input
6603 /// the lower 128-bit of V0 and the upper 128-bit of V0. The second
6604 /// horizontal binop dag node would take as input the lower 128-bit of V1
6605 /// and the upper 128-bit of V1.
6607 /// HADD V0_LO, V0_HI
6608 /// HADD V1_LO, V1_HI
6610 /// Otherwise, the first horizontal binop dag node takes as input the lower
6611 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
6612 /// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
6614 /// HADD V0_LO, V1_LO
6615 /// HADD V0_HI, V1_HI
6617 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
6618 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
6619 /// the upper 128-bits of the result.
6620 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
6621 SDLoc DL, SelectionDAG &DAG,
6622 unsigned X86Opcode, bool Mode,
6623 bool isUndefLO, bool isUndefHI) {
6624 EVT VT = V0.getValueType();
6625 assert(VT.is256BitVector() && VT == V1.getValueType() &&
6626 "Invalid nodes in input!");
6628 unsigned NumElts = VT.getVectorNumElements();
6629 SDValue V0_LO = Extract128BitVector(V0, 0, DAG, DL);
6630 SDValue V0_HI = Extract128BitVector(V0, NumElts/2, DAG, DL);
6631 SDValue V1_LO = Extract128BitVector(V1, 0, DAG, DL);
6632 SDValue V1_HI = Extract128BitVector(V1, NumElts/2, DAG, DL);
6633 EVT NewVT = V0_LO.getValueType();
6635 SDValue LO = DAG.getUNDEF(NewVT);
6636 SDValue HI = DAG.getUNDEF(NewVT);
  if (Mode) {
6639 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6640 if (!isUndefLO && V0->getOpcode() != ISD::UNDEF)
6641 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
6642 if (!isUndefHI && V1->getOpcode() != ISD::UNDEF)
6643 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
  } else {
6645 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6646 if (!isUndefLO && (V0_LO->getOpcode() != ISD::UNDEF ||
6647 V1_LO->getOpcode() != ISD::UNDEF))
6648 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);

6650 if (!isUndefHI && (V0_HI->getOpcode() != ISD::UNDEF ||
6651 V1_HI->getOpcode() != ISD::UNDEF))
6652 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
  }
6655 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
}
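// For example, with Mode unset a v8i32 horizontal add of V0 and V1 becomes
//   LO = HADD(V0_LO, V1_LO); HI = HADD(V0_HI, V1_HI)
// followed by the concat_vectors above, i.e. roughly two 128-bit phaddd
// instructions plus a vinsertf128.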
6658 /// \brief Try to fold a build_vector that performs an 'addsub' into the
6659 /// sequence of 'vadd + vsub + blendi'.
6660 static SDValue matchAddSub(const BuildVectorSDNode *BV, SelectionDAG &DAG,
6661 const X86Subtarget *Subtarget) {
  SDLoc DL(BV);
6663 EVT VT = BV->getValueType(0);
6664 unsigned NumElts = VT.getVectorNumElements();
6665 SDValue InVec0 = DAG.getUNDEF(VT);
6666 SDValue InVec1 = DAG.getUNDEF(VT);
6668 assert((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
6669 VT == MVT::v2f64) && "build_vector with an invalid type found!");
6671 // Odd-numbered elements in the input build vector are obtained from
6672 // adding two integer/float elements.
6673 // Even-numbered elements in the input build vector are obtained from
6674 // subtracting two integer/float elements.
6675 unsigned ExpectedOpcode = ISD::FSUB;
6676 unsigned NextExpectedOpcode = ISD::FADD;
6677 bool AddFound = false;
6678 bool SubFound = false;
6680 for (unsigned i = 0, e = NumElts; i != e; i++) {
6681 SDValue Op = BV->getOperand(i);
6683 // Skip 'undef' values.
6684 unsigned Opcode = Op.getOpcode();
6685 if (Opcode == ISD::UNDEF) {
6686 std::swap(ExpectedOpcode, NextExpectedOpcode);
      continue;
    }
6690 // Early exit if we found an unexpected opcode.
6691 if (Opcode != ExpectedOpcode)
      return SDValue();
6694 SDValue Op0 = Op.getOperand(0);
6695 SDValue Op1 = Op.getOperand(1);
6697 // Try to match the following pattern:
6698 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
6699 // Early exit if we cannot match that sequence.
6700 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6701 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6702 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
6703 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
6704 Op0.getOperand(1) != Op1.getOperand(1))
      return SDValue();
6707 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
    if (I0 != i)
      return SDValue();

6711 // We found a valid add/sub node. Update the information accordingly.
    if (i & 1)
      AddFound = true;
    else
      SubFound = true;
6717 // Update InVec0 and InVec1.
6718 if (InVec0.getOpcode() == ISD::UNDEF)
6719 InVec0 = Op0.getOperand(0);
6720 if (InVec1.getOpcode() == ISD::UNDEF)
6721 InVec1 = Op1.getOperand(0);
6723 // Make sure that the operands of each add/sub node always come from
6724 // the same pair of vectors.
6725 if (InVec0 != Op0.getOperand(0)) {
6726 if (ExpectedOpcode == ISD::FSUB)
        return SDValue();
6729 // FADD is commutable. Try to commute the operands
6730 // and then test again.
6731 std::swap(Op0, Op1);
6732 if (InVec0 != Op0.getOperand(0))
        return SDValue();
    }
6736 if (InVec1 != Op1.getOperand(0))
      return SDValue();
6739 // Update the pair of expected opcodes.
6740 std::swap(ExpectedOpcode, NextExpectedOpcode);
  }
6743 // Don't try to fold this build_vector into an ADDSUB if the inputs are undef.
6744 if (AddFound && SubFound && InVec0.getOpcode() != ISD::UNDEF &&
6745 InVec1.getOpcode() != ISD::UNDEF)
6746 return DAG.getNode(X86ISD::ADDSUB, DL, VT, InVec0, InVec1);

  return SDValue();
}
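// For illustration (operands chosen for exposition): a v4f32 build_vector of
//   (fsub (extract A, 0), (extract B, 0)), (fadd (extract A, 1), (extract B, 1)),
//   (fsub (extract A, 2), (extract B, 2)), (fadd (extract A, 3), (extract B, 3))
// is folded here to (X86ISD::ADDSUB A, B), which selects to addsubps on
// SSE3-capable targets.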
6751 static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
6752 const X86Subtarget *Subtarget) {
  SDLoc DL(N);
6754 EVT VT = N->getValueType(0);
6755 unsigned NumElts = VT.getVectorNumElements();
6756 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);
6757 SDValue InVec0, InVec1;
6759 // Try to match an ADDSUB.
6760 if ((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
6761 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
6762 SDValue Value = matchAddSub(BV, DAG, Subtarget);
6763 if (Value.getNode())
      return Value;
  }
6767 // Try to match horizontal ADD/SUB.
6768 unsigned NumUndefsLO = 0;
6769 unsigned NumUndefsHI = 0;
6770 unsigned Half = NumElts/2;
6772 // Count the number of UNDEF operands in the build_vector in input.
6773 for (unsigned i = 0, e = Half; i != e; ++i)
6774 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
      NumUndefsLO++;
6777 for (unsigned i = Half, e = NumElts; i != e; ++i)
6778 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
      NumUndefsHI++;
6781 // Early exit if this is either a build_vector of all UNDEFs or all the
6782 // operands but one are UNDEF.
6783 if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
    return SDValue();
6786 if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget->hasSSE3()) {
6787 // Try to match an SSE3 float HADD/HSUB.
6788 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6789 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6791 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6792 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6793 } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) {
6794 // Try to match an SSSE3 integer HADD/HSUB.
6795 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6796 return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
6798 if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6799 return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
  }
6802 if (!Subtarget->hasAVX())
    return SDValue();
6805 if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
6806 // Try to match an AVX horizontal add/sub of packed single/double
6807 // precision floating point values from 256-bit vectors.
6808 SDValue InVec2, InVec3;
6809 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
6810 isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
6811 ((InVec0.getOpcode() == ISD::UNDEF ||
6812 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6813 ((InVec1.getOpcode() == ISD::UNDEF ||
6814 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6815 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6817 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
6818 isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
6819 ((InVec0.getOpcode() == ISD::UNDEF ||
6820 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6821 ((InVec1.getOpcode() == ISD::UNDEF ||
6822 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6823 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6824 } else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
6825 // Try to match an AVX2 horizontal add/sub of signed integers.
6826 SDValue InVec2, InVec3;
    unsigned X86Opcode;
6828 bool CanFold = true;
6830 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
6831 isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
6832 ((InVec0.getOpcode() == ISD::UNDEF ||
6833 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6834 ((InVec1.getOpcode() == ISD::UNDEF ||
6835 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6836 X86Opcode = X86ISD::HADD;
6837 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
6838 isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
6839 ((InVec0.getOpcode() == ISD::UNDEF ||
6840 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6841 ((InVec1.getOpcode() == ISD::UNDEF ||
6842 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6843 X86Opcode = X86ISD::HSUB;
    else
      CanFold = false;

    if (CanFold) {
6848 // Fold this build_vector into a single horizontal add/sub.
6849 // Do this only if the target has AVX2.
6850 if (Subtarget->hasAVX2())
6851 return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
6853 // Do not try to expand this build_vector into a pair of horizontal
6854 // add/sub if we can emit a pair of scalar add/sub.
6855 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
        return SDValue();
6858 // Convert this build_vector into a pair of horizontal binops followed by
      // a concat vector.
6860 bool isUndefLO = NumUndefsLO == Half;
6861 bool isUndefHI = NumUndefsHI == Half;
6862 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
6863 isUndefLO, isUndefHI);
    }
  }
6867 if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
6868 VT == MVT::v16i16) && Subtarget->hasAVX()) {
    unsigned X86Opcode;
6870 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6871 X86Opcode = X86ISD::HADD;
6872 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6873 X86Opcode = X86ISD::HSUB;
6874 else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6875 X86Opcode = X86ISD::FHADD;
6876 else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6877 X86Opcode = X86ISD::FHSUB;
    else
      return SDValue();
6881 // Don't try to expand this build_vector into a pair of horizontal add/sub
6882 // if we can simply emit a pair of scalar add/sub.
6883 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
      return SDValue();
6886 // Convert this build_vector into two horizontal add/sub followed by
    // a concat vector.
6888 bool isUndefLO = NumUndefsLO == Half;
6889 bool isUndefHI = NumUndefsHI == Half;
6890 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
6891 isUndefLO, isUndefHI);
  }

  return SDValue();
}
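// For the whole-vector matches just above (Mode == true), elements [0, Half)
// pair up within InVec0 and elements [Half, NumElts) pair up within InVec1,
// so e.g. a matching v8f32 node expands to LO = FHADD(V0_LO, V0_HI) and
// HI = FHADD(V1_LO, V1_HI) followed by a 256-bit concat.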
SDValue
6898 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
6901 MVT VT = Op.getSimpleValueType();
6902 MVT ExtVT = VT.getVectorElementType();
6903 unsigned NumElems = Op.getNumOperands();
6905 // Generate vectors for predicate vectors.
6906 if (VT.getScalarType() == MVT::i1 && Subtarget->hasAVX512())
6907 return LowerBUILD_VECTORvXi1(Op, DAG);
6909 // Vectors containing all zeros can be matched by pxor and xorps later
6910 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6911 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
6912 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
6913 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
      return Op;
6916 return getZeroVector(VT, Subtarget, DAG, dl);
  }
6919 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
6920 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
6921 // vpcmpeqd on 256-bit vectors.
6922 if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
6923 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256()))
      return Op;
6926 if (!VT.is512BitVector())
6927 return getOnesVector(VT, Subtarget->hasInt256(), DAG, dl);
  }
6930 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
6931 if (Broadcast.getNode())
    return Broadcast;
6934 unsigned EVTBits = ExtVT.getSizeInBits();
6936 unsigned NumZero = 0;
6937 unsigned NumNonZero = 0;
6938 unsigned NonZeros = 0;
6939 bool IsAllConstants = true;
6940 SmallSet<SDValue, 8> Values;
6941 for (unsigned i = 0; i < NumElems; ++i) {
6942 SDValue Elt = Op.getOperand(i);
6943 if (Elt.getOpcode() == ISD::UNDEF)
      continue;
    Values.insert(Elt);
6946 if (Elt.getOpcode() != ISD::Constant &&
6947 Elt.getOpcode() != ISD::ConstantFP)
6948 IsAllConstants = false;
6949 if (X86::isZeroNode(Elt))
      NumZero++;
    else {
6952 NonZeros |= (1 << i);
      NumNonZero++;
    }
6957 // All undef vector. Return an UNDEF. All zero vectors were handled above.
6958 if (NumNonZero == 0)
6959 return DAG.getUNDEF(VT);
6961 // Special case for single non-zero, non-undef, element.
6962 if (NumNonZero == 1) {
6963 unsigned Idx = countTrailingZeros(NonZeros);
6964 SDValue Item = Op.getOperand(Idx);
6966 // If this is an insertion of an i64 value on x86-32, and if the top bits of
6967 // the value are obviously zero, truncate the value to i32 and do the
6968 // insertion that way. Only do this if the value is non-constant or if the
6969 // value is a constant being inserted into element 0. It is cheaper to do
6970 // a constant pool load than it is to do a movd + shuffle.
6971 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
6972 (!IsAllConstants || Idx == 0)) {
6973 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
6975 assert(VT == MVT::v2i64 && "Expected an SSE value type!");
6976 EVT VecVT = MVT::v4i32;
6977 unsigned VecElts = 4;
6979 // Truncate the value (which may itself be a constant) to i32, and
6980 // convert it to a vector with movd (S2V+shuffle to zero extend).
6981 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
6982 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
6984 // If using the new shuffle lowering, just directly insert this.
6985 if (ExperimentalVectorShuffleLowering)
        return DAG.getNode(
6987 ISD::BITCAST, dl, VT,
6988 getShuffleVectorZeroOrUndef(Item, Idx * 2, true, Subtarget, DAG));
6990 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6992 // Now we have our 32-bit value zero extended in the low element of
6993 // a vector. If Idx != 0, swizzle it into place.
      if (Idx != 0) {
6995 SmallVector<int, 4> Mask;
6996 Mask.push_back(Idx);
6997 for (unsigned i = 1; i != VecElts; ++i)
          Mask.push_back(i);
6999 Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT),
                                    &Mask[0]);
      }
7002 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
    }
  }
7006 // If we have a constant or non-constant insertion into the low element of
7007 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
7008 // the rest of the elements. This will be matched as movd/movq/movss/movsd
7009 // depending on what the source datatype is.
    if (Idx == 0) {
      if (NumZero == 0)
7012 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7014 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
7015 (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
7016 if (VT.is256BitVector() || VT.is512BitVector()) {
7017 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
7018 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
7019 Item, DAG.getIntPtrConstant(0));
      }
7021 assert(VT.is128BitVector() && "Expected an SSE value type!");
7022 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7023 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
7024 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
      }
7027 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
7028 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
7029 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
7030 if (VT.is256BitVector()) {
7031 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
7032 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
        } else {
7034 assert(VT.is128BitVector() && "Expected an SSE value type!");
7035 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
        }
7037 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
      }
    }
7041 // Is it a vector logical left shift?
7042 if (NumElems == 2 && Idx == 1 &&
7043 X86::isZeroNode(Op.getOperand(0)) &&
7044 !X86::isZeroNode(Op.getOperand(1))) {
7045 unsigned NumBits = VT.getSizeInBits();
7046 return getVShift(true, VT,
7047 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
7048 VT, Op.getOperand(1)),
7049 NumBits/2, DAG, *this, dl);
    }
7052 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
      return SDValue();
7055 // Otherwise, if this is a vector with i32 or f32 elements, and the element
7056 // is a non-constant being inserted into an element other than the low one,
7057 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
7058 // movd/movss) to move this into the low element, then shuffle it into
    // place.
7060 if (EVTBits == 32) {
7061 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7063 // If using the new shuffle lowering, just directly insert this.
7064 if (ExperimentalVectorShuffleLowering)
7065 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
7067 // Turn it into a shuffle of zero and zero-extended scalar to vector.
7068 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG);
7069 SmallVector<int, 8> MaskVec;
7070 for (unsigned i = 0; i != NumElems; ++i)
7071 MaskVec.push_back(i == Idx ? 0 : 1);
7072 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
    }
  }
7076 // Splat is obviously ok. Let legalizer expand it to a shuffle.
7077 if (Values.size() == 1) {
7078 if (EVTBits == 32) {
7079 // Instead of a shuffle like this:
7080 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
7081 // Check if it's possible to issue this instead.
7082 // shuffle (vload ptr)), undef, <1, 1, 1, 1>
7083 unsigned Idx = countTrailingZeros(NonZeros);
7084 SDValue Item = Op.getOperand(Idx);
7085 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
7086 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
    }
    return SDValue();
  }
7091 // A vector full of immediates; various special cases are already
7092 // handled, so this is best done with a single constant-pool load.
  if (IsAllConstants)
    return SDValue();
7096 // For AVX-length vectors, see if we can use a vector load to get all of the
7097 // elements, otherwise build the individual 128-bit pieces and use
7098 // shuffles to put them in place.
7099 if (VT.is256BitVector() || VT.is512BitVector()) {
7100 SmallVector<SDValue, 64> V;
7101 for (unsigned i = 0; i != NumElems; ++i)
7102 V.push_back(Op.getOperand(i));
7104 // Check for a build vector of consecutive loads.
7105 if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
      return LD;
7108 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
7110 // Build both the lower and upper subvector.
7111 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7112 makeArrayRef(&V[0], NumElems/2));
7113 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7114 makeArrayRef(&V[NumElems / 2], NumElems/2));
7116 // Recreate the wider vector with the lower and upper part.
7117 if (VT.is256BitVector())
7118 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7119 return Concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
  }
7122 // Let legalizer expand 2-wide build_vectors.
7123 if (EVTBits == 64) {
7124 if (NumNonZero == 1) {
7125 // One half is zero or undef.
7126 unsigned Idx = countTrailingZeros(NonZeros);
7127 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
7128 Op.getOperand(Idx));
7129 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
    }
    return SDValue();
  }
7134 // If element VT is < 32 bits, convert it to inserts into a zero vector.
7135 if (EVTBits == 8 && NumElems == 16) {
7136 SDValue V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero, DAG,
                                      Subtarget, *this);
7138 if (V.getNode()) return V;
  }
7141 if (EVTBits == 16 && NumElems == 8) {
7142 SDValue V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero, DAG,
                                      Subtarget, *this);
7144 if (V.getNode()) return V;
  }
7147 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
7148 if (EVTBits == 32 && NumElems == 4) {
7149 SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget, *this);
    if (V.getNode())
      return V;
  }
7154 // If element VT is == 32 bits, turn it into a number of shuffles.
7155 SmallVector<SDValue, 8> V(NumElems);
7156 if (NumElems == 4 && NumZero > 0) {
7157 for (unsigned i = 0; i < 4; ++i) {
7158 bool isZero = !(NonZeros & (1 << i));
      if (isZero)
7160 V[i] = getZeroVector(VT, Subtarget, DAG, dl);
      else
7162 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
    }
7165 for (unsigned i = 0; i < 2; ++i) {
7166 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
      default: break;
      case 0:
7169 V[i] = V[i*2]; // Must be a zero vector.
        break;
      case 1:
7172 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
        break;
      case 2:
7175 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
        break;
      case 3:
7178 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
        break;
      }
    }
7183 bool Reverse1 = (NonZeros & 0x3) == 2;
7184 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
    int MaskVec[] = {
      Reverse1 ? 1 : 0,
      Reverse1 ? 0 : 1,
7188 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
7189 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
    };
7191 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
  }
7194 if (Values.size() > 1 && VT.is128BitVector()) {
7195 // Check for a build vector of consecutive loads.
7196 for (unsigned i = 0; i < NumElems; ++i)
7197 V[i] = Op.getOperand(i);
7199 // Check for elements which are consecutive loads.
7200 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false);
    if (LD.getNode())
      return LD;
7204 // Check for a build vector from mostly shuffle plus few inserting.
7205 SDValue Sh = buildFromShuffleMostly(Op, DAG);
    if (Sh.getNode())
      return Sh;
7209 // For SSE 4.1, use insertps to put the high elements into the low element.
7210 if (getSubtarget()->hasSSE41()) {
      SDValue Result;
7212 if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
7213 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
      else
7215 Result = DAG.getUNDEF(VT);
7217 for (unsigned i = 1; i < NumElems; ++i) {
7218 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
7219 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
7220 Op.getOperand(i), DAG.getIntPtrConstant(i));
      }
      return Result;
    }
7225 // Otherwise, expand into a number of unpckl*, start by extending each of
7226 // our (non-undef) elements to the full vector width with the element in the
7227 // bottom slot of the vector (which generates no code for SSE).
7228 for (unsigned i = 0; i < NumElems; ++i) {
7229 if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
7230 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
    else
7232 V[i] = DAG.getUNDEF(VT);
  }
7235 // Next, we iteratively mix elements, e.g. for v4f32:
7236 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
7237 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
7238 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
7239 unsigned EltStride = NumElems >> 1;
7240 while (EltStride != 0) {
7241 for (unsigned i = 0; i < EltStride; ++i) {
7242 // If V[i+EltStride] is undef and this is the first round of mixing,
7243 // then it is safe to just drop this shuffle: V[i] is already in the
7244 // right place, the one element (since it's the first round) being
7245 // inserted as undef can be dropped. This isn't safe for successive
7246 // rounds because they will permute elements within both vectors.
7247 if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
7248 EltStride == NumElems/2)
        continue;
7251 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
    }
    EltStride >>= 1;
  }

  return V[0];
}
7260 // LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction
7261 // to create 256-bit vectors from two other 128-bit ones.
7262 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
7264 MVT ResVT = Op.getSimpleValueType();
7266 assert((ResVT.is256BitVector() ||
7267 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
7269 SDValue V1 = Op.getOperand(0);
7270 SDValue V2 = Op.getOperand(1);
7271 unsigned NumElems = ResVT.getVectorNumElements();
7272 if (ResVT.is256BitVector())
7273 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7275 if (Op.getNumOperands() == 4) {
7276 MVT HalfVT = MVT::getVectorVT(ResVT.getScalarType(),
7277 ResVT.getVectorNumElements()/2);
7278 SDValue V3 = Op.getOperand(2);
7279 SDValue V4 = Op.getOperand(3);
7280 return Concat256BitVectors(
        Concat128BitVectors(V1, V2, HalfVT, NumElems / 2, DAG, dl),
        Concat128BitVectors(V3, V4, HalfVT, NumElems / 2, DAG, dl),
        ResVT, NumElems, DAG, dl);
  }
7283 return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
}
7286 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7287 MVT LLVM_ATTRIBUTE_UNUSED VT = Op.getSimpleValueType();
7288 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
7289 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
7290 Op.getNumOperands() == 4)));
7292 // AVX can use the vinsertf128 instruction to create 256-bit vectors
7293 // from two other 128-bit ones.
7295 // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
7296 return LowerAVXCONCAT_VECTORS(Op, DAG);
}
7300 //===----------------------------------------------------------------------===//
7301 // Vector shuffle lowering
7303 // This is an experimental code path for lowering vector shuffles on x86. It is
7304 // designed to handle arbitrary vector shuffles and blends, gracefully
7305 // degrading performance as necessary. It works hard to recognize idiomatic
7306 // shuffles and lower them to optimal instruction patterns without leaving
7307 // a framework that allows reasonably efficient handling of all vector shuffle
// patterns.
7309 //===----------------------------------------------------------------------===//
7311 /// \brief Tiny helper function to identify a no-op mask.
7313 /// This is a somewhat boring predicate function. It checks whether the mask
7314 /// array input, which is assumed to be a single-input shuffle mask of the kind
7315 /// used by the X86 shuffle instructions (not a fully general
7316 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
7317 /// in-place shuffle are 'no-op's.
7318 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
7319 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7320 if (Mask[i] != -1 && Mask[i] != i)
      return false;

  return true;
}
7325 /// \brief Helper function to classify a mask as a single-input mask.
7327 /// This isn't a generic single-input test because in the vector shuffle
7328 /// lowering we canonicalize single inputs to be the first input operand. This
7329 /// means we can more quickly test for a single input by only checking whether
7330 /// an input from the second operand exists. We also assume that the size of
7331 /// mask corresponds to the size of the input vectors which isn't true in the
7332 /// fully general case.
7333 static bool isSingleInputShuffleMask(ArrayRef<int> Mask) {
  for (int M : Mask)
7335 if (M >= (int)Mask.size())
      return false;
  return true;
}
7340 /// \brief Test whether there are elements crossing 128-bit lanes in this
7343 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
7344 /// and we routinely test for these.
7345 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
7346 int LaneSize = 128 / VT.getScalarSizeInBits();
7347 int Size = Mask.size();
7348 for (int i = 0; i < Size; ++i)
7349 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
      return true;
  return false;
}
7354 /// \brief Test whether a shuffle mask is equivalent within each 128-bit lane.
7356 /// This checks a shuffle mask to see if it is performing the same
7357 /// 128-bit lane-relative shuffle in each 128-bit lane. This trivially implies
7358 /// that it is also not lane-crossing. It may however involve a blend from the
7359 /// same lane of a second vector.
7361 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
7362 /// non-trivial to compute in the face of undef lanes. The representation is
7363 /// *not* suitable for use with existing 128-bit shuffles as it will contain
7364 /// entries from both V1 and V2 inputs to the wider mask.
static bool
7366 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
7367 SmallVectorImpl<int> &RepeatedMask) {
7368 int LaneSize = 128 / VT.getScalarSizeInBits();
7369 RepeatedMask.resize(LaneSize, -1);
7370 int Size = Mask.size();
7371 for (int i = 0; i < Size; ++i) {
    if (Mask[i] < 0)
      continue;
7374 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
7375 // This entry crosses lanes, so there is no way to model this shuffle.
      return false;
7378 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
7379 if (RepeatedMask[i % LaneSize] == -1)
7380 // This is the first non-undef entry in this slot of a 128-bit lane.
7381 RepeatedMask[i % LaneSize] =
7382 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + Size;
7383 else if (RepeatedMask[i % LaneSize] + (i / LaneSize) * LaneSize != Mask[i])
7384 // Found a mismatch with the repeated mask.
      return false;
  }

  return true;
}
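// For example, the v8f32 mask <0,8,2,10,4,12,6,14> repeats across both
// 128-bit lanes and yields RepeatedMask = <0,8,2,10>, where entries >= 8
// denote elements taken from V2.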
7390 // Hide this symbol with an anonymous namespace instead of 'static' so that MSVC
7391 // 2013 will allow us to use it as a non-type template parameter.
namespace {
7394 /// \brief Implementation of the \c isShuffleEquivalent variadic functor.
7396 /// See its documentation for details.
7397 bool isShuffleEquivalentImpl(ArrayRef<int> Mask, ArrayRef<const int *> Args) {
7398 if (Mask.size() != Args.size())
7400 for (int i = 0, e = Mask.size(); i < e; ++i) {
7401 assert(*Args[i] >= 0 && "Arguments must be positive integers!");
7402 if (Mask[i] != -1 && Mask[i] != *Args[i])
      return false;
  }
  return true;
}
} // namespace
7410 /// \brief Checks whether a shuffle mask is equivalent to an explicit list of
7413 /// This is a fast way to test a shuffle mask against a fixed pattern:
7415 /// if (isShuffleEquivalent(Mask, 3, 2, 1, 0)) { ... }
7417 /// It returns true if the mask is exactly as wide as the argument list, and
7418 /// each element of the mask is either -1 (signifying undef) or the value given
7419 /// in the argument.
7420 static const VariadicFunction1<
7421 bool, ArrayRef<int>, int, isShuffleEquivalentImpl> isShuffleEquivalent = {};
7423 /// \brief Get a 4-lane 8-bit shuffle immediate for a mask.
7425 /// This helper function produces an 8-bit shuffle immediate corresponding to
7426 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
7427 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
/// many vector types.
7430 /// NB: We rely heavily on "undef" masks preserving the input lane.
7431 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask,
7432 SelectionDAG &DAG) {
7433 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
7434 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
7435 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
7436 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
7437 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
  unsigned Imm = 0;
7440 Imm |= (Mask[0] == -1 ? 0 : Mask[0]) << 0;
7441 Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2;
7442 Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4;
7443 Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6;
7444 return DAG.getConstant(Imm, MVT::i8);
}
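// For example, the reversal mask <3,2,1,0> encodes as
//   (3 << 0) | (2 << 2) | (1 << 4) | (0 << 6) = 0x1B,
// the familiar immediate for a full reverse with shufps/pshufd.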
7447 /// \brief Try to emit a blend instruction for a shuffle.
7449 /// This doesn't do any checks for the availability of instructions for blending
7450 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
7451 /// be matched in the backend with the type given. What it does check for is
7452 /// that the shuffle mask is in fact a blend.
7453 static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
7454 SDValue V2, ArrayRef<int> Mask,
7455 const X86Subtarget *Subtarget,
7456 SelectionDAG &DAG) {
7458 unsigned BlendMask = 0;
7459 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7460 if (Mask[i] >= Size) {
7461 if (Mask[i] != i + Size)
7462 return SDValue(); // Shuffled V2 input!
7463 BlendMask |= 1u << i;
      continue;
    }
7466 if (Mask[i] >= 0 && Mask[i] != i)
7467 return SDValue(); // Shuffled V1 input!
7469 switch (VT.SimpleTy) {
  case MVT::v2f64:
  case MVT::v4f32:
  case MVT::v4f64:
  case MVT::v8f32:
7474 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
7475 DAG.getConstant(BlendMask, MVT::i8));
  case MVT::v4i64:
  case MVT::v8i32:
7479 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
    // FALLTHROUGH
  case MVT::v2i64:
  case MVT::v4i32:
7483 // If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
7484 // that instruction.
7485 if (Subtarget->hasAVX2()) {
7486 // Scale the blend by the number of 32-bit dwords per element.
7487 int Scale = VT.getScalarSizeInBits() / 32;
      BlendMask = 0;
7489 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7490 if (Mask[i] >= Size)
7491 for (int j = 0; j < Scale; ++j)
7492 BlendMask |= 1u << (i * Scale + j);
7494 MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
7495 V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
7496 V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
7497 return DAG.getNode(ISD::BITCAST, DL, VT,
7498 DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
7499 DAG.getConstant(BlendMask, MVT::i8)));
    }
    // FALLTHROUGH
  case MVT::v8i16: {
7503 // For integer shuffles we need to expand the mask and cast the inputs to
7504 // v8i16s prior to blending.
7505 int Scale = 8 / VT.getVectorNumElements();
    BlendMask = 0;
7507 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7508 if (Mask[i] >= Size)
7509 for (int j = 0; j < Scale; ++j)
7510 BlendMask |= 1u << (i * Scale + j);
7512 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
7513 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
7514 return DAG.getNode(ISD::BITCAST, DL, VT,
7515 DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
7516 DAG.getConstant(BlendMask, MVT::i8)));
  }

  case MVT::v16i16: {
7520 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7521 SmallVector<int, 8> RepeatedMask;
7522 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
7523 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
7524 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
      BlendMask = 0;
7526 for (int i = 0; i < 8; ++i)
7527 if (RepeatedMask[i] >= 16)
7528 BlendMask |= 1u << i;
7529 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
7530 DAG.getConstant(BlendMask, MVT::i8));
    }
  }
    // FALLTHROUGH
  case MVT::v32i8: {
7535 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7536 // Scale the blend by the number of bytes per element.
7537 int Scale = VT.getScalarSizeInBits() / 8;
7538 assert(Mask.size() * Scale == 32 && "Not a 256-bit vector!");
7540 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
7541 // mix of LLVM's code generator and the x86 backend. We tell the code
7542 // generator that boolean values in the elements of an x86 vector register
7543 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
7544 // mapping a select to operand #1, and 'false' mapping to operand #2. The
7545 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
7546 // of the element (the remaining are ignored) and 0 in that high bit would
7547 // mean operand #1 while 1 in the high bit would mean operand #2. So while
7548 // the LLVM model for boolean values in vector elements gets the relevant
7549 // bit set, it is set backwards and over constrained relative to x86's
  // actual model.
7551 SDValue VSELECTMask[32];
7552 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7553 for (int j = 0; j < Scale; ++j)
7554 VSELECTMask[Scale * i + j] =
7555 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
7556 : DAG.getConstant(Mask[i] < Size ? -1 : 0, MVT::i8);
7558 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1);
7559 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V2);
    return DAG.getNode(
7561 ISD::BITCAST, DL, VT,
7562 DAG.getNode(ISD::VSELECT, DL, MVT::v32i8,
7563 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, VSELECTMask),
                    V1, V2));
  }

  default:
7568 llvm_unreachable("Not a supported integer vector type!");
  }
}
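// For example, the v4f32 mask <0,5,2,7> is a pure blend taking elements 1
// and 3 from V2, so BlendMask is 0b1010 and the result is
// (X86ISD::BLENDI V1, V2, 0xA), i.e. blendps $0xA on SSE4.1-class targets.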
7572 /// \brief Generic routine to lower a shuffle and blend as a decomposed set of
7573 /// unblended shuffles followed by an unshuffled blend.
7575 /// This matches the extremely common pattern for handling combined
7576 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
7578 static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
                                                          SDValue V1,
                                                          SDValue V2,
                                                          ArrayRef<int> Mask,
7582 SelectionDAG &DAG) {
7583 // Shuffle the input elements into the desired positions in V1 and V2 and
7584 // blend them together.
7585 SmallVector<int, 32> V1Mask(Mask.size(), -1);
7586 SmallVector<int, 32> V2Mask(Mask.size(), -1);
7587 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7588 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7589 if (Mask[i] >= 0 && Mask[i] < Size) {
7590 V1Mask[i] = Mask[i];
      BlendMask[i] = i;
7592 } else if (Mask[i] >= Size) {
7593 V2Mask[i] = Mask[i] - Size;
7594 BlendMask[i] = i + Size;
7597 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
7598 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
7599 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
}
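// For illustration: the v4i32 mask <2,4,3,7> decomposes into
//   V1Mask = <2,u,3,u>, V2Mask = <u,0,u,3>, BlendMask = <0,5,2,7>,
// i.e. two single-input shuffles followed by a blend that takes lanes 1
// and 3 from the shuffled V2.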
7602 /// \brief Try to lower a vector shuffle as a byte rotation.
7604 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
7605 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
7606 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
7607 /// try to generically lower a vector shuffle through such a pattern. It
7608 /// does not check for the profitability of lowering either as PALIGNR or
7609 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
7610 /// This matches shuffle vectors that look like:
7612 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
7614 /// Essentially it concatenates V1 and V2, shifts right by some number of
7615 /// elements, and takes the low elements as the result. Note that while this is
7616 /// specified as a *right shift* because x86 is little-endian, it is a *left
7617 /// rotate* of the vector lanes.
7619 /// Note that this only handles 128-bit vector widths currently.
7620 static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
                                              SDValue V2,
                                              ArrayRef<int> Mask,
7623 const X86Subtarget *Subtarget,
7624 SelectionDAG &DAG) {
7625 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7627 // We need to detect various ways of spelling a rotation:
7628 // [11, 12, 13, 14, 15, 0, 1, 2]
7629 // [-1, 12, 13, 14, -1, -1, 1, -1]
7630 // [-1, -1, -1, -1, -1, -1, 1, 2]
7631 // [ 3, 4, 5, 6, 7, 8, 9, 10]
7632 // [-1, 4, 5, 6, -1, -1, 9, -1]
7633 // [-1, 4, 5, 6, -1, -1, -1, -1]
  int Rotation = 0;
  SDValue Lo, Hi;
7636 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Mask[i] == -1)
      continue;
7639 assert(Mask[i] >= 0 && "Only -1 is a valid negative mask element!");
7641 // Based on the mod-Size value of this mask element determine where
7642 // a rotated vector would have started.
7643 int StartIdx = i - (Mask[i] % Size);
7645 // The identity rotation isn't interesting, stop.
    if (StartIdx == 0)
      return SDValue();
7648 // If we found the tail of a vector the rotation must be the missing
7649 // front. If we found the head of a vector, it must be how much of the head.
7650 int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;
    if (Rotation == 0)
7653 Rotation = CandidateRotation;
7654 else if (Rotation != CandidateRotation)
7655 // The rotations don't match, so we can't match this mask.
      return SDValue();
7658 // Compute which value this mask is pointing at.
7659 SDValue MaskV = Mask[i] < Size ? V1 : V2;
7661 // Compute which of the two target values this index should be assigned to.
7662 // This reflects whether the high elements are remaining or the low elements
    // are remaining.
7664 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
7666 // Either set up this value if we've not encountered it before, or check
7667 // that it remains consistent.
    if (!TargetV)
      TargetV = MaskV;
7670 else if (TargetV != MaskV)
7671 // This may be a rotation, but it pulls from the inputs in some
7672 // unsupported interleaving.
      return SDValue();
  }
7676 // Check that we successfully analyzed the mask, and normalize the results.
7677 assert(Rotation != 0 && "Failed to locate a viable rotation!");
7678 assert((Lo || Hi) && "Failed to find a rotated input vector!");
  if (!Lo)
    Lo = Hi;
  else if (!Hi)
    Hi = Lo;
7684 assert(VT.getSizeInBits() == 128 &&
7685 "Rotate-based lowering only supports 128-bit lowering!");
7686 assert(Mask.size() <= 16 &&
7687 "Can shuffle at most 16 bytes in a 128-bit vector!");
7689 // The actual rotate instruction rotates bytes, so we need to scale the
7690 // rotation based on how many bytes are in the vector.
7691 int Scale = 16 / Mask.size();
7693 // SSSE3 targets can use the palignr instruction
7694 if (Subtarget->hasSSSE3()) {
7695 // Cast the inputs to v16i8 to match PALIGNR.
7696 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Lo);
7697 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Hi);
7699 return DAG.getNode(ISD::BITCAST, DL, VT,
7700 DAG.getNode(X86ISD::PALIGNR, DL, MVT::v16i8, Hi, Lo,
7701 DAG.getConstant(Rotation * Scale, MVT::i8)));
  }
7704 // Default SSE2 implementation
7705 int LoByteShift = 16 - Rotation * Scale;
7706 int HiByteShift = Rotation * Scale;
7708 // Cast the inputs to v2i64 to match PSLLDQ/PSRLDQ.
7709 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Lo);
7710 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Hi);
7712 SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo,
7713 DAG.getConstant(8 * LoByteShift, MVT::i8));
7714 SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi,
7715 DAG.getConstant(8 * HiByteShift, MVT::i8));
7716 return DAG.getNode(ISD::BITCAST, DL, VT,
7717 DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift));
}
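// For example, the v8i16 mask <11,12,13,14,15,0,1,2> is a rotation by three
// elements (six bytes) with Lo = V1 and Hi = V2: on SSSE3 it becomes
// (PALIGNR $6 Hi, Lo); on plain SSE2 it is emulated as roughly
// (Lo << 10 bytes) | (Hi >> 6 bytes) via PSLLDQ/PSRLDQ/POR.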
7720 /// \brief Compute whether each element of a shuffle is zeroable.
7722 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
7723 /// Either it is an undef element in the shuffle mask, the element of the input
7724 /// referenced is undef, or the element of the input referenced is known to be
7725 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
7726 /// as many lanes with this technique as possible to simplify the remaining
/// shuffle.
7728 static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
7729 SDValue V1, SDValue V2) {
7730 SmallBitVector Zeroable(Mask.size(), false);
7732 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
7733 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
7735 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    int M = Mask[i];
7737 // Handle the easy cases.
7738 if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
      Zeroable[i] = true;
      continue;
    }
7743 // If this is an index into a build_vector node, dig out the input value and
    // use it.
7745 SDValue V = M < Size ? V1 : V2;
7746 if (V.getOpcode() != ISD::BUILD_VECTOR)
      continue;
7749 SDValue Input = V.getOperand(M % Size);
7750 // The UNDEF opcode check really should be dead code here, but not quite
7751 // worth asserting on (it isn't invalid, just unexpected).
7752 if (Input.getOpcode() == ISD::UNDEF || X86::isZeroNode(Input))
      Zeroable[i] = true;
  }

  return Zeroable;
}
7759 /// \brief Try to lower a vector shuffle as a byte shift (shifts in zeros).
7761 /// Attempts to match a shuffle mask against the PSRLDQ and PSLLDQ SSE2
7762 /// byte-shift instructions. The mask must consist of a shifted sequential
7763 /// shuffle from one of the input vectors and zeroable elements for the
7764 /// remaining 'shifted in' elements.
7766 /// Note that this only handles 128-bit vector widths currently.
7767 static SDValue lowerVectorShuffleAsByteShift(SDLoc DL, MVT VT, SDValue V1,
7768 SDValue V2, ArrayRef<int> Mask,
7769 SelectionDAG &DAG) {
7770 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7772 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7774 int Size = Mask.size();
7775 int Scale = 16 / Size;
7777 for (int Shift = 1; Shift < Size; Shift++) {
7778 int ByteShift = Shift * Scale;
7780 // PSRLDQ : (little-endian) right byte shift
7781 // [ 5, 6, 7, zz, zz, zz, zz, zz]
7782 // [ -1, 5, 6, 7, zz, zz, zz, zz]
7783 // [ 1, 2, -1, -1, -1, -1, zz, zz]
7784 bool ZeroableRight = true;
7785 for (int i = Size - Shift; i < Size; i++) {
7786 ZeroableRight &= Zeroable[i];
    }
7789 if (ZeroableRight) {
7790 bool ValidShiftRight1 =
7791 isSequentialOrUndefInRange(Mask, 0, Size - Shift, Shift);
7792 bool ValidShiftRight2 =
7793 isSequentialOrUndefInRange(Mask, 0, Size - Shift, Size + Shift);
7795 if (ValidShiftRight1 || ValidShiftRight2) {
7796 // Cast the inputs to v2i64 to match PSRLDQ.
7797 SDValue &TargetV = ValidShiftRight1 ? V1 : V2;
7798 SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TargetV);
7799 SDValue Shifted = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, V,
7800 DAG.getConstant(ByteShift * 8, MVT::i8));
7801 return DAG.getNode(ISD::BITCAST, DL, VT, Shifted);
      }
    }
7805 // PSLLDQ : (little-endian) left byte shift
7806 // [ zz, 0, 1, 2, 3, 4, 5, 6]
7807 // [ zz, zz, -1, -1, 2, 3, 4, -1]
7808 // [ zz, zz, zz, zz, zz, zz, -1, 1]
7809 bool ZeroableLeft = true;
7810 for (int i = 0; i < Shift; i++) {
7811 ZeroableLeft &= Zeroable[i];
    }

    if (ZeroableLeft) {
7815 bool ValidShiftLeft1 =
7816 isSequentialOrUndefInRange(Mask, Shift, Size - Shift, 0);
7817 bool ValidShiftLeft2 =
7818 isSequentialOrUndefInRange(Mask, Shift, Size - Shift, Size);
7820 if (ValidShiftLeft1 || ValidShiftLeft2) {
7821 // Cast the inputs to v2i64 to match PSLLDQ.
7822 SDValue &TargetV = ValidShiftLeft1 ? V1 : V2;
7823 SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TargetV);
7824 SDValue Shifted = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, V,
7825 DAG.getConstant(ByteShift * 8, MVT::i8));
7826 return DAG.getNode(ISD::BITCAST, DL, VT, Shifted);
      }
    }
  }

  return SDValue();
}
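// For example, a v8i16 shuffle with mask <4,5,6,7,z,z,z,z>, where the high
// four lanes are zeroable, matches ValidShiftRight1 with Shift == 4 and is
// emitted as (PSRLDQ $8) on the v2i64 bitcast of V1.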
7834 /// \brief Lower a vector shuffle as a zero or any extension.
7836 /// Given a specific number of elements, element bit width, and extension
7837 /// stride, produce either a zero or any extension based on the available
7838 /// features of the subtarget.
7839 static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
7840 SDLoc DL, MVT VT, int NumElements, int Scale, bool AnyExt, SDValue InputV,
7841 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
7842 assert(Scale > 1 && "Need a scale to extend.");
7843 int EltBits = VT.getSizeInBits() / NumElements;
7844 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
7845 "Only 8, 16, and 32 bit elements can be extended.");
7846 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
7848 // Found a valid zext mask! Try various lowering strategies based on the
7849 // input type and available ISA extensions.
7850 if (Subtarget->hasSSE41()) {
7851 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
7852 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
7853 NumElements / Scale);
7854 InputV = DAG.getNode(ISD::BITCAST, DL, InputVT, InputV);
7855 return DAG.getNode(ISD::BITCAST, DL, VT,
7856 DAG.getNode(X86ISD::VZEXT, DL, ExtVT, InputV));
  }
7859 // For any extends we can cheat for larger element sizes and use shuffle
7860 // instructions that can fold with a load and/or copy.
7861 if (AnyExt && EltBits == 32) {
7862 int PSHUFDMask[4] = {0, -1, 1, -1};
    return DAG.getNode(
7864 ISD::BITCAST, DL, VT,
7865 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
7866 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
7867 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
  }
7869 if (AnyExt && EltBits == 16 && Scale > 2) {
7870 int PSHUFDMask[4] = {0, -1, 0, -1};
7871 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
7872 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
7873 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG));
7874 int PSHUFHWMask[4] = {1, -1, -1, -1};
    return DAG.getNode(
7876 ISD::BITCAST, DL, VT,
7877 DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16,
7878 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, InputV),
7879 getV4X86ShuffleImm8ForMask(PSHUFHWMask, DAG)));
  }
7882 // If this would require more than 2 unpack instructions to expand, use
7883 // pshufb when available. We can only use more than 2 unpack instructions
7884 // when zero extending i8 elements which also makes it easier to use pshufb.
7885 if (Scale > 4 && EltBits == 8 && Subtarget->hasSSSE3()) {
7886 assert(NumElements == 16 && "Unexpected byte vector width!");
7887 SDValue PSHUFBMask[16];
7888 for (int i = 0; i < 16; ++i)
      PSHUFBMask[i] =
7890 DAG.getConstant((i % Scale == 0) ? i / Scale : 0x80, MVT::i8);
7891 InputV = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, InputV);
7892 return DAG.getNode(ISD::BITCAST, DL, VT,
7893 DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
7894 DAG.getNode(ISD::BUILD_VECTOR, DL,
7895 MVT::v16i8, PSHUFBMask)));
  }
7898 // Otherwise emit a sequence of unpacks.
  do {
7900 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
7901 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
7902 : getZeroVector(InputVT, Subtarget, DAG, DL);
7903 InputV = DAG.getNode(ISD::BITCAST, DL, InputVT, InputV);
7904 InputV = DAG.getNode(X86ISD::UNPCKL, DL, InputVT, InputV, Ext);
    Scale /= 2;
    EltBits *= 2;
    NumElements /= 2;
7908 } while (Scale > 1);
7909 return DAG.getNode(ISD::BITCAST, DL, VT, InputV);
}
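// For example, zero-extending the low four bytes of a v16i8 to i32 lanes
// (EltBits == 8, Scale == 4) emits X86ISD::VZEXT, i.e. pmovzxbd, on SSE4.1;
// without SSE4.1 it falls back to the unpack loop above, roughly punpcklbw
// with a zero vector followed by punpcklwd with a zero vector.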
7912 /// \brief Try to lower a vector shuffle as a zero extension on any microarch.
7914 /// This routine will try to do everything in its power to cleverly lower
7915 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
7916 /// check for the profitability of this lowering, it tries to aggressively
7917 /// match this pattern. It will use all of the micro-architectural details it
7918 /// can to emit an efficient lowering. It handles both blends with all-zero
7919 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
7920 /// masking out later).
7922 /// The reason we have dedicated lowering for zext-style shuffles is that they
7923 /// are both incredibly common and often quite performance sensitive.
7924 static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
7925 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
7926 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
7927 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7929 int Bits = VT.getSizeInBits();
7930 int NumElements = Mask.size();
7932 // Define a helper function to check a particular ext-scale and lower to it if
  // valid.
7934 auto Lower = [&](int Scale) -> SDValue {
    SDValue InputV;
    bool AnyExt = true;
7937 for (int i = 0; i < NumElements; ++i) {
      if (Mask[i] == -1)
7939 continue; // Valid anywhere but doesn't tell us anything.
7940 if (i % Scale != 0) {
7941 // Each of the extended elements needs to be zeroable.
        if (!Zeroable[i])
          return SDValue();

7945 // We no longer are in the anyext case.
        AnyExt = false;
        continue;
      }
7950 // Each of the base elements needs to be consecutive indices into the
7951 // same input vector.
7952 SDValue V = Mask[i] < NumElements ? V1 : V2;
      if (!InputV)
        InputV = V;
7955 else if (InputV != V)
7956 return SDValue(); // Flip-flopping inputs.
7958 if (Mask[i] % NumElements != i / Scale)
7959 return SDValue(); // Non-consecutive strided elements.
    }
7962 // If we fail to find an input, we have a zero-shuffle which should always
7963 // have already been handled.
7964 // FIXME: Maybe handle this here in case during blending we end up with one?
7965 if (!InputV)
7966 return SDValue();
7968 return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
7969 DL, VT, NumElements, Scale, AnyExt, InputV, Subtarget, DAG);
7970 };
7972 // The widest scale possible for extending is to a 64-bit integer.
7973 assert(Bits % 64 == 0 &&
7974 "The number of bits in a vector must be divisible by 64 on x86!");
7975 int NumExtElements = Bits / 64;
7977 // Each iteration, try extending the elements half as much, but into twice as
7978 // many elements.
7979 for (; NumExtElements < NumElements; NumExtElements *= 2) {
7980 assert(NumElements % NumExtElements == 0 &&
7981 "The input vector size must be divisible by the extended size.");
7982 if (SDValue V = Lower(NumElements / NumExtElements))
7983 return V;
7984 }
7986 // No viable ext lowering found.
7987 return SDValue();
7988 }
7990 /// \brief Try to get a scalar value for a specific element of a vector.
7992 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
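///
/// For example, given (v4i32 (scalar_to_vector i32:%x)) and Idx == 0 this
/// returns %x, and for a BUILD_VECTOR it returns the operand at Idx. Bitcasts
/// are only looked through when they preserve the element size.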
7993 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
7994 SelectionDAG &DAG) {
7995 MVT VT = V.getSimpleValueType();
7996 MVT EltVT = VT.getVectorElementType();
7997 while (V.getOpcode() == ISD::BITCAST)
7998 V = V.getOperand(0);
7999 // If the bitcasts shift the element size, we can't extract an equivalent
8000 // element from it.
8001 MVT NewVT = V.getSimpleValueType();
8002 if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
8003 return SDValue();
8005 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8006 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR))
8007 return DAG.getNode(ISD::BITCAST, SDLoc(V), EltVT, V.getOperand(Idx));
8009 return SDValue();
8010 }
8012 /// \brief Helper to test for a load that can be folded with x86 shuffles.
8014 /// This is particularly important because the set of instructions varies
8015 /// significantly based on whether the operand is a load or not.
8016 static bool isShuffleFoldableLoad(SDValue V) {
8017 while (V.getOpcode() == ISD::BITCAST)
8018 V = V.getOperand(0);
8020 return ISD::isNON_EXTLoad(V.getNode());
8021 }
8023 /// \brief Try to lower insertion of a single element into a zero vector.
8025 /// This is a common pattern and we have especially efficient ways to lower it
8026 /// across all subtarget feature sets.
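///
/// A representative case: the v2f64 mask [2, 1] with all non-inserted V1
/// elements zeroable takes the low element of V2 into lane 0 and zeros the
/// rest, which is exactly the MOVSD/VZEXT_MOVL-style pattern handled below.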
8027 static SDValue lowerVectorShuffleAsElementInsertion(
8028 MVT VT, SDLoc DL, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8029 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8030 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8031 MVT ExtVT = VT;
8032 MVT EltVT = VT.getVectorElementType();
8034 int V2Index = std::find_if(Mask.begin(), Mask.end(),
8035 [&Mask](int M) { return M >= (int)Mask.size(); }) -
8036 Mask.begin();
8037 bool IsV1Zeroable = true;
8038 for (int i = 0, Size = Mask.size(); i < Size; ++i)
8039 if (i != V2Index && !Zeroable[i]) {
8040 IsV1Zeroable = false;
8041 break;
8042 }
8044 // Check for a single input from a SCALAR_TO_VECTOR node.
8045 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
8046 // all the smarts here sunk into that routine. However, the current
8047 // lowering of BUILD_VECTOR makes that nearly impossible until the old
8048 // vector shuffle lowering is dead.
8049 if (SDValue V2S = getScalarValueForVectorElement(
8050 V2, Mask[V2Index] - Mask.size(), DAG)) {
8051 // We need to zext the scalar if it is smaller than an i32.
8052 V2S = DAG.getNode(ISD::BITCAST, DL, EltVT, V2S);
8053 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
8054 // Using zext to expand a narrow element won't work for non-zero
8055 // elements.
8056 if (!IsV1Zeroable)
8057 return SDValue();
8059 // Zero-extend directly to i32.
8060 ExtVT = MVT::v4i32;
8061 V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
8062 }
8063 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
8064 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
8065 EltVT == MVT::i16) {
8066 // Either not inserting from the low element of the input or the input
8067 // element size is too small to use VZEXT_MOVL to clear the high bits.
8068 return SDValue();
8069 }
8071 if (!IsV1Zeroable) {
8072 // If V1 can't be treated as a zero vector we have fewer options to lower
8073 // this. We can't support integer vectors or non-zero targets cheaply, and
8074 // the V1 elements can't be permuted in any way.
8075 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
8076 if (!VT.isFloatingPoint() || V2Index != 0)
8077 return SDValue();
8078 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
8079 V1Mask[V2Index] = -1;
8080 if (!isNoopShuffleMask(V1Mask))
8081 return SDValue();
8082 // This is essentially a special case blend operation, but if we have
8083 // general purpose blend operations, they are always faster. Bail and let
8084 // the rest of the lowering handle these as blends.
8085 if (Subtarget->hasSSE41())
8086 return SDValue();
8088 // Otherwise, use MOVSD or MOVSS.
8089 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
8090 "Only two types of floating point element types to handle!");
8091 return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
8092 ExtVT, V1, V2);
8093 }
8095 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
8096 if (ExtVT != VT)
8097 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8100 // If we have 4 or fewer lanes we can cheaply shuffle the element into
8101 // the desired position. Otherwise it is more efficient to do a vector
8102 // shift left. We know that we can do a vector shift left because all
8103 // the inputs are zero.
8104 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
8105 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
8106 V2Shuffle[V2Index] = 0;
8107 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
8108 } else {
8109 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V2);
8110 V2 = DAG.getNode(
8111 X86ISD::VSHLDQ, DL, MVT::v2i64, V2,
8112 DAG.getConstant(
8113 V2Index * EltVT.getSizeInBits() / 8,
8114 DAG.getTargetLoweringInfo().getScalarShiftAmountTy(MVT::v2i64)));
8115 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8116 }
8117 return V2;
8118 }
8121 /// \brief Try to lower broadcast of a single element.
8123 /// For convenience, this code also bundles all of the subtarget feature set
8124 /// filtering. While a little annoying to re-dispatch on type here, there isn't
8125 /// a convenient way to factor it out.
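///
/// For example, on AVX the v4f32 mask [0, 0, 0, 0] becomes a single
/// VBROADCASTSS. Integer vector types additionally require AVX2, and
/// broadcasting from a plain vector register is only possible from its zero
/// element, as the checks below enforce.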
8126 static SDValue lowerVectorShuffleAsBroadcast(MVT VT, SDLoc DL, SDValue V,
8127 ArrayRef<int> Mask,
8128 const X86Subtarget *Subtarget,
8129 SelectionDAG &DAG) {
8130 if (!Subtarget->hasAVX())
8131 return SDValue();
8132 if (VT.isInteger() && !Subtarget->hasAVX2())
8133 return SDValue();
8135 // Check that the mask is a broadcast.
8136 int BroadcastIdx = -1;
8137 for (int M : Mask)
8138 if (M >= 0 && BroadcastIdx == -1)
8139 BroadcastIdx = M;
8140 else if (M >= 0 && M != BroadcastIdx)
8141 return SDValue();
8143 assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
8144 "a sorted mask where the broadcast "
8145 "comes from V1.");
8147 // Go up the chain of (vector) values to try and find a scalar load that
8148 // we can combine with the broadcast.
8149 for (;;) {
8150 switch (V.getOpcode()) {
8151 case ISD::CONCAT_VECTORS: {
8152 int OperandSize = Mask.size() / V.getNumOperands();
8153 V = V.getOperand(BroadcastIdx / OperandSize);
8154 BroadcastIdx %= OperandSize;
8155 continue;
8156 }
8158 case ISD::INSERT_SUBVECTOR: {
8159 SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
8160 auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
8161 if (!ConstantIdx)
8162 break;
8164 int BeginIdx = (int)ConstantIdx->getZExtValue();
8165 int EndIdx =
8166 BeginIdx + (int)VInner.getValueType().getVectorNumElements();
8167 if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
8168 BroadcastIdx -= BeginIdx;
8169 V = VInner;
8170 } else {
8171 V = VOuter;
8172 }
8173 continue;
8174 }
8175 }
8176 break;
8177 }
8179 // Check if this is a broadcast of a scalar. We special case lowering
8180 // for scalars so that we can more effectively fold with loads.
8181 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8182 (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
8183 V = V.getOperand(BroadcastIdx);
8185 // If the scalar isn't a load, we can't broadcast from it in AVX1, only with
8186 // AVX2.
8187 if (!Subtarget->hasAVX2() && !isShuffleFoldableLoad(V))
8188 return SDValue();
8189 } else if (BroadcastIdx != 0 || !Subtarget->hasAVX2()) {
8190 // We can't broadcast from a vector register w/o AVX2, and we can only
8191 // broadcast from the zero-element of a vector register.
8192 return SDValue();
8193 }
8195 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, V);
8196 }
8198 // Check for whether we can use INSERTPS to perform the shuffle. We only use
8199 // INSERTPS when the V1 elements are already in the correct locations
8200 // because otherwise we can just always use two SHUFPS instructions which
8201 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
8202 // perform INSERTPS if a single V1 element is out of place and all V2
8203 // elements are zeroable.
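//
// As a reminder of the encoding relied on below: the INSERTPS immediate packs
// the V2 source element index into bits [7:6], the destination lane into bits
// [5:4], and a zero mask over the four result lanes into bits [3:0], matching
// the InsertPSMask computation at the end of this function.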
8204 static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
8205 ArrayRef<int> Mask,
8206 SelectionDAG &DAG) {
8207 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8208 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8209 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8210 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8212 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8214 unsigned ZMask = 0;
8215 int V1DstIndex = -1;
8216 int V2DstIndex = -1;
8217 bool V1UsedInPlace = false;
8219 for (int i = 0; i < 4; i++) {
8220 // Synthesize a zero mask from the zeroable elements (includes undefs).
8221 if (Zeroable[i]) {
8222 ZMask |= 1 << i;
8223 continue;
8224 }
8226 // Flag if we use any V1 inputs in place.
8227 if (i == Mask[i]) {
8228 V1UsedInPlace = true;
8229 continue;
8230 }
8232 // We can only insert a single non-zeroable element.
8233 if (V1DstIndex != -1 || V2DstIndex != -1)
8234 return SDValue();
8236 if (Mask[i] < 4) {
8237 // V1 input out of place for insertion.
8238 V1DstIndex = i;
8239 } else {
8240 // V2 input for insertion.
8241 V2DstIndex = i;
8242 }
8243 }
8245 // Don't bother if we have no (non-zeroable) element for insertion.
8246 if (V1DstIndex == -1 && V2DstIndex == -1)
8247 return SDValue();
8249 // Determine element insertion src/dst indices. The src index is from the
8250 // start of the inserted vector, not the start of the concatenated vector.
8251 unsigned V2SrcIndex = 0;
8252 if (V1DstIndex != -1) {
8253 // If we have a V1 input out of place, we use V1 as the V2 element insertion
8254 // and don't use the original V2 at all.
8255 V2SrcIndex = Mask[V1DstIndex];
8256 V2DstIndex = V1DstIndex;
8257 V2 = V1;
8258 } else {
8259 V2SrcIndex = Mask[V2DstIndex] - 4;
8260 }
8262 // If no V1 inputs are used in place, then the result is created only from
8263 // the zero mask and the V2 insertion - so remove V1 dependency.
8264 if (!V1UsedInPlace)
8265 V1 = DAG.getUNDEF(MVT::v4f32);
8267 unsigned InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
8268 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
8270 // Insert the V2 element into the desired position.
8271 SDLoc DL(Op);
8272 return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
8273 DAG.getConstant(InsertPSMask, MVT::i8));
8274 }
8276 /// \brief Handle lowering of 2-lane 64-bit floating point shuffles.
8278 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
8279 /// support for floating point shuffles but not integer shuffles. These
8280 /// instructions will incur a domain crossing penalty on some chips though so
8281 /// it is better to avoid lowering through this for integer vectors where
8282 /// possible.
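///
/// A small worked example: the single-input v2f64 mask [1, 1] yields
/// SHUFPDMask == 3 below ((Mask[0] == 1) | ((Mask[1] == 1) << 1)), i.e. both
/// result lanes take the high element of the input.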
8283 static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8284 const X86Subtarget *Subtarget,
8285 SelectionDAG &DAG) {
8286 SDLoc DL(Op);
8287 assert(Op.getSimpleValueType() == MVT::v2f64 && "Bad shuffle type!");
8288 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8289 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8290 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8291 ArrayRef<int> Mask = SVOp->getMask();
8292 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8294 if (isSingleInputShuffleMask(Mask)) {
8295 // Use low duplicate instructions for masks that match their pattern.
8296 if (Subtarget->hasSSE3())
8297 if (isShuffleEquivalent(Mask, 0, 0))
8298 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, V1);
8300 // Straight shuffle of a single input vector. Simulate this by using the
8301 // single input as both of the "inputs" to this instruction.
8302 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
8304 if (Subtarget->hasAVX()) {
8305 // If we have AVX, we can use VPERMILPD, which will allow folding a load
8306 // into the shuffle.
8307 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
8308 DAG.getConstant(SHUFPDMask, MVT::i8));
8309 }
8311 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1,
8312 DAG.getConstant(SHUFPDMask, MVT::i8));
8313 }
8314 assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
8315 assert(Mask[1] >= 2 && "Non-canonicalized blend!");
8317 // Use dedicated unpack instructions for masks that match their pattern.
8318 if (isShuffleEquivalent(Mask, 0, 2))
8319 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2f64, V1, V2);
8320 if (isShuffleEquivalent(Mask, 1, 3))
8321 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2f64, V1, V2);
8323 // If we have a single input, insert that into V1 if we can do so cheaply.
8324 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8325 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8326 MVT::v2f64, DL, V1, V2, Mask, Subtarget, DAG))
8327 return Insertion;
8328 // Try inverting the insertion since for v2 masks it is easy to do and we
8329 // can't reliably sort the mask one way or the other.
8330 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8331 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8332 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8333 MVT::v2f64, DL, V2, V1, InverseMask, Subtarget, DAG))
8334 return Insertion;
8335 }
8337 // Try to use one of the special instruction patterns to handle two common
8338 // blend patterns if a zero-blend above didn't work.
8339 if (isShuffleEquivalent(Mask, 0, 3) || isShuffleEquivalent(Mask, 1, 3))
8340 if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
8341 // We can either use a special instruction to load over the low double or
8342 // to move just the low double.
8343 return DAG.getNode(
8344 isShuffleFoldableLoad(V1S) ? X86ISD::MOVLPD : X86ISD::MOVSD,
8345 DL, MVT::v2f64, V2,
8346 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
8348 if (Subtarget->hasSSE41())
8349 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
8350 Subtarget, DAG))
8351 return Blend;
8353 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
8354 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V2,
8355 DAG.getConstant(SHUFPDMask, MVT::i8));
8356 }
8358 /// \brief Handle lowering of 2-lane 64-bit integer shuffles.
8360 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
8361 /// the integer unit to minimize domain crossing penalties. However, for blends
8362 /// it falls back to the floating point shuffle operation with appropriate bit
8363 /// casting.
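///
/// For instance, the single-input v2i64 mask [1, 0] is widened below to the
/// v4i32 mask [2, 3, 0, 1], which PSHUFD encodes as the immediate 0x4E.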
8364 static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8365 const X86Subtarget *Subtarget,
8366 SelectionDAG &DAG) {
8367 SDLoc DL(Op);
8368 assert(Op.getSimpleValueType() == MVT::v2i64 && "Bad shuffle type!");
8369 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8370 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8371 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8372 ArrayRef<int> Mask = SVOp->getMask();
8373 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8375 if (isSingleInputShuffleMask(Mask)) {
8376 // Check for being able to broadcast a single element.
8377 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v2i64, DL, V1,
8378 Mask, Subtarget, DAG))
8379 return Broadcast;
8381 // Straight shuffle of a single input vector. For everything from SSE2
8382 // onward this has a single fast instruction with no scary immediates.
8383 // We have to map the mask as it is actually a v4i32 shuffle instruction.
8384 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V1);
8385 int WidenedMask[4] = {
8386 std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
8387 std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
8388 return DAG.getNode(
8389 ISD::BITCAST, DL, MVT::v2i64,
8390 DAG.getNode(X86ISD::PSHUFD, SDLoc(Op), MVT::v4i32, V1,
8391 getV4X86ShuffleImm8ForMask(WidenedMask, DAG)));
8392 }
8394 // Try to use byte shift instructions.
8395 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8396 DL, MVT::v2i64, V1, V2, Mask, DAG))
8397 return Shift;
8399 // If we have a single input from V2 insert that into V1 if we can do so
8400 // cheaply.
8401 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8402 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8403 MVT::v2i64, DL, V1, V2, Mask, Subtarget, DAG))
8404 return Insertion;
8405 // Try inverting the insertion since for v2 masks it is easy to do and we
8406 // can't reliably sort the mask one way or the other.
8407 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8408 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8409 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8410 MVT::v2i64, DL, V2, V1, InverseMask, Subtarget, DAG))
8411 return Insertion;
8412 }
8414 // Use dedicated unpack instructions for masks that match their pattern.
8415 if (isShuffleEquivalent(Mask, 0, 2))
8416 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, V1, V2);
8417 if (isShuffleEquivalent(Mask, 1, 3))
8418 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2i64, V1, V2);
8420 if (Subtarget->hasSSE41())
8421 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
8422 Subtarget, DAG))
8423 return Blend;
8425 // Try to use byte rotation instructions.
8426 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
8427 if (Subtarget->hasSSSE3())
8428 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8429 DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
8430 return Rotate;
8432 // We implement this with SHUFPD which is pretty lame because it will likely
8433 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
8434 // However, all the alternatives are still more cycles and newer chips don't
8435 // have this problem. It would be really nice if x86 had better shuffles here.
8436 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V1);
8437 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V2);
8438 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
8439 DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
8440 }
8442 /// \brief Lower a vector shuffle using the SHUFPS instruction.
8444 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
8445 /// It makes no assumptions about whether this is the *best* lowering; it
8446 /// simply uses it.
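///
/// Recall the SHUFPS semantics this builds on: the two low result lanes select
/// from the first operand and the two high result lanes from the second, so a
/// mask such as [0, 1, 4, 5] maps directly onto one instruction, while
/// mixed-lane masks need the blending tricks below.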
8447 static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
8448 ArrayRef<int> Mask, SDValue V1,
8449 SDValue V2, SelectionDAG &DAG) {
8450 SDValue LowV = V1, HighV = V2;
8451 int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
8453 int NumV2Elements =
8454 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8456 if (NumV2Elements == 1) {
8457 int V2Index =
8458 std::find_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; }) -
8459 Mask.begin();
8461 // Compute the index adjacent to V2Index and in the same half by toggling
8462 // the low bit.
8463 int V2AdjIndex = V2Index ^ 1;
8465 if (Mask[V2AdjIndex] == -1) {
8466 // Handles all the cases where we have a single V2 element and an undef.
8467 // This will only ever happen in the high lanes because we commute the
8468 // vector otherwise.
8469 if (V2Index < 2)
8470 std::swap(LowV, HighV);
8471 NewMask[V2Index] -= 4;
8472 } else {
8473 // Handle the case where the V2 element ends up adjacent to a V1 element.
8474 // To make this work, blend them together as the first step.
8475 int V1Index = V2AdjIndex;
8476 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
8477 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
8478 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8480 // Now proceed to reconstruct the final blend as we have the necessary
8481 // high or low half formed.
8482 if (V2Index < 2) {
8483 LowV = V2;
8484 HighV = V1;
8485 } else {
8486 HighV = V2;
8487 }
8488 NewMask[V1Index] = 2; // We put the V1 element in V2[2].
8489 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
8490 }
8491 } else if (NumV2Elements == 2) {
8492 if (Mask[0] < 4 && Mask[1] < 4) {
8493 // Handle the easy case where we have V1 in the low lanes and V2 in the
8494 // high lanes.
8495 NewMask[2] -= 4;
8496 NewMask[3] -= 4;
8497 } else if (Mask[2] < 4 && Mask[3] < 4) {
8498 // We also handle the reversed case because this utility may get called
8499 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
8500 // arrange things in the right direction.
8501 NewMask[0] -= 4;
8502 NewMask[1] -= 4;
8503 HighV = V1;
8504 LowV = V2;
8505 } else {
8506 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
8507 // trying to place elements directly, just blend them and set up the final
8508 // shuffle to place them.
8510 // The first two blend mask elements are for V1, the second two are for
8511 // V2.
8512 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
8513 Mask[2] < 4 ? Mask[2] : Mask[3],
8514 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
8515 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
8516 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
8517 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8519 // Now we do a normal shuffle of V1 by giving V1 as both operands to
8520 // a blend.
8521 LowV = HighV = V1;
8522 NewMask[0] = Mask[0] < 4 ? 0 : 2;
8523 NewMask[1] = Mask[0] < 4 ? 2 : 0;
8524 NewMask[2] = Mask[2] < 4 ? 1 : 3;
8525 NewMask[3] = Mask[2] < 4 ? 3 : 1;
8526 }
8527 }
8528 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
8529 getV4X86ShuffleImm8ForMask(NewMask, DAG));
8530 }
8532 /// \brief Lower 4-lane 32-bit floating point shuffles.
8534 /// Uses instructions exclusively from the floating point unit to minimize
8535 /// domain crossing penalties, as these are sufficient to implement all v4f32
8536 /// shuffles.
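///
/// For example, the mask [0, 0, 2, 2] is matched below as MOVSLDUP and
/// [1, 1, 3, 3] as MOVSHDUP on SSE3; other single-input masks fall back to
/// VPERMILPS on AVX or to a SHUFPS of V1 with itself.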
8537 static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8538 const X86Subtarget *Subtarget,
8539 SelectionDAG &DAG) {
8540 SDLoc DL(Op);
8541 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8542 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8543 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8544 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8545 ArrayRef<int> Mask = SVOp->getMask();
8546 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8548 int NumV2Elements =
8549 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8551 if (NumV2Elements == 0) {
8552 // Check for being able to broadcast a single element.
8553 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f32, DL, V1,
8554 Mask, Subtarget, DAG))
8555 return Broadcast;
8557 // Use even/odd duplicate instructions for masks that match their pattern.
8558 if (Subtarget->hasSSE3()) {
8559 if (isShuffleEquivalent(Mask, 0, 0, 2, 2))
8560 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
8561 if (isShuffleEquivalent(Mask, 1, 1, 3, 3))
8562 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
8563 }
8565 if (Subtarget->hasAVX()) {
8566 // If we have AVX, we can use VPERMILPS which will allow folding a load
8567 // into the shuffle.
8568 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
8569 getV4X86ShuffleImm8ForMask(Mask, DAG));
8570 }
8572 // Otherwise, use a straight shuffle of a single input vector. We pass the
8573 // input vector to both operands to simulate this with a SHUFPS.
8574 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
8575 getV4X86ShuffleImm8ForMask(Mask, DAG));
8576 }
8578 // Use dedicated unpack instructions for masks that match their pattern.
8579 if (isShuffleEquivalent(Mask, 0, 4, 1, 5))
8580 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V1, V2);
8581 if (isShuffleEquivalent(Mask, 2, 6, 3, 7))
8582 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V1, V2);
8584 // There are special ways we can lower some single-element blends. However, we
8585 // have custom ways we can lower more complex single-element blends below that
8586 // we defer to if both this and BLENDPS fail to match, so restrict this to
8587 // when the V2 input is targeting element 0 of the mask -- that is the fast
8588 // case here.
8589 if (NumV2Elements == 1 && Mask[0] >= 4)
8590 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4f32, DL, V1, V2,
8591 Mask, Subtarget, DAG))
8592 return V;
8594 if (Subtarget->hasSSE41()) {
8595 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
8596 Subtarget, DAG))
8597 return Blend;
8599 // Use INSERTPS if we can complete the shuffle efficiently.
8600 if (SDValue V = lowerVectorShuffleAsInsertPS(Op, V1, V2, Mask, DAG))
8601 return V;
8602 }
8604 // Otherwise fall back to a SHUFPS lowering strategy.
8605 return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
8606 }
8608 /// \brief Lower 4-lane i32 vector shuffles.
8610 /// We try to handle these with integer-domain shuffles where we can, but for
8611 /// blends we use the floating point domain blend instructions.
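///
/// As an example of the integer-domain path: the single-input mask
/// [2, 2, 3, 3] is handled below as a PSHUFD with immediate 0xFA
/// (2 | 2 << 2 | 3 << 4 | 3 << 6).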
8612 static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8613 const X86Subtarget *Subtarget,
8614 SelectionDAG &DAG) {
8615 SDLoc DL(Op);
8616 assert(Op.getSimpleValueType() == MVT::v4i32 && "Bad shuffle type!");
8617 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8618 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8619 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8620 ArrayRef<int> Mask = SVOp->getMask();
8621 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8623 // Whenever we can lower this as a zext, that instruction is strictly faster
8624 // than any alternative. It also allows us to fold memory operands into the
8625 // shuffle in many cases.
8626 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2,
8627 Mask, Subtarget, DAG))
8628 return ZExt;
8630 int NumV2Elements =
8631 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8633 if (NumV2Elements == 0) {
8634 // Check for being able to broadcast a single element.
8635 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i32, DL, V1,
8636 Mask, Subtarget, DAG))
8637 return Broadcast;
8639 // Straight shuffle of a single input vector. For everything from SSE2
8640 // onward this has a single fast instruction with no scary immediates.
8641 // We coerce the shuffle pattern to be compatible with UNPCK instructions
8642 // but we aren't actually going to use the UNPCK instruction because doing
8643 // so prevents folding a load into this instruction or making a copy.
8644 const int UnpackLoMask[] = {0, 0, 1, 1};
8645 const int UnpackHiMask[] = {2, 2, 3, 3};
8646 if (isShuffleEquivalent(Mask, 0, 0, 1, 1))
8647 Mask = UnpackLoMask;
8648 else if (isShuffleEquivalent(Mask, 2, 2, 3, 3))
8649 Mask = UnpackHiMask;
8651 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
8652 getV4X86ShuffleImm8ForMask(Mask, DAG));
8653 }
8655 // Try to use byte shift instructions.
8656 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8657 DL, MVT::v4i32, V1, V2, Mask, DAG))
8658 return Shift;
8660 // There are special ways we can lower some single-element blends.
8661 if (NumV2Elements == 1)
8662 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4i32, DL, V1, V2,
8663 Mask, Subtarget, DAG))
8664 return V;
8666 // Use dedicated unpack instructions for masks that match their pattern.
8667 if (isShuffleEquivalent(Mask, 0, 4, 1, 5))
8668 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V1, V2);
8669 if (isShuffleEquivalent(Mask, 2, 6, 3, 7))
8670 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V1, V2);
8672 if (Subtarget->hasSSE41())
8673 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
8674 Subtarget, DAG))
8675 return Blend;
8677 // Try to use byte rotation instructions.
8678 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
8679 if (Subtarget->hasSSSE3())
8680 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8681 DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
8682 return Rotate;
8684 // We implement this with SHUFPS because it can blend from two vectors.
8685 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
8686 // up the inputs, bypassing domain shift penalties that we would incur if we
8687 // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
8688 // an issue.
8689 return DAG.getNode(ISD::BITCAST, DL, MVT::v4i32,
8690 DAG.getVectorShuffle(
8691 MVT::v4f32, DL,
8692 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V1),
8693 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V2), Mask));
8694 }
8696 /// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
8697 /// shuffle lowering, and the most complex part.
8699 /// The lowering strategy is to try to form pairs of input lanes which are
8700 /// targeted at the same half of the final vector, and then use a dword shuffle
8701 /// to place them onto the right half, and finally unpack the paired lanes into
8702 /// their final position.
8704 /// The exact breakdown of how to form these dword pairs and align them on the
8705 /// correct sides is really tricky. See the comments within the function for
8706 /// more of the details.
8707 static SDValue lowerV8I16SingleInputVectorShuffle(
8708 SDLoc DL, SDValue V, MutableArrayRef<int> Mask,
8709 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8710 assert(V.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
8711 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
8712 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
8714 SmallVector<int, 4> LoInputs;
8715 std::copy_if(LoMask.begin(), LoMask.end(), std::back_inserter(LoInputs),
8716 [](int M) { return M >= 0; });
8717 std::sort(LoInputs.begin(), LoInputs.end());
8718 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
8719 SmallVector<int, 4> HiInputs;
8720 std::copy_if(HiMask.begin(), HiMask.end(), std::back_inserter(HiInputs),
8721 [](int M) { return M >= 0; });
8722 std::sort(HiInputs.begin(), HiInputs.end());
8723 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
8724 int NumLToL =
8725 std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
8726 int NumHToL = LoInputs.size() - NumLToL;
8727 int NumLToH =
8728 std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
8729 int NumHToH = HiInputs.size() - NumLToH;
8730 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
8731 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
8732 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
8733 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
8735 // Check for being able to broadcast a single element.
8736 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i16, DL, V,
8737 Mask, Subtarget, DAG))
8738 return Broadcast;
8740 // Try to use byte shift instructions.
8741 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8742 DL, MVT::v8i16, V, V, Mask, DAG))
8743 return Shift;
8745 // Use dedicated unpack instructions for masks that match their pattern.
8746 if (isShuffleEquivalent(Mask, 0, 0, 1, 1, 2, 2, 3, 3))
8747 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V, V);
8748 if (isShuffleEquivalent(Mask, 4, 4, 5, 5, 6, 6, 7, 7))
8749 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V, V);
8751 // Try to use byte rotation instructions.
8752 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8753 DL, MVT::v8i16, V, V, Mask, Subtarget, DAG))
8754 return Rotate;
8756 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
8757 // such inputs we can swap two of the dwords across the half mark and end up
8758 // with <=2 inputs to each half in each half. Once there, we can fall through
8759 // to the generic code below. For example:
8761 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
8762 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
8764 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
8765 // and an existing 2-into-2 on the other half. In this case we may have to
8766 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
8767 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
8768 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
8769 // because any other situation (including a 3-into-1 or 1-into-3 in the other
8770 // half than the one we target for fixing) will be fixed when we re-enter this
8771 // path. We will also combine away any sequence of PSHUFD instructions that
8772 // result into a single instruction. Here is an example of the tricky case:
8774 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
8775 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
8777 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
8779 // Input: [a, b, c, d, e, f, g, h] -PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
8780 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
8782 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
8783 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
8785 // The result is fine to be handled by the generic logic.
8786 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
8787 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
8788 int AOffset, int BOffset) {
8789 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
8790 "Must call this with A having 3 or 1 inputs from the A half.");
8791 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
8792 "Must call this with B having 1 or 3 inputs from the B half.");
8793 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
8794 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
8796 // Compute the index of dword with only one word among the three inputs in
8797 // a half by taking the sum of the half with three inputs and subtracting
8798 // the sum of the actual three inputs. The difference is the remaining
8799 // slot.
8800 int ADWord, BDWord;
8801 int &TripleDWord = AToAInputs.size() == 3 ? ADWord : BDWord;
8802 int &OneInputDWord = AToAInputs.size() == 3 ? BDWord : ADWord;
8803 int TripleInputOffset = AToAInputs.size() == 3 ? AOffset : BOffset;
8804 ArrayRef<int> TripleInputs = AToAInputs.size() == 3 ? AToAInputs : BToAInputs;
8805 int OneInput = AToAInputs.size() == 3 ? BToAInputs[0] : AToAInputs[0];
8806 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
8807 int TripleNonInputIdx =
8808 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
8809 TripleDWord = TripleNonInputIdx / 2;
8811 // We use xor with one to compute the adjacent DWord to whichever one the
8812 // OneInput is in.
8813 OneInputDWord = (OneInput / 2) ^ 1;
8815 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
8816 // and BToA inputs. If there is also such a problem with the BToB and AToB
8817 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
8818 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
8819 // is essential that we don't *create* a 3<-1 as then we might oscillate.
8820 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
8821 // Compute how many inputs will be flipped by swapping these DWords. We
8822 // need to balance this to ensure we don't form a 3-1 shuffle in the other
8823 // half.
8825 int NumFlippedAToBInputs =
8826 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
8827 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
8828 int NumFlippedBToBInputs =
8829 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
8830 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
8831 if ((NumFlippedAToBInputs == 1 &&
8832 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
8833 (NumFlippedBToBInputs == 1 &&
8834 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
8835 // We choose whether to fix the A half or B half based on whether that
8836 // half has zero flipped inputs. At zero, we may not be able to fix it
8837 // with that half. We also bias towards fixing the B half because that
8838 // will more commonly be the high half, and we have to bias one way.
8839 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
8840 ArrayRef<int> Inputs) {
8841 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
8842 bool IsFixIdxInput = std::find(Inputs.begin(), Inputs.end(),
8843 PinnedIdx ^ 1) != Inputs.end();
8844 // Determine whether the free index is in the flipped dword or the
8845 // unflipped dword based on where the pinned index is. We use this bit
8846 // in an xor to conditionally select the adjacent dword.
8847 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
8848 bool IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
8849 FixFreeIdx) != Inputs.end();
8850 if (IsFixIdxInput == IsFixFreeIdxInput)
8851 FixFreeIdx += 1;
8852 IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
8853 FixFreeIdx) != Inputs.end();
8854 assert(IsFixIdxInput != IsFixFreeIdxInput &&
8855 "We need to be changing the number of flipped inputs!");
8856 int PSHUFHalfMask[] = {0, 1, 2, 3};
8857 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
8858 V = DAG.getNode(FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
8859 MVT::v8i16, V,
8860 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DAG));
8862 for (int &M : Mask)
8863 if (M != -1 && M == FixIdx)
8864 M = FixFreeIdx;
8865 else if (M != -1 && M == FixFreeIdx)
8866 M = FixIdx;
8867 };
8868 if (NumFlippedBToBInputs != 0) {
8869 int BPinnedIdx =
8870 BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
8871 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
8872 } else {
8873 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
8874 int APinnedIdx =
8875 AToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
8876 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
8877 }
8878 }
8879 }
8881 int PSHUFDMask[] = {0, 1, 2, 3};
8882 PSHUFDMask[ADWord] = BDWord;
8883 PSHUFDMask[BDWord] = ADWord;
8884 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
8885 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
8886 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
8887 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
8889 // Adjust the mask to match the new locations of A and B.
8890 for (int &M : Mask)
8891 if (M != -1 && M / 2 == ADWord)
8892 M = 2 * BDWord + M % 2;
8893 else if (M != -1 && M / 2 == BDWord)
8894 M = 2 * ADWord + M % 2;
8896 // Recurse back into this routine to re-compute state now that this isn't
8897 // a 3 and 1 problem.
8898 return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
8899 Mask);
8900 };
8901 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
8902 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
8903 else if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
8904 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
8906 // At this point there are at most two inputs to the low and high halves from
8907 // each half. That means the inputs can always be grouped into dwords and
8908 // those dwords can then be moved to the correct half with a dword shuffle.
8909 // We use at most one low and one high word shuffle to collect these paired
8910 // inputs into dwords, and finally a dword shuffle to place them.
8911 int PSHUFLMask[4] = {-1, -1, -1, -1};
8912 int PSHUFHMask[4] = {-1, -1, -1, -1};
8913 int PSHUFDMask[4] = {-1, -1, -1, -1};
8915 // First fix the masks for all the inputs that are staying in their
8916 // original halves. This will then dictate the targets of the cross-half
8917 // shuffles.
8918 auto fixInPlaceInputs =
8919 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
8920 MutableArrayRef<int> SourceHalfMask,
8921 MutableArrayRef<int> HalfMask, int HalfOffset) {
8922 if (InPlaceInputs.empty())
8923 return;
8924 if (InPlaceInputs.size() == 1) {
8925 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
8926 InPlaceInputs[0] - HalfOffset;
8927 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
8928 return;
8929 }
8930 if (IncomingInputs.empty()) {
8931 // Just fix all of the in place inputs.
8932 for (int Input : InPlaceInputs) {
8933 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
8934 PSHUFDMask[Input / 2] = Input / 2;
8935 }
8936 return;
8937 }
8939 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
8940 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
8941 InPlaceInputs[0] - HalfOffset;
8942 // Put the second input next to the first so that they are packed into
8943 // a dword. We find the adjacent index by toggling the low bit.
8944 int AdjIndex = InPlaceInputs[0] ^ 1;
8945 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
8946 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
8947 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
8948 };
8949 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
8950 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
8952 // Now gather the cross-half inputs and place them into a free dword of
8953 // their target half.
8954 // FIXME: This operation could almost certainly be simplified dramatically to
8955 // look more like the 3-1 fixing operation.
8956 auto moveInputsToRightHalf = [&PSHUFDMask](
8957 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
8958 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
8959 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
8960 int DestOffset) {
8961 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
8962 return SourceHalfMask[Word] != -1 && SourceHalfMask[Word] != Word;
8963 };
8964 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
8965 int Word) {
8966 int LowWord = Word & ~1;
8967 int HighWord = Word | 1;
8968 return isWordClobbered(SourceHalfMask, LowWord) ||
8969 isWordClobbered(SourceHalfMask, HighWord);
8970 };
8972 if (IncomingInputs.empty())
8973 return;
8975 if (ExistingInputs.empty()) {
8976 // Map any dwords with inputs from them into the right half.
8977 for (int Input : IncomingInputs) {
8978 // If the source half mask maps over the inputs, turn those into
8979 // swaps and use the swapped lane.
8980 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
8981 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] == -1) {
8982 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
8983 Input - SourceOffset;
8984 // We have to swap the uses in our half mask in one sweep.
8985 for (int &M : HalfMask)
8986 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
8987 M = Input;
8988 else if (M == Input)
8989 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
8990 } else {
8991 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
8992 Input - SourceOffset &&
8993 "Previous placement doesn't match!");
8995 // Note that this correctly re-maps both when we do a swap and when
8996 // we observe the other side of the swap above. We rely on that to
8997 // avoid swapping the members of the input list directly.
8998 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
8999 }
9001 // Map the input's dword into the correct half.
9002 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] == -1)
9003 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
9004 else
9005 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
9006 Input / 2 &&
9007 "Previous placement doesn't match!");
9008 }
9010 // And just directly shift any other-half mask elements to be same-half
9011 // as we will have mirrored the dword containing the element into the
9012 // same position within that half.
9013 for (int &M : HalfMask)
9014 if (M >= SourceOffset && M < SourceOffset + 4) {
9015 M = M - SourceOffset + DestOffset;
9016 assert(M >= 0 && "This should never wrap below zero!");
9017 }
9018 return;
9019 }
9021 // Ensure we have the input in a viable dword of its current half. This
9022 // is particularly tricky because the original position may be clobbered
9023 // by inputs being moved and *staying* in that half.
9024 if (IncomingInputs.size() == 1) {
9025 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9026 int InputFixed = std::find(std::begin(SourceHalfMask),
9027 std::end(SourceHalfMask), -1) -
9028 std::begin(SourceHalfMask) + SourceOffset;
9029 SourceHalfMask[InputFixed - SourceOffset] =
9030 IncomingInputs[0] - SourceOffset;
9031 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
9032 InputFixed);
9033 IncomingInputs[0] = InputFixed;
9034 }
9035 } else if (IncomingInputs.size() == 2) {
9036 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
9037 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9038 // We have two non-adjacent or clobbered inputs we need to extract from
9039 // the source half. To do this, we need to map them into some adjacent
9040 // dword slot in the source mask.
9041 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
9042 IncomingInputs[1] - SourceOffset};
9044 // If there is a free slot in the source half mask adjacent to one of
9045 // the inputs, place the other input in it. We use (Index XOR 1) to
9046 // compute an adjacent index.
9047 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
9048 SourceHalfMask[InputsFixed[0] ^ 1] == -1) {
9049 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
9050 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9051 InputsFixed[1] = InputsFixed[0] ^ 1;
9052 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
9053 SourceHalfMask[InputsFixed[1] ^ 1] == -1) {
9054 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
9055 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
9056 InputsFixed[0] = InputsFixed[1] ^ 1;
9057 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] == -1 &&
9058 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] == -1) {
9059 // The two inputs are in the same DWord but it is clobbered and the
9060 // adjacent DWord isn't used at all. Move both inputs to the free
9062 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
9063 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
9064 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
9065 InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
9066 } else {
9067 // The only way we hit this point is if there is no clobbering
9068 // (because there are no off-half inputs to this half) and there is no
9069 // free slot adjacent to one of the inputs. In this case, we have to
9070 // swap an input with a non-input.
9071 for (int i = 0; i < 4; ++i)
9072 assert((SourceHalfMask[i] == -1 || SourceHalfMask[i] == i) &&
9073 "We can't handle any clobbers here!");
9074 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
9075 "Cannot have adjacent inputs here!");
9077 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9078 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
9080 // We also have to update the final source mask in this case because
9081 // it may need to undo the above swap.
9082 for (int &M : FinalSourceHalfMask)
9083 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
9084 M = InputsFixed[1] + SourceOffset;
9085 else if (M == InputsFixed[1] + SourceOffset)
9086 M = (InputsFixed[0] ^ 1) + SourceOffset;
9088 InputsFixed[1] = InputsFixed[0] ^ 1;
9089 }
9091 // Point everything at the fixed inputs.
9092 for (int &M : HalfMask)
9093 if (M == IncomingInputs[0])
9094 M = InputsFixed[0] + SourceOffset;
9095 else if (M == IncomingInputs[1])
9096 M = InputsFixed[1] + SourceOffset;
9098 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
9099 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
9100 }
9101 } else {
9102 llvm_unreachable("Unhandled input size!");
9103 }
9105 // Now hoist the DWord down to the right half.
9106 int FreeDWord = (PSHUFDMask[DestOffset / 2] == -1 ? 0 : 1) + DestOffset / 2;
9107 assert(PSHUFDMask[FreeDWord] == -1 && "DWord not free");
9108 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
9109 for (int &M : HalfMask)
9110 for (int Input : IncomingInputs)
9111 if (M == Input)
9112 M = FreeDWord * 2 + Input % 2;
9113 };
9114 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
9115 /*SourceOffset*/ 4, /*DestOffset*/ 0);
9116 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
9117 /*SourceOffset*/ 0, /*DestOffset*/ 4);
9119 // Now enact all the shuffles we've computed to move the inputs into their
9120 // target halves.
9121 if (!isNoopShuffleMask(PSHUFLMask))
9122 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9123 getV4X86ShuffleImm8ForMask(PSHUFLMask, DAG));
9124 if (!isNoopShuffleMask(PSHUFHMask))
9125 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9126 getV4X86ShuffleImm8ForMask(PSHUFHMask, DAG));
9127 if (!isNoopShuffleMask(PSHUFDMask))
9128 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9129 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9130 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9131 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9133 // At this point, each half should contain all its inputs, and we can then
9134 // just shuffle them into their final position.
9135 assert(std::count_if(LoMask.begin(), LoMask.end(),
9136 [](int M) { return M >= 4; }) == 0 &&
9137 "Failed to lift all the high half inputs to the low mask!");
9138 assert(std::count_if(HiMask.begin(), HiMask.end(),
9139 [](int M) { return M >= 0 && M < 4; }) == 0 &&
9140 "Failed to lift all the low half inputs to the high mask!");
9142 // Do a half shuffle for the low mask.
9143 if (!isNoopShuffleMask(LoMask))
9144 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9145 getV4X86ShuffleImm8ForMask(LoMask, DAG));
9147 // Do a half shuffle with the high mask after shifting its values down.
9148 for (int &M : HiMask)
9149 if (M >= 0)
9150 M -= 4;
9151 if (!isNoopShuffleMask(HiMask))
9152 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9153 getV4X86ShuffleImm8ForMask(HiMask, DAG));
9155 return V;
9156 }
9158 /// \brief Detect whether the mask pattern should be lowered through
9159 /// interleaving.
9161 /// This essentially tests whether viewing the mask as an interleaving of two
9162 /// sub-sequences reduces the cross-input traffic of a blend operation. If so,
9163 /// lowering it through interleaving is a significantly better strategy.
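///
/// Worked example: for the v8i16 mask [0, 8, 1, 9, 2, 10, 3, 11] every even
/// result lane reads V1 and every odd lane reads V2, so the interleaved view
/// has zero cross-input results while the lo/hi split view has four; this
/// routine therefore returns true for that mask.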
9164 static bool shouldLowerAsInterleaving(ArrayRef<int> Mask) {
9165 int NumEvenInputs[2] = {0, 0};
9166 int NumOddInputs[2] = {0, 0};
9167 int NumLoInputs[2] = {0, 0};
9168 int NumHiInputs[2] = {0, 0};
9169 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
9170 if (Mask[i] == -1)
9171 continue;
9173 int InputIdx = Mask[i] >= Size;
9175 if (i < Size / 2)
9176 ++NumLoInputs[InputIdx];
9177 else
9178 ++NumHiInputs[InputIdx];
9180 if (i % 2 == 0)
9181 ++NumEvenInputs[InputIdx];
9182 else
9183 ++NumOddInputs[InputIdx];
9184 }
9186 // The minimum number of cross-input results for both the interleaved and
9187 // split cases. If interleaving results in fewer cross-input results, return
9188 // true.
9189 int InterleavedCrosses = std::min(NumEvenInputs[1] + NumOddInputs[0],
9190 NumEvenInputs[0] + NumOddInputs[1]);
9191 int SplitCrosses = std::min(NumLoInputs[1] + NumHiInputs[0],
9192 NumLoInputs[0] + NumHiInputs[1]);
9193 return InterleavedCrosses < SplitCrosses;
9194 }
9196 /// \brief Blend two v8i16 vectors using a naive unpack strategy.
9198 /// This strategy only works when the inputs from each vector fit into a single
9199 /// half of that vector, and generally there are not so many inputs as to leave
9200 /// the in-place shuffles required highly constrained (and thus expensive). It
9201 /// shifts all the inputs into a single side of both input vectors and then
9202 /// uses an unpack to interleave these inputs in a single vector. At that
9203 /// point, we will fall back on the generic single input shuffle lowering.
9204 static SDValue lowerV8I16BasicBlendVectorShuffle(SDLoc DL, SDValue V1,
9205 SDValue V2,
9206 MutableArrayRef<int> Mask,
9207 const X86Subtarget *Subtarget,
9208 SelectionDAG &DAG) {
9209 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9210 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9211 SmallVector<int, 3> LoV1Inputs, HiV1Inputs, LoV2Inputs, HiV2Inputs;
9212 for (int i = 0; i < 8; ++i)
9213 if (Mask[i] >= 0 && Mask[i] < 4)
9214 LoV1Inputs.push_back(i);
9215 else if (Mask[i] >= 4 && Mask[i] < 8)
9216 HiV1Inputs.push_back(i);
9217 else if (Mask[i] >= 8 && Mask[i] < 12)
9218 LoV2Inputs.push_back(i);
9219 else if (Mask[i] >= 12)
9220 HiV2Inputs.push_back(i);
9222 int NumV1Inputs = LoV1Inputs.size() + HiV1Inputs.size();
9223 int NumV2Inputs = LoV2Inputs.size() + HiV2Inputs.size();
9226 assert(NumV1Inputs > 0 && NumV1Inputs <= 3 && "At most 3 inputs supported");
9227 assert(NumV2Inputs > 0 && NumV2Inputs <= 3 && "At most 3 inputs supported");
9228 assert(NumV1Inputs + NumV2Inputs <= 4 && "At most 4 combined inputs");
9230 bool MergeFromLo = LoV1Inputs.size() + LoV2Inputs.size() >=
9231 HiV1Inputs.size() + HiV2Inputs.size();
9233 auto moveInputsToHalf = [&](SDValue V, ArrayRef<int> LoInputs,
9234 ArrayRef<int> HiInputs, bool MoveToLo,
9235 int MaskOffset) {
9236 ArrayRef<int> GoodInputs = MoveToLo ? LoInputs : HiInputs;
9237 ArrayRef<int> BadInputs = MoveToLo ? HiInputs : LoInputs;
9238 if (BadInputs.empty())
9239 return V;
9241 int MoveMask[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9242 int MoveOffset = MoveToLo ? 0 : 4;
9244 if (GoodInputs.empty()) {
9245 for (int BadInput : BadInputs) {
9246 MoveMask[Mask[BadInput] % 4 + MoveOffset] = Mask[BadInput] - MaskOffset;
9247 Mask[BadInput] = Mask[BadInput] % 4 + MoveOffset + MaskOffset;
9248 }
9249 } else {
9250 if (GoodInputs.size() == 2) {
9251 // If the low inputs are spread across two dwords, pack them into
9252 // a single dword.
9253 MoveMask[MoveOffset] = Mask[GoodInputs[0]] - MaskOffset;
9254 MoveMask[MoveOffset + 1] = Mask[GoodInputs[1]] - MaskOffset;
9255 Mask[GoodInputs[0]] = MoveOffset + MaskOffset;
9256 Mask[GoodInputs[1]] = MoveOffset + 1 + MaskOffset;
9257 } else {
9258 // Otherwise pin the good inputs.
9259 for (int GoodInput : GoodInputs)
9260 MoveMask[Mask[GoodInput] - MaskOffset] = Mask[GoodInput] - MaskOffset;
9261 }
9263 if (BadInputs.size() == 2) {
9264 // If we have two bad inputs then there may be either one or two good
9265 // inputs fixed in place. Find a fixed input, and then find the *other*
9266 // two adjacent indices by using modular arithmetic.
9267 int GoodMaskIdx =
9268 std::find_if(std::begin(MoveMask) + MoveOffset, std::end(MoveMask),
9269 [](int M) { return M >= 0; }) -
9270 std::begin(MoveMask);
9271 int MoveMaskIdx =
9272 ((((GoodMaskIdx - MoveOffset) & ~1) + 2) % 4) + MoveOffset;
9273 assert(MoveMask[MoveMaskIdx] == -1 && "Expected empty slot");
9274 assert(MoveMask[MoveMaskIdx + 1] == -1 && "Expected empty slot");
9275 MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
9276 MoveMask[MoveMaskIdx + 1] = Mask[BadInputs[1]] - MaskOffset;
9277 Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
9278 Mask[BadInputs[1]] = MoveMaskIdx + 1 + MaskOffset;
9279 } else {
9280 assert(BadInputs.size() == 1 && "All sizes handled");
9281 int MoveMaskIdx = std::find(std::begin(MoveMask) + MoveOffset,
9282 std::end(MoveMask), -1) -
9283 std::begin(MoveMask);
9284 MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
9285 Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
9286 }
9287 }
9289 return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
9290 MoveMask);
9291 };
9292 V1 = moveInputsToHalf(V1, LoV1Inputs, HiV1Inputs, MergeFromLo,
9293 /*MaskOffset*/ 0);
9294 V2 = moveInputsToHalf(V2, LoV2Inputs, HiV2Inputs, MergeFromLo,
9295 /*MaskOffset*/ 8);
9297 // FIXME: Select an interleaving of the merge of V1 and V2 that minimizes
9298 // cross-half traffic in the final shuffle.
9300 // Munge the mask to be a single-input mask after the unpack merges the
9301 // results.
9302 for (int &M : Mask)
9303 if (M != -1)
9304 M = 2 * (M % 4) + (M / 8);
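// To see the remapping concretely (for the UNPCKL merge-from-low case): V1's
// word k lands in result lane 2*k and V2's word k in lane 2*k + 1, so a mask
// entry of 2 (V1 word 2) becomes 4 and an entry of 10 (V2 word 2) becomes 5.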
9306 return DAG.getVectorShuffle(
9307 MVT::v8i16, DL, DAG.getNode(MergeFromLo ? X86ISD::UNPCKL : X86ISD::UNPCKH,
9308 DL, MVT::v8i16, V1, V2),
9309 DAG.getUNDEF(MVT::v8i16), Mask);
9310 }
9312 /// \brief Generic lowering of 8-lane i16 shuffles.
9314 /// This handles both single-input shuffles and combined shuffle/blends with
9315 /// two inputs. The single input shuffles are immediately delegated to
9316 /// a dedicated lowering routine.
9318 /// The blends are lowered in one of three fundamental ways. If there are few
9319 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
9320 /// of the input is significantly cheaper when lowered as an interleaving of
9321 /// the two inputs, try to interleave them. Otherwise, blend the low and high
9322 /// halves of the inputs separately (making them have relatively few inputs)
9323 /// and then concatenate them.
9324 static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9325 const X86Subtarget *Subtarget,
9326 SelectionDAG &DAG) {
9327 SDLoc DL(Op);
9328 assert(Op.getSimpleValueType() == MVT::v8i16 && "Bad shuffle type!");
9329 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9330 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9331 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9332 ArrayRef<int> OrigMask = SVOp->getMask();
9333 int MaskStorage[8] = {OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9334 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7]};
9335 MutableArrayRef<int> Mask(MaskStorage);
9337 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
9339 // Whenever we can lower this as a zext, that instruction is strictly faster
9340 // than any alternative.
9341 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9342 DL, MVT::v8i16, V1, V2, OrigMask, Subtarget, DAG))
9343 return ZExt;
9345 auto isV1 = [](int M) { return M >= 0 && M < 8; };
9346 auto isV2 = [](int M) { return M >= 8; };
9348 int NumV1Inputs = std::count_if(Mask.begin(), Mask.end(), isV1);
9349 int NumV2Inputs = std::count_if(Mask.begin(), Mask.end(), isV2);
9351 if (NumV2Inputs == 0)
9352 return lowerV8I16SingleInputVectorShuffle(DL, V1, Mask, Subtarget, DAG);
9354 assert(NumV1Inputs > 0 && "All single-input shuffles should be canonicalized "
9355 "to be V1-input shuffles.");
9357 // Try to use byte shift instructions.
9358 if (SDValue Shift = lowerVectorShuffleAsByteShift(
9359 DL, MVT::v8i16, V1, V2, Mask, DAG))
9360 return Shift;
9362 // There are special ways we can lower some single-element blends.
9363 if (NumV2Inputs == 1)
9364 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v8i16, DL, V1, V2,
9365 Mask, Subtarget, DAG))
9366 return V;
9368 // Use dedicated unpack instructions for masks that match their pattern.
9369 if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 2, 10, 3, 11))
9370 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V1, V2);
9371 if (isShuffleEquivalent(Mask, 4, 12, 5, 13, 6, 14, 7, 15))
9372 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V1, V2);
9374 if (Subtarget->hasSSE41())
9375 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
9376 Subtarget, DAG))
9377 return Blend;
9379 // Try to use byte rotation instructions.
9380 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9381 DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
9382 return Rotate;
9384 if (NumV1Inputs + NumV2Inputs <= 4)
9385 return lowerV8I16BasicBlendVectorShuffle(DL, V1, V2, Mask, Subtarget, DAG);
9387 // Check whether an interleaving lowering is likely to be more efficient.
9388 // This isn't perfect but it is a strong heuristic that tends to work well on
9389 // the kinds of shuffles that show up in practice.
9391 // FIXME: Handle 1x, 2x, and 4x interleaving.
9392 if (shouldLowerAsInterleaving(Mask)) {
    // FIXME: Figure out whether we should pack these into the low or high
    // halves.
9396 int EMask[8], OMask[8];
    for (int i = 0; i < 4; ++i) {
      EMask[i] = Mask[2*i];
      OMask[i] = Mask[2*i + 1];
      EMask[i + 4] = -1;
      OMask[i + 4] = -1;
    }
9404 SDValue Evens = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, EMask);
9405 SDValue Odds = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, OMask);
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, Evens, Odds);
  }
9410 int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9411 int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9413 for (int i = 0; i < 4; ++i) {
9414 LoBlendMask[i] = Mask[i];
    HiBlendMask[i] = Mask[i + 4];
  }
9418 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
9419 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
9420 LoV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, LoV);
9421 HiV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, HiV);
  return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
                     DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, LoV, HiV));
}
9427 /// \brief Check whether a compaction lowering can be done by dropping even
9428 /// elements and compute how many times even elements must be dropped.
9430 /// This handles shuffles which take every Nth element where N is a power of
9431 /// two. Example shuffle masks:
9433 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
9434 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
9435 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
9436 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
9437 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
9438 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
9440 /// Any of these lanes can of course be undef.
9442 /// This routine only supports N <= 3.
/// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
/// for larger N.
9446 /// \returns N above, or the number of times even elements must be dropped if
9447 /// there is such a number. Otherwise returns zero.
9448 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
9449 // Figure out whether we're looping over two inputs or just one.
9450 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9452 // The modulus for the shuffle vector entries is based on whether this is
9453 // a single input or not.
9454 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
9455 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
9456 "We should only be called with masks with a power-of-2 size!");
9458 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
9460 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
9461 // and 2^3 simultaneously. This is because we may have ambiguity with
9462 // partially undef inputs.
9463 bool ViableForN[3] = {true, true, true};
  for (int i = 0, e = Mask.size(); i < e; ++i) {
    // Ignore undef lanes, we'll optimistically collapse them to the pattern we
    // want.
    if (Mask[i] == -1)
      continue;

    bool IsAnyViable = false;
9472 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9473 if (ViableForN[j]) {
        uint64_t N = j + 1;

        // The shuffle mask must be equal to (i * 2^N) % M.
        if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
          IsAnyViable = true;
        else
          ViableForN[j] = false;
      }
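    // E.g. in the single-input v16i8 case ShuffleModulus == 16 and
    // ModMask == 15, so for N == 2 (j == 1) lane i == 1 must hold
    // (1 << 2) & 15 == 4, matching the "N = 2" example masks above.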
    // Early exit if we exhaust the possible powers of two.
    if (!IsAnyViable)
      break;
  }
  for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
    if (ViableForN[j])
      return j + 1;

  // Return 0 as there is no viable power of two.
  return 0;
}
9495 /// \brief Generic lowering of v16i8 shuffles.
9497 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
9498 /// detect any complexity reducing interleaving. If that doesn't help, it uses
9499 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
/// the existing lowering for v8i16 blends on each half, finally PACK-ing them
/// back together.
9502 static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9503 const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
9506 assert(Op.getSimpleValueType() == MVT::v16i8 && "Bad shuffle type!");
9507 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9508 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9509 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9510 ArrayRef<int> OrigMask = SVOp->getMask();
9511 assert(OrigMask.size() == 16 && "Unexpected mask size for v16 shuffle!");
9513 // Try to use byte shift instructions.
  if (SDValue Shift = lowerVectorShuffleAsByteShift(
          DL, MVT::v16i8, V1, V2, OrigMask, DAG))
    return Shift;
9518 // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
          DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
    return Rotate;
9523 // Try to use a zext lowering.
  if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
          DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
    return ZExt;
9528 int MaskStorage[16] = {
9529 OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9530 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7],
9531 OrigMask[8], OrigMask[9], OrigMask[10], OrigMask[11],
9532 OrigMask[12], OrigMask[13], OrigMask[14], OrigMask[15]};
9533 MutableArrayRef<int> Mask(MaskStorage);
9534 MutableArrayRef<int> LoMask = Mask.slice(0, 8);
9535 MutableArrayRef<int> HiMask = Mask.slice(8, 8);
  int NumV2Elements =
      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 16; });
9540 // For single-input shuffles, there are some nicer lowering tricks we can use.
9541 if (NumV2Elements == 0) {
9542 // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i8, DL, V1,
                                                          Mask, Subtarget, DAG))
      return Broadcast;
9547 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
9548 // Notably, this handles splat and partial-splat shuffles more efficiently.
9549 // However, it only makes sense if the pre-duplication shuffle simplifies
9550 // things significantly. Currently, this means we need to be able to
9551 // express the pre-duplication shuffle as an i16 shuffle.
9553 // FIXME: We should check for other patterns which can be widened into an
9554 // i16 shuffle as well.
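    // E.g. a mask like <0, 0, 3, 3, 5, 5, ...> qualifies (each byte pair
    // duplicates a single source byte), while <0, 1, ...> does not.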
9555 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
      for (int i = 0; i < 16; i += 2)
        if (Mask[i] != -1 && Mask[i + 1] != -1 && Mask[i] != Mask[i + 1])
          return false;
      return true;
    };
9562 auto tryToWidenViaDuplication = [&]() -> SDValue {
      if (!canWidenViaDuplication(Mask))
        return SDValue();
9565 SmallVector<int, 4> LoInputs;
9566 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(LoInputs),
9567 [](int M) { return M >= 0 && M < 8; });
9568 std::sort(LoInputs.begin(), LoInputs.end());
      LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
                     LoInputs.end());
9571 SmallVector<int, 4> HiInputs;
9572 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(HiInputs),
9573 [](int M) { return M >= 8; });
9574 std::sort(HiInputs.begin(), HiInputs.end());
      HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
                     HiInputs.end());
9578 bool TargetLo = LoInputs.size() >= HiInputs.size();
9579 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
9580 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
9582 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9583 SmallDenseMap<int, int, 8> LaneMap;
      for (int I : InPlaceInputs) {
        PreDupI16Shuffle[I/2] = I/2;
        LaneMap[I] = I;
      }
9588 int j = TargetLo ? 0 : 4, je = j + 4;
9589 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
9590 // Check if j is already a shuffle of this input. This happens when
9591 // there are two adjacent bytes after we move the low one.
9592 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
          // If we haven't yet mapped the input, search for a slot into which
          // we can map it.
          while (j < je && PreDupI16Shuffle[j] != -1)
            ++j;

          if (j == je)
            // We can't place the inputs into a single half with a simple i16
            // shuffle, so bail.
            return SDValue();
9602 // Map this input with the i16 shuffle.
          PreDupI16Shuffle[j] = MovingInputs[i] / 2;
        }
9606 // Update the lane map based on the mapping we ended up with.
        LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
      }

      V1 = DAG.getNode(
          ISD::BITCAST, DL, MVT::v16i8,
9611 DAG.getVectorShuffle(MVT::v8i16, DL,
9612 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9613 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
9615 // Unpack the bytes to form the i16s that will be shuffled into place.
9616 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9617 MVT::v16i8, V1, V1);
9619 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9620 for (int i = 0; i < 16; ++i)
9621 if (Mask[i] != -1) {
9622 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
9623 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
9624 if (PostDupI16Shuffle[i / 2] == -1)
9625 PostDupI16Shuffle[i / 2] = MappedMask;
          else
            assert(PostDupI16Shuffle[i / 2] == MappedMask &&
                   "Conflicting entries in the original shuffle!");
        }

      return DAG.getNode(
9631 ISD::BITCAST, DL, MVT::v16i8,
9632 DAG.getVectorShuffle(MVT::v8i16, DL,
9633 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
                               DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
    };

    if (SDValue V = tryToWidenViaDuplication())
      return V;
  }
9640 // Check whether an interleaving lowering is likely to be more efficient.
9641 // This isn't perfect but it is a strong heuristic that tends to work well on
9642 // the kinds of shuffles that show up in practice.
9644 // FIXME: We need to handle other interleaving widths (i16, i32, ...).
9645 if (shouldLowerAsInterleaving(Mask)) {
9646 int NumLoHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
      return (M >= 0 && M < 8) || (M >= 16 && M < 24);
    });
9649 int NumHiHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
      return (M >= 8 && M < 16) || M >= 24;
    });
9652 int EMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
9653 -1, -1, -1, -1, -1, -1, -1, -1};
9654 int OMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
9655 -1, -1, -1, -1, -1, -1, -1, -1};
9656 bool UnpackLo = NumLoHalf >= NumHiHalf;
9657 MutableArrayRef<int> TargetEMask(UnpackLo ? EMask : EMask + 8, 8);
9658 MutableArrayRef<int> TargetOMask(UnpackLo ? OMask : OMask + 8, 8);
9659 for (int i = 0; i < 8; ++i) {
9660 TargetEMask[i] = Mask[2 * i];
      TargetOMask[i] = Mask[2 * i + 1];
    }
9664 SDValue Evens = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, EMask);
9665 SDValue Odds = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, OMask);
9667 return DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
                       MVT::v16i8, Evens, Odds);
  }
9671 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
9672 // with PSHUFB. It is important to do this before we attempt to generate any
9673 // blends but after all of the single-input lowerings. If the single input
9674 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
9675 // want to preserve that and we can DAG combine any longer sequences into
9676 // a PSHUFB in the end. But once we start blending from multiple inputs,
9677 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
9678 // and there are *very* few patterns that would actually be faster than the
9679 // PSHUFB approach because of its ability to zero lanes.
9681 // FIXME: The only exceptions to the above are blends which are exact
9682 // interleavings with direct instructions supporting them. We currently don't
9683 // handle those well here.
  if (Subtarget->hasSSSE3()) {
    SDValue V1Mask[16];
    SDValue V2Mask[16];
    bool V1InUse = false;
    bool V2InUse = false;
9689 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
9691 for (int i = 0; i < 16; ++i) {
9692 if (Mask[i] == -1) {
        V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
      } else {
9695 const int ZeroMask = 0x80;
9696 int V1Idx = (Mask[i] < 16 ? Mask[i] : ZeroMask);
9697 int V2Idx = (Mask[i] < 16 ? ZeroMask : Mask[i] - 16);
        if (Zeroable[i])
          V1Idx = V2Idx = ZeroMask;
9700 V1Mask[i] = DAG.getConstant(V1Idx, MVT::i8);
9701 V2Mask[i] = DAG.getConstant(V2Idx, MVT::i8);
9702 V1InUse |= (ZeroMask != V1Idx);
        V2InUse |= (ZeroMask != V2Idx);
      }
    }
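    // E.g. Mask[i] == 20 selects byte 4 of V2: V1Idx becomes 0x80 (PSHUFB
    // zeroes a lane whose index has the high bit set) and V2Idx becomes 4, so
    // OR-ing the two PSHUFB results reassembles the blended byte.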
    if (V1InUse)
      V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V1,
                       DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
    if (V2InUse)
      V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V2,
                       DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));
9714 // If we need shuffled inputs from both, blend the two.
9715 if (V1InUse && V2InUse)
9716 return DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
    if (V1InUse)
      return V1; // Single inputs are easy.
    if (V2InUse)
      return V2; // Single inputs are easy.
    // Shuffling to a zeroable vector.
    return getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
  }
9725 // There are special ways we can lower some single-element blends.
9726 if (NumV2Elements == 1)
    if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v16i8, DL, V1, V2,
                                                         Mask, Subtarget, DAG))
      return V;
9731 // Check whether a compaction lowering can be done. This handles shuffles
  // which take every Nth element for some even N. See the helper function for
  // details.
  //
  // We special case these as they can be particularly efficiently handled with
  // the PACKUSWB instruction on x86 and they show up in common patterns of
9737 // rearranging bytes to truncate wide elements.
9738 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask)) {
9739 // NumEvenDrops is the power of two stride of the elements. Another way of
9740 // thinking about it is that we need to drop the even elements this many
9741 // times to get the original input.
9742 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9744 // First we need to zero all the dropped bytes.
9745 assert(NumEvenDrops <= 3 &&
9746 "No support for dropping even elements more than 3 times.");
9747 // We use the mask type to pick which bytes are preserved based on how many
9748 // elements are dropped.
9749 MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
9750 SDValue ByteClearMask =
9751 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8,
9752 DAG.getConstant(0xFF, MaskVTs[NumEvenDrops - 1]));
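    // E.g. with NumEvenDrops == 1 the 0xFF constant is built as v8i16, so the
    // bitcast v16i8 mask is <FF,00,FF,00,...> and the AND clears every odd
    // byte ahead of the PACKUS.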
9753 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
    if (!IsSingleInput)
      V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
9757 // Now pack things back together.
9758 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
9759 V2 = IsSingleInput ? V1 : DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
9760 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
9761 for (int i = 1; i < NumEvenDrops; ++i) {
9762 Result = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, Result);
      Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
    }

    return Result;
  }
9769 int V1LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9770 int V1HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9771 int V2LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9772 int V2HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9774 auto buildBlendMasks = [](MutableArrayRef<int> HalfMask,
9775 MutableArrayRef<int> V1HalfBlendMask,
9776 MutableArrayRef<int> V2HalfBlendMask) {
9777 for (int i = 0; i < 8; ++i)
      if (HalfMask[i] >= 0 && HalfMask[i] < 16) {
        V1HalfBlendMask[i] = HalfMask[i];
        HalfMask[i] = i;
9781 } else if (HalfMask[i] >= 16) {
9782 V2HalfBlendMask[i] = HalfMask[i] - 16;
        HalfMask[i] = i + 8;
      }
  };
9786 buildBlendMasks(LoMask, V1LoBlendMask, V2LoBlendMask);
9787 buildBlendMasks(HiMask, V1HiBlendMask, V2HiBlendMask);
9789 SDValue Zero = getZeroVector(MVT::v8i16, Subtarget, DAG, DL);
9791 auto buildLoAndHiV8s = [&](SDValue V, MutableArrayRef<int> LoBlendMask,
                             MutableArrayRef<int> HiBlendMask) {
    SDValue V1, V2;
9794 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
    // them out and avoid using UNPCK{L,H} to extract the elements of V as
    // i16s.
9797 if (std::none_of(LoBlendMask.begin(), LoBlendMask.end(),
9798 [](int M) { return M >= 0 && M % 2 == 1; }) &&
9799 std::none_of(HiBlendMask.begin(), HiBlendMask.end(),
9800 [](int M) { return M >= 0 && M % 2 == 1; })) {
9801 // Use a mask to drop the high bytes.
9802 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
9803 V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, V1,
9804 DAG.getConstant(0x00FF, MVT::v8i16));
9806 // This will be a single vector shuffle instead of a blend so nuke V2.
9807 V2 = DAG.getUNDEF(MVT::v8i16);
9809 // Squash the masks to point directly into V1.
      for (int &M : LoBlendMask)
        if (M >= 0)
          M /= 2;
      for (int &M : HiBlendMask)
        if (M >= 0)
          M /= 2;
    } else {
9817 // Otherwise just unpack the low half of V into V1 and the high half into
9818 // V2 so that we can blend them as i16s.
9819 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9820 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
9821 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
                       DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
    }
9825 SDValue BlendedLo = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
9826 SDValue BlendedHi = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
    return std::make_pair(BlendedLo, BlendedHi);
  };
9829 SDValue V1Lo, V1Hi, V2Lo, V2Hi;
9830 std::tie(V1Lo, V1Hi) = buildLoAndHiV8s(V1, V1LoBlendMask, V1HiBlendMask);
9831 std::tie(V2Lo, V2Hi) = buildLoAndHiV8s(V2, V2LoBlendMask, V2HiBlendMask);
9833 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Lo, V2Lo, LoMask);
9834 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Hi, V2Hi, HiMask);
  return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
}
9839 /// \brief Dispatching routine to lower various 128-bit x86 vector shuffles.
9841 /// This routine breaks down the specific type of 128-bit shuffle and
9842 /// dispatches to the lowering routines accordingly.
9843 static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9844 MVT VT, const X86Subtarget *Subtarget,
9845 SelectionDAG &DAG) {
  switch (VT.SimpleTy) {
  case MVT::v2i64:
    return lowerV2I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v2f64:
    return lowerV2F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v4i32:
    return lowerV4I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v4f32:
    return lowerV4F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v8i16:
    return lowerV8I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v16i8:
    return lowerV16I8VectorShuffle(Op, V1, V2, Subtarget, DAG);

  default:
    llvm_unreachable("Unimplemented!");
  }
}
9865 /// \brief Helper function to test whether a shuffle mask could be
9866 /// simplified by widening the elements being shuffled.
9868 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
9869 /// leaves it in an unspecified state.
9871 /// NOTE: This must handle normal vector shuffle masks and *target* vector
9872 /// shuffle masks. The latter have the special property of a '-2' representing
9873 /// a zero-ed lane of a vector.
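///
/// For example, <0, 1, 2, 3> widens to <0, 1>; <-1, 3, 4, 5> widens to
/// <1, 2>; and <1, 2, 3, 4> cannot be widened because its first pair is not
/// aligned to an element pair.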
9874 static bool canWidenShuffleElements(ArrayRef<int> Mask,
9875 SmallVectorImpl<int> &WidenedMask) {
9876 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
    // If both elements are undef, it's trivial.
    if (Mask[i] == SM_SentinelUndef && Mask[i + 1] == SM_SentinelUndef) {
      WidenedMask.push_back(SM_SentinelUndef);
      continue;
    }
9883 // Check for an undef mask and a mask value properly aligned to fit with
9884 // a pair of values. If we find such a case, use the non-undef mask's value.
    if (Mask[i] == SM_SentinelUndef && Mask[i + 1] >= 0 &&
        Mask[i + 1] % 2 == 1) {
      WidenedMask.push_back(Mask[i + 1] / 2);
      continue;
    }
9889 if (Mask[i + 1] == SM_SentinelUndef && Mask[i] >= 0 && Mask[i] % 2 == 0) {
      WidenedMask.push_back(Mask[i] / 2);
      continue;
    }
9894 // When zeroing, we need to spread the zeroing across both lanes to widen.
9895 if (Mask[i] == SM_SentinelZero || Mask[i + 1] == SM_SentinelZero) {
9896 if ((Mask[i] == SM_SentinelZero || Mask[i] == SM_SentinelUndef) &&
9897 (Mask[i + 1] == SM_SentinelZero || Mask[i + 1] == SM_SentinelUndef)) {
        WidenedMask.push_back(SM_SentinelZero);
        continue;
      }
      return false;
    }
    // Finally check if the two mask values are adjacent and aligned with
    // their pair.
    if (Mask[i] != SM_SentinelUndef && Mask[i] % 2 == 0 &&
        Mask[i] + 1 == Mask[i + 1]) {
      WidenedMask.push_back(Mask[i] / 2);
      continue;
    }
    // Otherwise we can't safely widen the elements used in this shuffle.
    return false;
  }
  assert(WidenedMask.size() == Mask.size() / 2 &&
         "Incorrect size of mask after widening the elements!");

  return true;
}
/// \brief Generic routine to split a vector shuffle into half-sized shuffles.
9922 /// This routine just extracts two subvectors, shuffles them independently, and
9923 /// then concatenates them back together. This should work effectively with all
9924 /// AVX vector shuffle types.
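///
/// E.g. a v8f32 shuffle with mask <0, 8, 1, 9, 6, 14, 7, 15> becomes two
/// independent v4f32 shuffles: <0, 4, 1, 5> on the low halves of the inputs
/// and <2, 6, 3, 7> on the high halves, concatenated back together.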
9925 static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
9926 SDValue V2, ArrayRef<int> Mask,
9927 SelectionDAG &DAG) {
9928 assert(VT.getSizeInBits() >= 256 &&
9929 "Only for 256-bit or wider vector shuffles!");
9930 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
9931 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
9933 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
9934 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
9936 int NumElements = VT.getVectorNumElements();
9937 int SplitNumElements = NumElements / 2;
9938 MVT ScalarVT = VT.getScalarType();
9939 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
9941 SDValue LoV1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V1,
9942 DAG.getIntPtrConstant(0));
9943 SDValue HiV1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V1,
9944 DAG.getIntPtrConstant(SplitNumElements));
9945 SDValue LoV2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V2,
9946 DAG.getIntPtrConstant(0));
9947 SDValue HiV2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V2,
9948 DAG.getIntPtrConstant(SplitNumElements));
9950 // Now create two 4-way blends of these half-width vectors.
9951 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
9952 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
9953 SmallVector<int, 32> V1BlendMask, V2BlendMask, BlendMask;
9954 for (int i = 0; i < SplitNumElements; ++i) {
9955 int M = HalfMask[i];
9956 if (M >= NumElements) {
        if (M >= NumElements + SplitNumElements)
          UseHiV2 = true;
        else
          UseLoV2 = true;
9961 V2BlendMask.push_back(M - NumElements);
9962 V1BlendMask.push_back(-1);
9963 BlendMask.push_back(SplitNumElements + i);
9964 } else if (M >= 0) {
        if (M >= SplitNumElements)
          UseHiV1 = true;
        else
          UseLoV1 = true;
9969 V2BlendMask.push_back(-1);
9970 V1BlendMask.push_back(M);
        BlendMask.push_back(i);
      } else {
        V2BlendMask.push_back(-1);
9974 V1BlendMask.push_back(-1);
        BlendMask.push_back(-1);
      }
    }
9979 // Because the lowering happens after all combining takes place, we need to
9980 // manually combine these blend masks as much as possible so that we create
9981 // a minimal number of high-level vector shuffle nodes.
9983 // First try just blending the halves of V1 or V2.
9984 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
9985 return DAG.getUNDEF(SplitVT);
9986 if (!UseLoV2 && !UseHiV2)
9987 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
9988 if (!UseLoV1 && !UseHiV1)
9989 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
9991 SDValue V1Blend, V2Blend;
    if (UseLoV1 && UseHiV1) {
      V1Blend =
          DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
    } else {
9996 // We only use half of V1 so map the usage down into the final blend mask.
9997 V1Blend = UseLoV1 ? LoV1 : HiV1;
9998 for (int i = 0; i < SplitNumElements; ++i)
9999 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
          BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
    }
    if (UseLoV2 && UseHiV2) {
      V2Blend =
          DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
    } else {
10006 // We only use half of V2 so map the usage down into the final blend mask.
10007 V2Blend = UseLoV2 ? LoV2 : HiV2;
10008 for (int i = 0; i < SplitNumElements; ++i)
10009 if (BlendMask[i] >= SplitNumElements)
          BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
    }
    return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
  };
10014 SDValue Lo = HalfBlend(LoMask);
10015 SDValue Hi = HalfBlend(HiMask);
  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
}
/// \brief Either split a vector in halves or decompose the shuffles and the
/// blend.
10022 /// This is provided as a good fallback for many lowerings of non-single-input
10023 /// shuffles with more than one 128-bit lane. In those cases, we want to select
10024 /// between splitting the shuffle into 128-bit components and stitching those
/// back together vs. extracting the single-input shuffles and blending those
/// results.
10027 static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
10028 SDValue V2, ArrayRef<int> Mask,
10029 SelectionDAG &DAG) {
10030 assert(!isSingleInputShuffleMask(Mask) && "This routine must not be used to "
10031 "lower single-input shuffles as it "
10032 "could then recurse on itself.");
10033 int Size = Mask.size();
10035 // If this can be modeled as a broadcast of two elements followed by a blend,
10036 // prefer that lowering. This is especially important because broadcasts can
10037 // often fold with memory operands.
10038 auto DoBothBroadcast = [&] {
    int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
    for (int M : Mask)
      if (M >= Size) {
10042 if (V2BroadcastIdx == -1)
10043 V2BroadcastIdx = M - Size;
        else if (M - Size != V2BroadcastIdx)
          return false;
10046 } else if (M >= 0) {
10047 if (V1BroadcastIdx == -1)
10048 V1BroadcastIdx = M;
        else if (M != V1BroadcastIdx)
          return false;
      }
    return true;
  };
  if (DoBothBroadcast())
    return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
                                                      DAG);
10058 // If the inputs all stem from a single 128-bit lane of each input, then we
10059 // split them rather than blending because the split will decompose to
10060 // unusually few instructions.
10061 int LaneCount = VT.getSizeInBits() / 128;
10062 int LaneSize = Size / LaneCount;
10063 SmallBitVector LaneInputs[2];
10064 LaneInputs[0].resize(LaneCount, false);
10065 LaneInputs[1].resize(LaneCount, false);
10066 for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0)
      LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
10069 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
10070 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10072 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
10073 // that the decomposed single-input shuffles don't end up here.
  return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
}
10077 /// \brief Lower a vector shuffle crossing multiple 128-bit lanes as
10078 /// a permutation and blend of those lanes.
10080 /// This essentially blends the out-of-lane inputs to each lane into the lane
10081 /// from a permuted copy of the vector. This lowering strategy results in four
10082 /// instructions in the worst case for a single-input cross lane shuffle which
10083 /// is lower than any other fully general cross-lane shuffle strategy I'm aware
10084 /// of. Special cases for each particular shuffle pattern should be handled
10085 /// prior to trying this lowering.
10086 static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT,
10087 SDValue V1, SDValue V2,
10088 ArrayRef<int> Mask,
10089 SelectionDAG &DAG) {
10090 // FIXME: This should probably be generalized for 512-bit vectors as well.
10091 assert(VT.getSizeInBits() == 256 && "Only for 256-bit vector shuffles!");
10092 int LaneSize = Mask.size() / 2;
10094 // If there are only inputs from one 128-bit lane, splitting will in fact be
  // less expensive. The flags track whether the given lane contains an element
10096 // that crosses to another lane.
10097 bool LaneCrossing[2] = {false, false};
10098 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10099 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10100 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
10101 if (!LaneCrossing[0] || !LaneCrossing[1])
10102 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10104 if (isSingleInputShuffleMask(Mask)) {
10105 SmallVector<int, 32> FlippedBlendMask;
10106 for (int i = 0, Size = Mask.size(); i < Size; ++i)
      FlippedBlendMask.push_back(
          Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
                                  ? Mask[i]
                                  : Mask[i] % LaneSize +
                                        (i / LaneSize) * LaneSize + Size));
10113 // Flip the vector, and blend the results which should now be in-lane. The
10114 // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
10115 // 5 for the high source. The value 3 selects the high half of source 2 and
10116 // the value 2 selects the low half of source 2. We only use source 2 to
10117 // allow folding it into a memory operand.
10118 unsigned PERMMask = 3 | 2 << 4;
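    // I.e. PERMMask == 0x23: the resulting low half is source 2's high half
    // (selector 3) and the resulting high half is source 2's low half
    // (selector 2), flipping the halves of V1.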
10119 SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
10120 V1, DAG.getConstant(PERMMask, MVT::i8));
    return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
  }
10124 // This now reduces to two single-input shuffles of V1 and V2 which at worst
10125 // will be handled by the above logic and a blend of the results, much like
10126 // other patterns in AVX.
  return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
}
10130 /// \brief Handle lowering 2-lane 128-bit shuffles.
10131 static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10132 SDValue V2, ArrayRef<int> Mask,
10133 const X86Subtarget *Subtarget,
10134 SelectionDAG &DAG) {
10135 // Blends are faster and handle all the non-lane-crossing cases.
  if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask,
                                                Subtarget, DAG))
    return Blend;
10140 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
10141 VT.getVectorNumElements() / 2);
  // Check for patterns which can be matched with a single insert of a 128-bit
  // subvector.
10144 if (isShuffleEquivalent(Mask, 0, 1, 0, 1) ||
10145 isShuffleEquivalent(Mask, 0, 1, 4, 5)) {
10146 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10147 DAG.getIntPtrConstant(0));
10148 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
10149 Mask[2] < 4 ? V1 : V2, DAG.getIntPtrConstant(0));
    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
  }
10152 if (isShuffleEquivalent(Mask, 0, 1, 6, 7)) {
10153 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10154 DAG.getIntPtrConstant(0));
10155 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
10156 DAG.getIntPtrConstant(2));
    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
  }
10160 // Otherwise form a 128-bit permutation.
10161 // FIXME: Detect zero-vector inputs and use the VPERM2X128 to zero that half.
10162 unsigned PermMask = Mask[0] / 2 | (Mask[2] / 2) << 4;
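  // E.g. a widened mask of <2, 3, 6, 7> gives PermMask == 0x31: V1's high
  // half (selector 1) into the low lane and V2's high half (selector 3) into
  // the high lane.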
10163 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
                     DAG.getConstant(PermMask, MVT::i8));
}
10167 /// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
10168 /// shuffling each lane.
/// This will only succeed when the result of fixing the 128-bit lanes is
/// a single-input non-lane-crossing shuffle with a repeating shuffle mask in
/// each 128-bit lane. This handles many cases where we can quickly blend away
10173 /// the lane crosses early and then use simpler shuffles within each lane.
10175 /// FIXME: It might be worthwhile at some point to support this without
10176 /// requiring the 128-bit lane-relative shuffles to be repeating, but currently
10177 /// in x86 only floating point has interesting non-repeating shuffles, and even
10178 /// those are still *marginally* more expensive.
10179 static SDValue lowerVectorShuffleByMerging128BitLanes(
10180 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
10181 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
10182 assert(!isSingleInputShuffleMask(Mask) &&
10183 "This is only useful with multiple inputs.");
10185 int Size = Mask.size();
10186 int LaneSize = 128 / VT.getScalarSizeInBits();
10187 int NumLanes = Size / LaneSize;
10188 assert(NumLanes > 1 && "Only handles 256-bit and wider shuffles.");
10190 // See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
10191 // check whether the in-128-bit lane shuffles share a repeating pattern.
10192 SmallVector<int, 4> Lanes;
10193 Lanes.resize(NumLanes, -1);
10194 SmallVector<int, 4> InLaneMask;
10195 InLaneMask.resize(LaneSize, -1);
10196 for (int i = 0; i < Size; ++i) {
    if (Mask[i] < 0)
      continue;

    int j = i / LaneSize;
10202 if (Lanes[j] < 0) {
10203 // First entry we've seen for this lane.
10204 Lanes[j] = Mask[i] / LaneSize;
10205 } else if (Lanes[j] != Mask[i] / LaneSize) {
      // This doesn't match the lane selected previously!
      return SDValue();
    }
10210 // Check that within each lane we have a consistent shuffle mask.
10211 int k = i % LaneSize;
10212 if (InLaneMask[k] < 0) {
10213 InLaneMask[k] = Mask[i] % LaneSize;
10214 } else if (InLaneMask[k] != Mask[i] % LaneSize) {
      // This doesn't fit a repeating in-lane mask.
      return SDValue();
    }
  }
10220 // First shuffle the lanes into place.
10221 MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
10222 VT.getSizeInBits() / 64);
10223 SmallVector<int, 8> LaneMask;
10224 LaneMask.resize(NumLanes * 2, -1);
10225 for (int i = 0; i < NumLanes; ++i)
10226 if (Lanes[i] >= 0) {
10227 LaneMask[2 * i + 0] = 2*Lanes[i] + 0;
      LaneMask[2 * i + 1] = 2*Lanes[i] + 1;
    }
10231 V1 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V1);
10232 V2 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V2);
10233 SDValue LaneShuffle = DAG.getVectorShuffle(LaneVT, DL, V1, V2, LaneMask);
10235 // Cast it back to the type we actually want.
10236 LaneShuffle = DAG.getNode(ISD::BITCAST, DL, VT, LaneShuffle);
10238 // Now do a simple shuffle that isn't lane crossing.
10239 SmallVector<int, 8> NewMask;
10240 NewMask.resize(Size, -1);
10241 for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0)
      NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
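  // E.g. with LaneSize == 4, Mask[6] == 13 becomes NewMask[6] ==
  // (6 / 4) * 4 + 13 % 4 == 5: the element is now sourced from within the
  // same lane.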
10244 assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) &&
10245 "Must not introduce lane crosses at this point!");
  return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask);
}
/// \brief Test whether the specified input (0 or 1) is in-place blended by the
/// given mask.
10253 /// This returns true if the elements from a particular input are already in the
10254 /// slot required by the given mask and require no permutation.
10255 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
10256 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
10257 int Size = Mask.size();
10258 for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
      return false;

  return true;
}
10265 /// \brief Handle lowering of 4-lane 64-bit floating point shuffles.
10267 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
10268 /// isn't available.
10269 static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10270 const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
10273 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10274 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10275 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10276 ArrayRef<int> Mask = SVOp->getMask();
10277 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10279 SmallVector<int, 4> WidenedMask;
10280 if (canWidenShuffleElements(Mask, WidenedMask))
    return lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask, Subtarget,
                                    DAG);
10284 if (isSingleInputShuffleMask(Mask)) {
10285 // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f64, DL, V1,
                                                          Mask, Subtarget, DAG))
      return Broadcast;
10290 // Use low duplicate instructions for masks that match their pattern.
10291 if (isShuffleEquivalent(Mask, 0, 0, 2, 2))
10292 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
10294 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
      // Non-half-crossing single input shuffles can be lowered with an
10296 // interleaved permutation.
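      // E.g. Mask == <1, 0, 3, 2> gives VPERMILPMask == 0b0101, swapping the
      // two doubles within each 128-bit lane.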
10297 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
10298 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
10299 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
                         DAG.getConstant(VPERMILPMask, MVT::i8));
    }
10303 // With AVX2 we have direct support for this permutation.
10304 if (Subtarget->hasAVX2())
10305 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
10306 getV4X86ShuffleImm8ForMask(Mask, DAG));
10308 // Otherwise, fall back.
    return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
                                                   DAG);
  }
10313 // X86 has dedicated unpack instructions that can handle specific blend
10314 // operations: UNPCKH and UNPCKL.
10315 if (isShuffleEquivalent(Mask, 0, 4, 2, 6))
10316 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V1, V2);
10317 if (isShuffleEquivalent(Mask, 1, 5, 3, 7))
10318 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V1, V2);
10320 // If we have a single input to the zero element, insert that into V1 if we
10321 // can do so cheaply.
10322 int NumV2Elements =
10323 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
10324 if (NumV2Elements == 1 && Mask[0] >= 4)
10325 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
            MVT::v4f64, DL, V1, V2, Mask, Subtarget, DAG))
      return Insertion;
  if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
                                                Subtarget, DAG))
    return Blend;
10333 // Check if the blend happens to exactly fit that of SHUFPD.
10334 if ((Mask[0] == -1 || Mask[0] < 2) &&
10335 (Mask[1] == -1 || (Mask[1] >= 4 && Mask[1] < 6)) &&
10336 (Mask[2] == -1 || (Mask[2] >= 2 && Mask[2] < 4)) &&
10337 (Mask[3] == -1 || Mask[3] >= 6)) {
10338 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 5) << 1) |
10339 ((Mask[2] == 3) << 2) | ((Mask[3] == 7) << 3);
10340 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V1, V2,
                       DAG.getConstant(SHUFPDMask, MVT::i8));
  }
10343 if ((Mask[0] == -1 || (Mask[0] >= 4 && Mask[0] < 6)) &&
10344 (Mask[1] == -1 || Mask[1] < 2) &&
10345 (Mask[2] == -1 || Mask[2] >= 6) &&
10346 (Mask[3] == -1 || (Mask[3] >= 2 && Mask[3] < 4))) {
10347 unsigned SHUFPDMask = (Mask[0] == 5) | ((Mask[1] == 1) << 1) |
10348 ((Mask[2] == 7) << 2) | ((Mask[3] == 3) << 3);
10349 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V2, V1,
                       DAG.getConstant(SHUFPDMask, MVT::i8));
  }
10353 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10354 // shuffle. However, if we have AVX2 and either inputs are already in place,
10355 // we will be able to shuffle even across lanes the other input in a single
10356 // instruction so skip this pattern.
10357 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10358 isShuffleMaskInputInPlace(1, Mask))))
10359 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
            DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
      return Result;
  // If we have AVX2 then we always want to lower with a blend because at v4 we
  // can fully permute the elements.
  if (Subtarget->hasAVX2())
    return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
                                                      Mask, DAG);
10369 // Otherwise fall back on generic lowering.
  return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, DAG);
}
10373 /// \brief Handle lowering of 4-lane 64-bit integer shuffles.
10375 /// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v4i64 shuffling.
10377 static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10378 const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
10381 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10382 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10383 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10384 ArrayRef<int> Mask = SVOp->getMask();
10385 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10386 assert(Subtarget->hasAVX2() && "We can only lower v4i64 with AVX2!");
10388 SmallVector<int, 4> WidenedMask;
10389 if (canWidenShuffleElements(Mask, WidenedMask))
    return lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask, Subtarget,
                                    DAG);
  if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
                                                Subtarget, DAG))
    return Blend;
10397 // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i64, DL, V1,
                                                        Mask, Subtarget, DAG))
    return Broadcast;
10402 // When the shuffle is mirrored between the 128-bit lanes of the unit, we can
10403 // use lower latency instructions that will operate on both 128-bit lanes.
10404 SmallVector<int, 2> RepeatedMask;
10405 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
10406 if (isSingleInputShuffleMask(Mask)) {
10407 int PSHUFDMask[] = {-1, -1, -1, -1};
10408 for (int i = 0; i < 2; ++i)
10409 if (RepeatedMask[i] >= 0) {
10410 PSHUFDMask[2 * i] = 2 * RepeatedMask[i];
          PSHUFDMask[2 * i + 1] = 2 * RepeatedMask[i] + 1;
        }
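      // E.g. RepeatedMask == <1, 0> expands to PSHUFDMask == <2, 3, 0, 1>,
      // swapping the two i64 elements within each 128-bit lane via PSHUFD.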
10413 return DAG.getNode(
10414 ISD::BITCAST, DL, MVT::v4i64,
10415 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
10416 DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, V1),
                      getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
    }
10420 // Use dedicated unpack instructions for masks that match their pattern.
10421 if (isShuffleEquivalent(Mask, 0, 4, 2, 6))
10422 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V1, V2);
10423 if (isShuffleEquivalent(Mask, 1, 5, 3, 7))
      return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V1, V2);
  }
  // AVX2 provides a direct instruction for permuting a single input across
  // lanes.
10429 if (isSingleInputShuffleMask(Mask))
10430 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
10431 getV4X86ShuffleImm8ForMask(Mask, DAG));
10433 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10434 // shuffle. However, if we have AVX2 and either inputs are already in place,
10435 // we will be able to shuffle even across lanes the other input in a single
10436 // instruction so skip this pattern.
10437 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10438 isShuffleMaskInputInPlace(1, Mask))))
10439 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
            DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
      return Result;
10443 // Otherwise fall back on generic blend lowering.
  return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
                                                    Mask, DAG);
}
10448 /// \brief Handle lowering of 8-lane 32-bit floating point shuffles.
10450 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
10451 /// isn't available.
10452 static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10453 const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
10456 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10457 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10458 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10459 ArrayRef<int> Mask = SVOp->getMask();
10460 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
  if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
                                                Subtarget, DAG))
    return Blend;
10466 // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8f32, DL, V1,
                                                        Mask, Subtarget, DAG))
    return Broadcast;
10471 // If the shuffle mask is repeated in each 128-bit lane, we have many more
10472 // options to efficiently lower the shuffle.
10473 SmallVector<int, 4> RepeatedMask;
10474 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
10475 assert(RepeatedMask.size() == 4 &&
10476 "Repeated masks must be half the mask width!");
10478 // Use even/odd duplicate instructions for masks that match their pattern.
10479 if (isShuffleEquivalent(Mask, 0, 0, 2, 2, 4, 4, 6, 6))
10480 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
10481 if (isShuffleEquivalent(Mask, 1, 1, 3, 3, 5, 5, 7, 7))
10482 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
10484 if (isSingleInputShuffleMask(Mask))
10485 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
10486 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10488 // Use dedicated unpack instructions for masks that match their pattern.
10489 if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10490 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V1, V2);
10491 if (isShuffleEquivalent(Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10492 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V1, V2);
10494 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
10495 // have already handled any direct blends. We also need to squash the
10496 // repeated mask into a simulated v4f32 mask.
10497 for (int i = 0; i < 4; ++i)
10498 if (RepeatedMask[i] >= 8)
10499 RepeatedMask[i] -= 4;
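    // E.g. a repeated two-input mask of <2, 8, 1, 11> is squashed to the
    // v4f32-style mask <2, 4, 1, 7> that the SHUFPS helper expects.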
    return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2,
                                        DAG);
  }
10503 // If we have a single input shuffle with different shuffle patterns in the
10504 // two 128-bit lanes use the variable mask to VPERMILPS.
10505 if (isSingleInputShuffleMask(Mask)) {
10506 SDValue VPermMask[8];
10507 for (int i = 0; i < 8; ++i)
10508 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10509 : DAG.getConstant(Mask[i], MVT::i32);
10510 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
10511 return DAG.getNode(
10512 X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
10513 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask));
10515 if (Subtarget->hasAVX2())
10516 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32,
10517 DAG.getNode(ISD::BITCAST, DL, MVT::v8f32,
10518 DAG.getNode(ISD::BUILD_VECTOR, DL,
                                                     MVT::v8i32, VPermMask)),
                         V1);

    // Otherwise, fall back.
    return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
                                                   DAG);
  }
  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
10529 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
          DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
    return Result;
10533 // If we have AVX2 then we always want to lower with a blend because at v8 we
10534 // can fully permute the elements.
10535 if (Subtarget->hasAVX2())
    return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
                                                      Mask, DAG);
10539 // Otherwise fall back on generic lowering.
  return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, DAG);
}
10543 /// \brief Handle lowering of 8-lane 32-bit integer shuffles.
10545 /// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v8i32 shuffling.
10547 static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10548 const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
10551 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10552 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10553 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10554 ArrayRef<int> Mask = SVOp->getMask();
10555 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10556 assert(Subtarget->hasAVX2() && "We can only lower v8i32 with AVX2!");
  if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
                                                Subtarget, DAG))
    return Blend;
10562 // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i32, DL, V1,
                                                        Mask, Subtarget, DAG))
    return Broadcast;
10567 // If the shuffle mask is repeated in each 128-bit lane we can use more
  // efficient instructions that mirror the shuffles across the two 128-bit
  // lanes.
10570 SmallVector<int, 4> RepeatedMask;
10571 if (is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask)) {
10572 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
10573 if (isSingleInputShuffleMask(Mask))
10574 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
10575 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10577 // Use dedicated unpack instructions for masks that match their pattern.
10578 if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10579 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V1, V2);
10580 if (isShuffleEquivalent(Mask, 2, 10, 3, 11, 6, 14, 7, 15))
      return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V1, V2);
  }
10584 // If the shuffle patterns aren't repeated but it is a single input, directly
10585 // generate a cross-lane VPERMD instruction.
10586 if (isSingleInputShuffleMask(Mask)) {
10587 SDValue VPermMask[8];
10588 for (int i = 0; i < 8; ++i)
10589 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10590 : DAG.getConstant(Mask[i], MVT::i32);
10591 return DAG.getNode(
10592 X86ISD::VPERMV, DL, MVT::v8i32,
10593 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
10598 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
          DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
    return Result;
10602 // Otherwise fall back on generic blend lowering.
  return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
                                                    Mask, DAG);
}
10607 /// \brief Handle lowering of 16-lane 16-bit integer shuffles.
10609 /// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v16i16 shuffling.
10611 static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10612 const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
10615 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10616 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10617 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10618 ArrayRef<int> Mask = SVOp->getMask();
10619 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
10620 assert(Subtarget->hasAVX2() && "We can only lower v16i16 with AVX2!");
10622 // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i16, DL, V1,
                                                        Mask, Subtarget, DAG))
    return Broadcast;
  if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
                                                Subtarget, DAG))
    return Blend;
10631 // Use dedicated unpack instructions for masks that match their pattern.
10632 if (isShuffleEquivalent(Mask,
10633 // First 128-bit lane:
10634 0, 16, 1, 17, 2, 18, 3, 19,
10635 // Second 128-bit lane:
10636 8, 24, 9, 25, 10, 26, 11, 27))
10637 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i16, V1, V2);
10638 if (isShuffleEquivalent(Mask,
10639 // First 128-bit lane:
10640 4, 20, 5, 21, 6, 22, 7, 23,
10641 // Second 128-bit lane:
10642 12, 28, 13, 29, 14, 30, 15, 31))
10643 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i16, V1, V2);
10645 if (isSingleInputShuffleMask(Mask)) {
    // There are no generalized cross-lane shuffle operations available on i16
    // element types.
10648 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
      return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
                                                     Mask, DAG);
10652 SDValue PSHUFBMask[32];
10653 for (int i = 0; i < 16; ++i) {
10654 if (Mask[i] == -1) {
        PSHUFBMask[2 * i] = PSHUFBMask[2 * i + 1] = DAG.getUNDEF(MVT::i8);
        continue;
      }
10659 int M = i < 8 ? Mask[i] : Mask[i] - 8;
10660 assert(M >= 0 && M < 8 && "Invalid single-input mask!");
10661 PSHUFBMask[2 * i] = DAG.getConstant(2 * M, MVT::i8);
      PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, MVT::i8);
    }
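    // Each i16 lane M expands to the byte pair (2*M, 2*M + 1); e.g. lane 3
    // becomes PSHUFB byte indices <6, 7>.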
10664 return DAG.getNode(
10665 ISD::BITCAST, DL, MVT::v16i16,
        DAG.getNode(
            X86ISD::PSHUFB, DL, MVT::v32i8,
10668 DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1),
            DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask)));
  }
  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
10674 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
          DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
    return Result;
10678 // Otherwise fall back on generic lowering.
  return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG);
}
10682 /// \brief Handle lowering of 32-lane 8-bit integer shuffles.
10684 /// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v32i8 shuffling.
10686 static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10687 const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
10690 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
10691 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
10692 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10693 ArrayRef<int> Mask = SVOp->getMask();
10694 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
10695 assert(Subtarget->hasAVX2() && "We can only lower v32i8 with AVX2!");
10697 // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v32i8, DL, V1,
                                                        Mask, Subtarget, DAG))
    return Broadcast;
  if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
                                                Subtarget, DAG))
    return Blend;
10706 // Use dedicated unpack instructions for masks that match their pattern.
  // Note that these are repeated 128-bit lane unpacks, not unpacks across all
  // 256-bit lanes.
  if (isShuffleEquivalent(
          Mask,
10711 // First 128-bit lane:
10712 0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39,
10713 // Second 128-bit lane:
10714 16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55))
10715 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v32i8, V1, V2);
  if (isShuffleEquivalent(
          Mask,
10718 // First 128-bit lane:
10719 8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
10720 // Second 128-bit lane:
10721 24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63))
10722 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v32i8, V1, V2);
10724 if (isSingleInputShuffleMask(Mask)) {
    // There are no generalized cross-lane shuffle operations available on i8
    // element types.
10727 if (is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
      return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2,
                                                     Mask, DAG);
10731 SDValue PSHUFBMask[32];
    for (int i = 0; i < 32; ++i)
      PSHUFBMask[i] =
          Mask[i] < 0
              ? DAG.getUNDEF(MVT::i8)
              : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16,
                                MVT::i8);
10738 return DAG.getNode(
10739 X86ISD::PSHUFB, DL, MVT::v32i8, V1,
        DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask));
  }
  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
10745 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
          DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
    return Result;
10749 // Otherwise fall back on generic lowering.
  return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG);
}
10753 /// \brief High-level routine to lower various 256-bit x86 vector shuffles.
10755 /// This routine either breaks down the specific type of a 256-bit x86 vector
10756 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
10757 /// together based on the available instructions.
10758 static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10759 MVT VT, const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10763 ArrayRef<int> Mask = SVOp->getMask();
10765 // There is a really nice hard cut-over between AVX1 and AVX2 that means we can
10766 // check for those subtargets here and avoid much of the subtarget querying in
10767 // the per-vector-type lowering routines. With AVX1 we have essentially *zero*
10768 // ability to manipulate a 256-bit vector with integer types. Since we'll use
10769 // floating point types there eventually, just immediately cast everything to
10770 // a float and operate entirely in that domain.
10771 if (VT.isInteger() && !Subtarget->hasAVX2()) {
10772 int ElementBits = VT.getScalarSizeInBits();
10773 if (ElementBits < 32)
10774 // No floating point type available, decompose into 128-bit vectors.
10775 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10777 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
10778 VT.getVectorNumElements());
10779 V1 = DAG.getNode(ISD::BITCAST, DL, FpVT, V1);
10780 V2 = DAG.getNode(ISD::BITCAST, DL, FpVT, V2);
10781 return DAG.getNode(ISD::BITCAST, DL, VT,
10782 DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
  switch (VT.SimpleTy) {
  case MVT::v4f64:
    return lowerV4F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v4i64:
    return lowerV4I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v8f32:
    return lowerV8F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v8i32:
    return lowerV8I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v16i16:
    return lowerV16I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v32i8:
    return lowerV32I8VectorShuffle(Op, V1, V2, Subtarget, DAG);

  default:
    llvm_unreachable("Not a valid 256-bit x86 vector type!");
  }
}
10804 /// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
10805 static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10806 const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
10809 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
10810 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
10811 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10812 ArrayRef<int> Mask = SVOp->getMask();
10813 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10815 // X86 has dedicated unpack instructions that can handle specific blend
10816 // operations: UNPCKH and UNPCKL.
10817 if (isShuffleEquivalent(Mask, 0, 8, 2, 10, 4, 12, 6, 14))
10818 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f64, V1, V2);
10819 if (isShuffleEquivalent(Mask, 1, 9, 3, 11, 5, 13, 7, 15))
10820 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f64, V1, V2);
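
  // For reference: on v8f64, UNPCKL interleaves the even-indexed elements of
  // each 128-bit pair, so <0, 8, 2, 10, 4, 12, 6, 14> produces
  // (V1[0], V2[0], V1[2], V2[2], V1[4], V2[4], V1[6], V2[6]); UNPCKH does
  // the same for the odd-indexed elements.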

  // FIXME: Implement direct support for this type!
  return splitAndLowerVectorShuffle(DL, MVT::v8f64, V1, V2, Mask, DAG);
}

/// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                        const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(Mask,
                          0, 16, 1, 17, 4, 20, 5, 21,
                          8, 24, 9, 25, 12, 28, 13, 29))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16f32, V1, V2);
  if (isShuffleEquivalent(Mask,
                          2, 18, 3, 19, 6, 22, 7, 23,
                          10, 26, 11, 27, 14, 30, 15, 31))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16f32, V1, V2);

  // FIXME: Implement direct support for this type!
  return splitAndLowerVectorShuffle(DL, MVT::v16f32, V1, V2, Mask, DAG);
}

/// \brief Handle lowering of 8-lane 64-bit integer shuffles.
static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  // X86 has dedicated unpack instructions that can handle specific blend
  // operations: UNPCKH and UNPCKL.
  if (isShuffleEquivalent(Mask, 0, 8, 2, 10, 4, 12, 6, 14))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i64, V1, V2);
  if (isShuffleEquivalent(Mask, 1, 9, 3, 11, 5, 13, 7, 15))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i64, V1, V2);

  // FIXME: Implement direct support for this type!
  return splitAndLowerVectorShuffle(DL, MVT::v8i64, V1, V2, Mask, DAG);
}

/// \brief Handle lowering of 16-lane 32-bit integer shuffles.
static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                        const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(Mask,
                          0, 16, 1, 17, 4, 20, 5, 21,
                          8, 24, 9, 25, 12, 28, 13, 29))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i32, V1, V2);
  if (isShuffleEquivalent(Mask,
                          2, 18, 3, 19, 6, 22, 7, 23,
                          10, 26, 11, 27, 14, 30, 15, 31))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i32, V1, V2);

  // FIXME: Implement direct support for this type!
  return splitAndLowerVectorShuffle(DL, MVT::v16i32, V1, V2, Mask, DAG);
}

/// \brief Handle lowering of 32-lane 16-bit integer shuffles.
static SDValue lowerV32I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                        const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
  assert(Subtarget->hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");

  // FIXME: Implement direct support for this type!
  return splitAndLowerVectorShuffle(DL, MVT::v32i16, V1, V2, Mask, DAG);
}

/// \brief Handle lowering of 64-lane 8-bit integer shuffles.
static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
  assert(Subtarget->hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");

  // FIXME: Implement direct support for this type!
  return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
}

/// \brief High-level routine to lower various 512-bit x86 vector shuffles.
///
/// This routine either breaks down the specific type of a 512-bit x86 vector
/// shuffle or splits it into two 256-bit shuffles and fuses the results back
/// together based on the available instructions.
static SDValue lower512BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                        MVT VT, const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Subtarget->hasAVX512() &&
         "Cannot lower 512-bit vectors w/o basic ISA!");

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(VT.SimpleTy, DL, V1,
                                                        Mask, Subtarget, DAG))
    return Broadcast;

  // Dispatch to each element type for lowering. If we don't have support for
  // specific element type shuffles at 512 bits, immediately split them and
  // lower them. Each lowering routine of a given type is allowed to assume that
  // the requisite ISA extensions for that element type are available.
  switch (VT.SimpleTy) {
  case MVT::v8f64:
    return lowerV8F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v16f32:
    return lowerV16F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v8i64:
    return lowerV8I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v16i32:
    return lowerV16I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v32i16:
    if (Subtarget->hasBWI())
      return lowerV32I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
    break;
  case MVT::v64i8:
    if (Subtarget->hasBWI())
      return lowerV64I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
    break;

  default:
    llvm_unreachable("Not a valid 512-bit x86 vector type!");
  }

  // Otherwise fall back on splitting.
  return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
}

/// \brief Top-level lowering for x86 vector shuffles.
///
/// This handles decomposition, canonicalization, and lowering of all x86
/// vector shuffles. Most of the specific lowering strategies are encapsulated
/// above in helper routines. The canonicalization attempts to widen shuffles
/// to involve fewer lanes of wider elements, consolidate symmetric patterns
/// s.t. only one of the two inputs needs to be tested, etc.
static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
                                  SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();
  int NumElements = VT.getVectorNumElements();
  SDLoc dl(Op);

  assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");

  bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
  if (V1IsUndef && V2IsUndef)
    return DAG.getUNDEF(VT);

  // When we create a shuffle node we put the UNDEF node to the second operand,
  // but in some cases the first operand may be transformed to UNDEF.
  // In this case we should just commute the node.
  if (V1IsUndef)
    return DAG.getCommutedVectorShuffle(*SVOp);

  // Check for non-undef masks pointing at an undef vector and make the masks
  // undef as well. This makes it easier to match the shuffle based solely on
  // the mask.
  if (V2IsUndef)
    for (int M : Mask)
      if (M >= NumElements) {
        SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
        for (int &M : NewMask)
          if (M >= NumElements)
            M = -1;
        return DAG.getVectorShuffle(VT, dl, V1, V2, NewMask);
      }
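
  // For example, with V2 undef a v4i32 mask <0, 5, 2, 7> is rewritten as
  // <0, -1, 2, -1>, so later matching only has to look at the mask to see
  // that a single input is used.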

  // Try to collapse shuffles into using a vector type with fewer elements but
  // wider element types. We cap this to not form integers or floating point
  // elements wider than 64 bits, but it might be interesting to form i128
  // integers to handle flipping the low and high halves of AVX 256-bit vectors.
  SmallVector<int, 16> WidenedMask;
  if (VT.getScalarSizeInBits() < 64 &&
      canWidenShuffleElements(Mask, WidenedMask)) {
    MVT NewEltVT = VT.isFloatingPoint()
                       ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
                       : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
    MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
    // Make sure that the new vector type is legal. For example, v2f64 isn't
    // legal on SSE1.
    if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
      V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
      V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
      return DAG.getNode(ISD::BITCAST, dl, VT,
                         DAG.getVectorShuffle(NewVT, dl, V1, V2, WidenedMask));
    }
  }
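
  // A minimal sketch of the widening in IR terms (the types are just an
  // example): with SSE2,
  //   shufflevector <4 x i32> %a, <4 x i32> %b,
  //                 <4 x i32> <i32 0, i32 1, i32 4, i32 5>
  // is re-expressed over the bitcast operands as
  //   shufflevector <2 x i64> %a64, <2 x i64> %b64, <2 x i32> <i32 0, i32 2>
  // which later matching can lower to a single punpcklqdq.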

  int NumV1Elements = 0, NumUndefElements = 0, NumV2Elements = 0;
  for (int M : SVOp->getMask())
    if (M < 0)
      ++NumUndefElements;
    else if (M < NumElements)
      ++NumV1Elements;
    else
      ++NumV2Elements;

  // Commute the shuffle as needed such that more elements come from V1 than
  // V2. This allows us to match the shuffle pattern strictly on how many
  // elements come from V1 without handling the symmetric cases.
  if (NumV2Elements > NumV1Elements)
    return DAG.getCommutedVectorShuffle(*SVOp);

  // When the number of V1 and V2 elements are the same, try to minimize the
  // number of uses of V2 in the low half of the vector. When that is tied,
  // ensure that the sum of indices for V1 is equal to or lower than the sum of
  // indices for V2. When those are equal, try to ensure that the number of odd
  // indices for V1 is lower than the number of odd indices for V2.
  if (NumV1Elements == NumV2Elements) {
    int LowV1Elements = 0, LowV2Elements = 0;
    for (int M : SVOp->getMask().slice(0, NumElements / 2))
      if (M >= NumElements)
        ++LowV2Elements;
      else if (M >= 0)
        ++LowV1Elements;
    if (LowV2Elements > LowV1Elements) {
      return DAG.getCommutedVectorShuffle(*SVOp);
    } else if (LowV2Elements == LowV1Elements) {
      int SumV1Indices = 0, SumV2Indices = 0;
      for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
        if (SVOp->getMask()[i] >= NumElements)
          SumV2Indices += i;
        else if (SVOp->getMask()[i] >= 0)
          SumV1Indices += i;
      if (SumV2Indices < SumV1Indices) {
        return DAG.getCommutedVectorShuffle(*SVOp);
      } else if (SumV2Indices == SumV1Indices) {
        int NumV1OddIndices = 0, NumV2OddIndices = 0;
        for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
          if (SVOp->getMask()[i] >= NumElements)
            NumV2OddIndices += i % 2;
          else if (SVOp->getMask()[i] >= 0)
            NumV1OddIndices += i % 2;
        if (NumV2OddIndices < NumV1OddIndices)
          return DAG.getCommutedVectorShuffle(*SVOp);
      }
    }
  }
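
  // Worked example: for the v4i32 mask <4, 1, 6, 3> both inputs supply two
  // elements and the low half uses one element of each, so the first two
  // tie-breakers tie; the index sums are 1 + 3 = 4 for V1 and 0 + 2 = 2 for
  // V2, so the shuffle is commuted to <0, 5, 2, 7>.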

  // For each vector width, delegate to a specialized lowering routine.
  if (VT.getSizeInBits() == 128)
    return lower128BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);

  if (VT.getSizeInBits() == 256)
    return lower256BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);

  // AVX-512 support is still limited; the 512-bit routine splits anything it
  // cannot handle directly.
  // FIXME: Implement full AVX-512 support!
  if (VT.getSizeInBits() == 512)
    return lower512BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);

  llvm_unreachable("Unimplemented!");
}

//===----------------------------------------------------------------------===//
// Legacy vector shuffle lowering
//
// This is the legacy code for handling vector shuffles, retained until the
// experimental path above replaces it in both functionality and performance.
//===----------------------------------------------------------------------===//

static bool isBlendMask(ArrayRef<int> MaskVals, MVT VT, bool hasSSE41,
                        bool hasInt256, unsigned *MaskOut = nullptr) {
  MVT EltVT = VT.getVectorElementType();

  // There is no blend with immediate in AVX-512.
  if (VT.is512BitVector())
    return false;

  if (!hasSSE41 || EltVT == MVT::i8)
    return false;
  if (!hasInt256 && VT == MVT::v16i16)
    return false;

  unsigned MaskValue = 0;
  unsigned NumElems = VT.getVectorNumElements();
  // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
  unsigned NumLanes = (NumElems - 1) / 8 + 1;
  unsigned NumElemsInLane = NumElems / NumLanes;

  // Blend for v16i16 should be symmetric for both lanes.
  for (unsigned i = 0; i < NumElemsInLane; ++i) {
    int SndLaneEltIdx = (NumLanes == 2) ? MaskVals[i + NumElemsInLane] : -1;
    int EltIdx = MaskVals[i];

    if ((EltIdx < 0 || EltIdx == (int)i) &&
        (SndLaneEltIdx < 0 || SndLaneEltIdx == (int)(i + NumElemsInLane)))
      continue;

    if (((unsigned)EltIdx == (i + NumElems)) &&
        (SndLaneEltIdx < 0 ||
         (unsigned)SndLaneEltIdx == i + NumElems + NumElemsInLane))
      MaskValue |= (1 << i);
    else
      return false;
  }

  if (MaskOut)
    *MaskOut = MaskValue;
  return true;
}
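
// Worked example: for a v8i16 mask <0, 9, 2, 11, 4, 13, 6, 15>, elements 1,
// 3, 5 and 7 are taken from V2, so the loop above accumulates
// MaskValue == 0xAA (0b10101010), exactly the immediate a PBLENDW would use.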

// Try to lower a shuffle node into a simple blend instruction.
// This function assumes isBlendMask returns true for this
// ShuffleVectorSDNode.
static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
                                          unsigned MaskValue,
                                          const X86Subtarget *Subtarget,
                                          SelectionDAG &DAG) {
  MVT VT = SVOp->getSimpleValueType(0);
  MVT EltVT = VT.getVectorElementType();
  assert(isBlendMask(SVOp->getMask(), VT, Subtarget->hasSSE41(),
                     Subtarget->hasInt256()) &&
         "Trying to lower a VECTOR_SHUFFLE to a Blend but with the wrong mask");
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);
  unsigned NumElems = VT.getVectorNumElements();

  // Convert i32 vectors to floating point if it is not AVX2.
  // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
  MVT BlendVT = VT;
  if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
    BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
                               NumElems);
    V1 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V1);
    V2 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V2);
  }

  SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, V1, V2,
                            DAG.getConstant(MaskValue, MVT::i32));
  return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
}

/// In vector type \p VT, return true if the element at index \p InputIdx
/// falls on a different 128-bit lane than \p OutputIdx.
static bool ShuffleCrosses128bitLane(MVT VT, unsigned InputIdx,
                                     unsigned OutputIdx) {
  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
  return InputIdx * EltSize / 128 != OutputIdx * EltSize / 128;
}

/// Generate a PSHUFB if possible. Selects elements from \p V1 according to
/// \p MaskVals. MaskVals[OutputIdx] = InputIdx specifies that we want to
/// shuffle the element at InputIdx in V1 to OutputIdx in the result. If \p
/// MaskVals refers to elements outside of \p V1 or is undef (-1), insert a
/// zero.
static SDValue getPSHUFB(ArrayRef<int> MaskVals, SDValue V1, SDLoc &dl,
                         SelectionDAG &DAG) {
  MVT VT = V1.getSimpleValueType();
  assert(VT.is128BitVector() || VT.is256BitVector());

  MVT EltVT = VT.getVectorElementType();
  unsigned EltSizeInBytes = EltVT.getSizeInBits() / 8;
  unsigned NumElts = VT.getVectorNumElements();

  SmallVector<SDValue, 32> PshufbMask;
  for (unsigned OutputIdx = 0; OutputIdx < NumElts; ++OutputIdx) {
    int InputIdx = MaskVals[OutputIdx];
    unsigned InputByteIdx;

    if (InputIdx < 0 || NumElts <= (unsigned)InputIdx)
      InputByteIdx = 0x80;
    else {
      // Cross lane is not allowed.
      if (ShuffleCrosses128bitLane(VT, InputIdx, OutputIdx))
        return SDValue();
      InputByteIdx = InputIdx * EltSizeInBytes;
      // Index is a byte offset within the 128-bit lane.
      InputByteIdx &= 0xf;
    }

    for (unsigned j = 0; j < EltSizeInBytes; ++j) {
      PshufbMask.push_back(DAG.getConstant(InputByteIdx, MVT::i8));
      if (InputByteIdx != 0x80)
        ++InputByteIdx;
    }
  }

  MVT ShufVT = MVT::getVectorVT(MVT::i8, PshufbMask.size());
  if (ShufVT != VT)
    V1 = DAG.getNode(ISD::BITCAST, dl, ShufVT, V1);
  return DAG.getNode(X86ISD::PSHUFB, dl, ShufVT, V1,
                     DAG.getNode(ISD::BUILD_VECTOR, dl, ShufVT, PshufbMask));
}
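
// Worked example: for v8i16 with MaskVals = <4, -1, 0, ...>, word 0 expands
// to the byte pair (8, 9) since word 4 starts at byte 8, word 1 expands to
// (0x80, 0x80) which PSHUFB turns into zeros, and word 2 expands to (0, 1).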

// v8i16 shuffles - Prefer shuffles in the following order:
// 1. [all]   pshuflw, pshufhw, optional move
// 2. [ssse3] 1 x pshufb
// 3. [ssse3] 2 x pshufb + 1 x por
// 4. [all]   mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
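// For instance, <3, 2, 1, 0, 4, 5, 6, 7> is case 1 (a single pshuflw with
// the high quadword left untouched), while masks mixing words from both
// inputs generally land in case 3 on SSSE3 targets.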
static SDValue
LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
                         SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);
  SmallVector<int, 8> MaskVals;

  // Determine if more than 1 of the words in each of the low and high quadwords
  // of the result come from the same quadword of one of the two inputs. Undef
  // mask values count as coming from any quadword, for better codegen.
  //
  // Lo/HiQuad[i] = j indicates how many words from the ith quad of the input
  // feeds this quad. For i, 0 and 1 refer to V1, 2 and 3 refer to V2.
  unsigned LoQuad[] = { 0, 0, 0, 0 };
  unsigned HiQuad[] = { 0, 0, 0, 0 };
  // Indices of quads used.
  std::bitset<4> InputQuads;
  for (unsigned i = 0; i < 8; ++i) {
    unsigned *Quad = i < 4 ? LoQuad : HiQuad;
    int EltIdx = SVOp->getMaskElt(i);
    MaskVals.push_back(EltIdx);
    if (EltIdx < 0) {
      ++Quad[0];
      ++Quad[1];
      ++Quad[2];
      ++Quad[3];
      continue;
    }
    ++Quad[EltIdx / 4];
    InputQuads.set(EltIdx / 4);
  }

  int BestLoQuad = -1;
  unsigned MaxQuad = 1;
  for (unsigned i = 0; i < 4; ++i) {
    if (LoQuad[i] > MaxQuad) {
      BestLoQuad = i;
      MaxQuad = LoQuad[i];
    }
  }

  int BestHiQuad = -1;
  MaxQuad = 1;
  for (unsigned i = 0; i < 4; ++i) {
    if (HiQuad[i] > MaxQuad) {
      BestHiQuad = i;
      MaxQuad = HiQuad[i];
    }
  }

  // For SSSE3, If all 8 words of the result come from only 1 quadword of each
  // of the two input vectors, shuffle them into one input vector so only a
  // single pshufb instruction is necessary. If there are more than 2 input
  // quads, disable the next transformation since it does not help SSSE3.
  bool V1Used = InputQuads[0] || InputQuads[1];
  bool V2Used = InputQuads[2] || InputQuads[3];
  if (Subtarget->hasSSSE3()) {
    if (InputQuads.count() == 2 && V1Used && V2Used) {
      BestLoQuad = InputQuads[0] ? 0 : 1;
      BestHiQuad = InputQuads[2] ? 2 : 3;
    }
    if (InputQuads.count() > 2) {
      BestLoQuad = -1;
      BestHiQuad = -1;
    }
  }

  // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update
  // the shuffle mask. If a quad is scored as -1, that means that it contains
  // words from all 4 input quadwords.
  SDValue NewV;
  if (BestLoQuad >= 0 || BestHiQuad >= 0) {
    int MaskV[] = {
      BestLoQuad < 0 ? 0 : BestLoQuad,
      BestHiQuad < 0 ? 1 : BestHiQuad
    };
    NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
                  DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
                  DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
    NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);

    // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
    // source words for the shuffle, to aid later transformations.
    bool AllWordsInNewV = true;
    bool InOrder[2] = { true, true };
    for (unsigned i = 0; i != 8; ++i) {
      int idx = MaskVals[i];
      if (idx != (int)i)
        InOrder[i/4] = false;
      if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
        continue;
      AllWordsInNewV = false;
      break;
    }

    bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV;
    if (AllWordsInNewV) {
      for (int i = 0; i != 8; ++i) {
        int idx = MaskVals[i];
        if (idx < 0)
          continue;
        idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4;
        if ((idx != i) && idx < 4)
          pshufhw = false;
        if ((idx != i) && idx > 3)
          pshuflw = false;
      }
      V1 = NewV;
      V2Used = false;
      BestLoQuad = 0;
      BestHiQuad = 1;
    }

    // If we've eliminated the use of V2, and the new mask is a pshuflw or
    // pshufhw, that's as cheap as it gets. Return the new shuffle.
    if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
      unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW;
      unsigned TargetMask = 0;
      NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
                                  DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
      ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
      TargetMask = pshufhw ? getShufflePSHUFHWImmediate(SVOp):
                             getShufflePSHUFLWImmediate(SVOp);
      V1 = NewV.getOperand(0);
      return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
    }
  }

  // Promote splats to a larger type which usually leads to more efficient code.
  // FIXME: Is this true if pshufb is available?
  if (SVOp->isSplat())
    return PromoteSplat(SVOp, DAG);

  // If we have SSSE3, and all words of the result are from 1 input vector,
  // case 2 is generated, otherwise case 3 is generated. If no SSSE3
  // is present, fall back to case 4.
  if (Subtarget->hasSSSE3()) {
    SmallVector<SDValue,16> pshufbMask;

    // If we have elements from both input vectors, set the high bit of the
    // shuffle mask element to zero out elements that come from V2 in the V1
    // mask, and elements that come from V1 in the V2 mask, so that the two
    // results can be OR'd together.
    bool TwoInputs = V1Used && V2Used;
    V1 = getPSHUFB(MaskVals, V1, dl, DAG);
    if (!TwoInputs)
      return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);

    // Calculate the shuffle mask for the second input, shuffle it, and
    // OR it with the first shuffled input.
    CommuteVectorShuffleMask(MaskVals, 8);
    V2 = getPSHUFB(MaskVals, V2, dl, DAG);
    V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
    return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
  }

  // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
  // and update MaskVals with new element order.
  std::bitset<8> InOrder;
  if (BestLoQuad >= 0) {
    int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 };
    for (int i = 0; i != 4; ++i) {
      int idx = MaskVals[i];
      if (idx < 0) {
        InOrder.set(i);
      } else if ((idx / 4) == BestLoQuad) {
        MaskV[i] = idx & 3;
        InOrder.set(i);
      }
    }
    NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
                                &MaskV[0]);

    if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
      ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
      NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
                                  NewV.getOperand(0),
                                  getShufflePSHUFLWImmediate(SVOp), DAG);
    }
  }

  // If BestHiQuad >= 0, generate a pshufhw to put the high elements in order,
  // and update MaskVals with the new element order.
  if (BestHiQuad >= 0) {
    int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 };
    for (unsigned i = 4; i != 8; ++i) {
      int idx = MaskVals[i];
      if (idx < 0) {
        InOrder.set(i);
      } else if ((idx / 4) == BestHiQuad) {
        MaskV[i] = (idx & 3) + 4;
        InOrder.set(i);
      }
    }
    NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
                                &MaskV[0]);

    if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
      ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
      NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
                                  NewV.getOperand(0),
                                  getShufflePSHUFHWImmediate(SVOp), DAG);
    }
  }

  // In case BestHi & BestLo were both -1, which means each quadword has a word
  // from each of the four input quadwords, calculate the InOrder bitvector now
  // before falling through to the insert/extract cleanup.
  if (BestLoQuad == -1 && BestHiQuad == -1) {
    NewV = V1;
    for (int i = 0; i != 8; ++i)
      if (MaskVals[i] < 0 || MaskVals[i] == i)
        InOrder.set(i);
  }

  // The other elements are put in the right place using pextrw and pinsrw.
  for (unsigned i = 0; i != 8; ++i) {
    if (InOrder[i])
      continue;
    int EltIdx = MaskVals[i];
    if (EltIdx < 0)
      continue;
    SDValue ExtOp = (EltIdx < 8) ?
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
                  DAG.getIntPtrConstant(EltIdx)) :
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
                  DAG.getIntPtrConstant(EltIdx - 8));
    NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
                       DAG.getIntPtrConstant(i));
  }
  return NewV;
}

/// \brief v16i16 shuffles
///
/// FIXME: We only support generation of a single pshufb currently. We can
/// generalize the other applicable cases from LowerVECTOR_SHUFFLEv8i16 as
/// well (e.g. 2 x pshufb + 1 x por).
static SDValue
LowerVECTOR_SHUFFLEv16i16(SDValue Op, SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);

  if (V2.getOpcode() != ISD::UNDEF)
    return SDValue();

  SmallVector<int, 16> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
  return getPSHUFB(MaskVals, V1, dl, DAG);
}

// v16i8 shuffles - Prefer shuffles in the following order:
// 1. [ssse3] 1 x pshufb
// 2. [ssse3] 2 x pshufb + 1 x por
// 3. [all]   v8i16 shuffle + N x pextrw + rotate + pinsrw
static SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
                                        const X86Subtarget* Subtarget,
                                        SelectionDAG &DAG) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);
  ArrayRef<int> MaskVals = SVOp->getMask();

  // Promote splats to a larger type which usually leads to more efficient code.
  // FIXME: Is this true if pshufb is available?
  if (SVOp->isSplat())
    return PromoteSplat(SVOp, DAG);

  // If we have SSSE3, case 1 is generated when all result bytes come from
  // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
  // present, fall back to case 3.

  // If SSSE3, use 1 pshufb instruction per vector with elements in the result.
  if (Subtarget->hasSSSE3()) {
    SmallVector<SDValue,16> pshufbMask;

    // If all result elements are from one input vector, then only translate
    // undef mask values to 0x80 (zero out result) in the pshufb mask.
    //
    // Otherwise, we have elements from both input vectors, and must zero out
    // elements that come from V2 in the first mask, and V1 in the second mask
    // so that we can OR them together.
    for (unsigned i = 0; i != 16; ++i) {
      int EltIdx = MaskVals[i];
      if (EltIdx < 0 || EltIdx >= 16)
        EltIdx = 0x80;
      pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
    }
    V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
                     DAG.getNode(ISD::BUILD_VECTOR, dl,
                                 MVT::v16i8, pshufbMask));

    // As PSHUFB will zero elements with negative indices, it's safe to ignore
    // the 2nd operand if it's undefined or zero.
    if (V2.getOpcode() == ISD::UNDEF ||
        ISD::isBuildVectorAllZeros(V2.getNode()))
      return V1;

    // Calculate the shuffle mask for the second input, shuffle it, and
    // OR it with the first shuffled input.
    pshufbMask.clear();
    for (unsigned i = 0; i != 16; ++i) {
      int EltIdx = MaskVals[i];
      EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16;
      pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
    }
    V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
                     DAG.getNode(ISD::BUILD_VECTOR, dl,
                                 MVT::v16i8, pshufbMask));
    return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
  }

  // No SSSE3 - Calculate in place words and then fix all out of place words
  // with 0-16 extracts & inserts. Worst case is 16 bytes out of order from
  // the 16 different words that comprise the two doublequadword input vectors.
  V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
  V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
  SDValue NewV = V1;
  for (int i = 0; i != 8; ++i) {
    int Elt0 = MaskVals[i*2];
    int Elt1 = MaskVals[i*2+1];

    // This word of the result is all undef, skip it.
    if (Elt0 < 0 && Elt1 < 0)
      continue;

    // This word of the result is already in the correct place, skip it.
    if ((Elt0 == i*2) && (Elt1 == i*2+1))
      continue;

    SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
    SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
    SDValue InsElt;

    // If Elt0 and Elt1 are defined, are consecutive, and can be loaded
    // using a single extract together, load it and store it.
    if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
      InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
                           DAG.getIntPtrConstant(Elt1 / 2));
      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
                         DAG.getIntPtrConstant(i));
      continue;
    }

    // If Elt1 is defined, extract it from the appropriate source. If the
    // source byte is not also odd, shift the extracted word left 8 bits;
    // otherwise clear the bottom 8 bits if we need to do an or.
    if (Elt1 >= 0) {
      InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
                           DAG.getIntPtrConstant(Elt1 / 2));
      if ((Elt1 & 1) == 0)
        InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
                             DAG.getConstant(8,
                                 TLI.getShiftAmountTy(InsElt.getValueType())));
      else if (Elt0 >= 0)
        InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
                             DAG.getConstant(0xFF00, MVT::i16));
    }
    // If Elt0 is defined, extract it from the appropriate source. If the
    // source byte is not also even, shift the extracted word right 8 bits. If
    // Elt1 was also defined, OR the extracted values together before
    // inserting them in the result.
    if (Elt0 >= 0) {
      SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
                                    Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
      if ((Elt0 & 1) != 0)
        InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
                              DAG.getConstant(8,
                                  TLI.getShiftAmountTy(InsElt0.getValueType())));
      else if (Elt1 >= 0)
        InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
                              DAG.getConstant(0x00FF, MVT::i16));
      InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
                         : InsElt0;
    }
    NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
                       DAG.getIntPtrConstant(i));
  }
  return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
}

// v32i8 shuffles - Translate to VPSHUFB if possible.
static
SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp,
                                 const X86Subtarget *Subtarget,
                                 SelectionDAG &DAG) {
  MVT VT = SVOp->getSimpleValueType(0);
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);
  SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());

  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
  bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode());
  bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode());

  // VPSHUFB may be generated if:
  // (1) one of the input vectors is undefined or zeroinitializer (the mask
  //     value 0x80 puts 0 in the corresponding slot of the result), and
  // (2) the mask indices do not cross a 128-bit lane.
  if (VT != MVT::v32i8 || !Subtarget->hasInt256() ||
      (!V2IsUndef && !V2IsAllZero && !V1IsAllZero))
    return SDValue();

  if (V1IsAllZero && !V2IsAllZero) {
    CommuteVectorShuffleMask(MaskVals, 32);
    V1 = V2;
  }
  return getPSHUFB(MaskVals, V1, dl, DAG);
}

/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
/// done when every pair / quad of shuffle mask elements point to elements in
/// the right sequence. e.g.
/// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
static
SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
                                 SelectionDAG &DAG) {
  MVT VT = SVOp->getSimpleValueType(0);
  SDLoc dl(SVOp);
  unsigned NumElems = VT.getVectorNumElements();
  MVT NewVT;
  unsigned Scale;
  switch (VT.SimpleTy) {
  default: llvm_unreachable("Unexpected!");
  case MVT::v2i64:
  case MVT::v2f64:
           return SDValue(SVOp, 0);
  case MVT::v4f32:  NewVT = MVT::v2f64; Scale = 2; break;
  case MVT::v4i32:  NewVT = MVT::v2i64; Scale = 2; break;
  case MVT::v8i16:  NewVT = MVT::v4i32; Scale = 2; break;
  case MVT::v16i8:  NewVT = MVT::v4i32; Scale = 4; break;
  case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break;
  case MVT::v32i8:  NewVT = MVT::v8i32; Scale = 4; break;
  }

  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i != NumElems; i += Scale) {
    int StartIdx = -1;
    for (unsigned j = 0; j != Scale; ++j) {
      int EltIdx = SVOp->getMaskElt(i+j);
      if (EltIdx < 0)
        continue;
      if (StartIdx < 0)
        StartIdx = (EltIdx / Scale);
      if (EltIdx != (int)(StartIdx*Scale + j))
        return SDValue();
    }
    MaskVec.push_back(StartIdx);
  }

  SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0));
  SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1));
  return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
}
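
// E.g. the v8i16 mask <2, 3, 10, 11, 0, 1, 14, 15> from the comment above
// contracts pairwise to the v4i32 mask <1, 5, 0, 7>.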

/// getVZextMovL - Return a zero-extending vector move low node.
///
static SDValue getVZextMovL(MVT VT, MVT OpVT,
                            SDValue SrcOp, SelectionDAG &DAG,
                            const X86Subtarget *Subtarget, SDLoc dl) {
  if (VT == MVT::v2f64 || VT == MVT::v4f32) {
    LoadSDNode *LD = nullptr;
    if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
      LD = dyn_cast<LoadSDNode>(SrcOp);
    if (!LD) {
      // movssrr and movsdrr do not clear top bits. Try to use movd, movq
      // instead.
      MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
      if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
          SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
          SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
          SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
        // PR2108
        OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
        return DAG.getNode(ISD::BITCAST, dl, VT,
                           DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
                                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                                   OpVT,
                                                   SrcOp.getOperand(0)
                                                        .getOperand(0))));
      }
    }
  }

  return DAG.getNode(ISD::BITCAST, dl, VT,
                     DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
                                 DAG.getNode(ISD::BITCAST, dl,
                                             OpVT, SrcOp)));
}

/// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vector shuffles that
/// could not be matched by any known target-specific shuffle.
static SDValue
LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
  SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG);
  if (NewOp.getNode())
    return NewOp;

  MVT VT = SVOp->getSimpleValueType(0);
  SDLoc dl(SVOp);
  unsigned NumElems = VT.getVectorNumElements();
  unsigned NumLaneElems = NumElems / 2;

  MVT EltVT = VT.getVectorElementType();
  MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
  SDValue Output[2];

  SmallVector<int, 16> Mask;
  for (unsigned l = 0; l < 2; ++l) {
    // Build a shuffle mask for the output, discovering on the fly which
    // input vectors to use as shuffle operands (recorded in InputUsed).
    // If building a suitable shuffle vector proves too hard, then bail
    // out with UseBuildVector set.
    bool UseBuildVector = false;
    int InputUsed[2] = { -1, -1 }; // Not yet discovered.
    unsigned LaneStart = l * NumLaneElems;
    for (unsigned i = 0; i != NumLaneElems; ++i) {
      // The mask element. This indexes into the input.
      int Idx = SVOp->getMaskElt(i+LaneStart);
      if (Idx < 0) {
        // The mask element does not index into any input vector.
        Mask.push_back(-1);
        continue;
      }

      // The input vector this mask element indexes into.
      int Input = Idx / NumLaneElems;

      // Turn the index into an offset from the start of the input vector.
      Idx -= Input * NumLaneElems;

      // Find or create a shuffle vector operand to hold this input.
      unsigned OpNo;
      for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
        if (InputUsed[OpNo] == Input)
          // This input vector is already an operand.
          break;
        if (InputUsed[OpNo] < 0) {
          // Create a new operand for this input vector.
          InputUsed[OpNo] = Input;
          break;
        }
      }

      if (OpNo >= array_lengthof(InputUsed)) {
        // More than two input vectors used! Give up on trying to create a
        // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
        UseBuildVector = true;
        break;
      }

      // Add the mask index for the new shuffle vector.
      Mask.push_back(Idx + OpNo * NumLaneElems);
    }

    if (UseBuildVector) {
      SmallVector<SDValue, 16> SVOps;
      for (unsigned i = 0; i != NumLaneElems; ++i) {
        // The mask element. This indexes into the input.
        int Idx = SVOp->getMaskElt(i+LaneStart);
        if (Idx < 0) {
          SVOps.push_back(DAG.getUNDEF(EltVT));
          continue;
        }

        // The input vector this mask element indexes into.
        int Input = Idx / NumElems;

        // Turn the index into an offset from the start of the input vector.
        Idx -= Input * NumElems;

        // Extract the vector element by hand.
        SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
                                    SVOp->getOperand(Input),
                                    DAG.getIntPtrConstant(Idx)));
      }

      // Construct the output using a BUILD_VECTOR.
      Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, SVOps);
    } else if (InputUsed[0] < 0) {
      // No input vectors were used! The result is undefined.
      Output[l] = DAG.getUNDEF(NVT);
    } else {
      SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2),
                                        (InputUsed[0] % 2) * NumLaneElems,
                                        DAG, dl);
      // If only one input was used, use an undefined vector for the other.
      SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) :
        Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2),
                            (InputUsed[1] % 2) * NumLaneElems, DAG, dl);
      // At least one input vector was used. Create a new shuffle vector.
      Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
    }

    Mask.clear();
  }

  // Concatenate the result back.
  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]);
}

/// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
/// 4 elements, and match them with several different shuffle types.
static SDValue
LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);
  MVT VT = SVOp->getSimpleValueType(0);

  assert(VT.is128BitVector() && "Unsupported vector size");

  std::pair<int, int> Locs[4];
  int Mask1[] = { -1, -1, -1, -1 };
  SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end());

  unsigned NumHi = 0;
  unsigned NumLo = 0;
  for (unsigned i = 0; i != 4; ++i) {
    int Idx = PermMask[i];
    if (Idx < 0) {
      Locs[i] = std::make_pair(-1, -1);
    } else {
      assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
      if (Idx < 4) {
        Locs[i] = std::make_pair(0, NumLo);
        Mask1[NumLo] = Idx;
        NumLo++;
      } else {
        Locs[i] = std::make_pair(1, NumHi);
        if (2+NumHi < 4)
          Mask1[2+NumHi] = Idx;
        NumHi++;
      }
    }
  }

  if (NumLo <= 2 && NumHi <= 2) {
    // No more than two elements come from either vector. This can be
    // implemented with two shuffles. The first shuffle gathers the elements.
    // The second shuffle, which takes the first shuffle as both of its
    // vector operands, puts the elements into the right order.
    V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);

    int Mask2[] = { -1, -1, -1, -1 };

    for (unsigned i = 0; i != 4; ++i)
      if (Locs[i].first != -1) {
        unsigned Idx = (i < 2) ? 0 : 4;
        Idx += Locs[i].first * 2 + Locs[i].second;
        Mask2[i] = Idx;
      }

    return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
  }

  if (NumLo == 3 || NumHi == 3) {
    // Otherwise, we must have three elements from one vector, call it X, and
    // one element from the other, call it Y. First, use a shufps to build an
    // intermediate vector with the one element from Y and the element from X
    // that will be in the same half in the final destination (the indexes don't
    // matter). Then, use a shufps to build the final vector, taking the half
    // containing the element from Y from the intermediate, and the other half
    // from X.
    if (NumHi == 3) {
      // Normalize it so the 3 elements come from V1.
      CommuteVectorShuffleMask(PermMask, 4);
      std::swap(V1, V2);
    }

    // Find the element from V2.
    unsigned HiIndex;
    for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
      int Val = PermMask[HiIndex];
      if (Val < 0)
        continue;
      if (Val >= 4)
        break;
    }

    Mask1[0] = PermMask[HiIndex];
    Mask1[1] = -1;
    Mask1[2] = PermMask[HiIndex^1];
    Mask1[3] = -1;
    V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);

    if (HiIndex >= 2) {
      Mask1[0] = PermMask[0];
      Mask1[1] = PermMask[1];
      Mask1[2] = HiIndex & 1 ? 6 : 4;
      Mask1[3] = HiIndex & 1 ? 4 : 6;
      return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
    }

    Mask1[0] = HiIndex & 1 ? 2 : 0;
    Mask1[1] = HiIndex & 1 ? 0 : 2;
    Mask1[2] = PermMask[2];
    Mask1[3] = PermMask[3];
    if (Mask1[2] >= 0)
      Mask1[2] += 4;
    if (Mask1[3] >= 0)
      Mask1[3] += 4;
    return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
  }

  // Break it into (shuffle shuffle_hi, shuffle_lo).
  int LoMask[] = { -1, -1, -1, -1 };
  int HiMask[] = { -1, -1, -1, -1 };

  int *MaskPtr = LoMask;
  unsigned MaskIdx = 0;
  unsigned LoIdx = 0;
  unsigned HiIdx = 2;
  for (unsigned i = 0; i != 4; ++i) {
    if (i == 2) {
      MaskPtr = HiMask;
      MaskIdx = 1;
      LoIdx = 0;
      HiIdx = 2;
    }
    int Idx = PermMask[i];
    if (Idx < 0) {
      Locs[i] = std::make_pair(-1, -1);
    } else if (Idx < 4) {
      Locs[i] = std::make_pair(MaskIdx, LoIdx);
      MaskPtr[LoIdx] = Idx;
      LoIdx++;
    } else {
      Locs[i] = std::make_pair(MaskIdx, HiIdx);
      MaskPtr[HiIdx] = Idx;
      HiIdx++;
    }
  }

  SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
  SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
  int MaskOps[] = { -1, -1, -1, -1 };
  for (unsigned i = 0; i != 4; ++i)
    if (Locs[i].first != -1)
      MaskOps[i] = Locs[i].first * 4 + Locs[i].second;
  return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
}

static bool MayFoldVectorLoad(SDValue V) {
  while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);

  if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
    V = V.getOperand(0);
  if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
      V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
    // BUILD_VECTOR (load), undef
    V = V.getOperand(0);

  return MayFoldLoad(V);
}

static
SDValue getMOVDDup(SDValue &Op, SDLoc &dl, SDValue V1, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  // Canonicalize to v2f64.
  V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
  return DAG.getNode(ISD::BITCAST, dl, VT,
                     getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
                                          V1, DAG));
}

static
SDValue getMOVLowToHigh(SDValue &Op, SDLoc &dl, SelectionDAG &DAG,
                        bool HasSSE2) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();

  assert(VT != MVT::v2i64 && "unsupported shuffle type");

  if (HasSSE2 && VT == MVT::v2f64)
    return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);

  // v4f32 or v4i32: canonicalized to v4f32 (which is legal for SSE1)
  return DAG.getNode(ISD::BITCAST, dl, VT,
                     getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32,
                            DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1),
                            DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG));
}

static
SDValue getMOVHighToLow(SDValue &Op, SDLoc &dl, SelectionDAG &DAG) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();

  assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
         "unsupported shuffle type");

  if (V2.getOpcode() == ISD::UNDEF)
    V2 = V1;

  // v4i32 or v4f32
  return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
}

static
SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();
  unsigned NumElems = VT.getVectorNumElements();

  // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
  // operand of these instructions is only memory, so check if there's a
  // potential load folding here, otherwise use SHUFPS or MOVSD to match the
  // same masks.
  bool CanFoldLoad = false;

  // Trivial case, when V2 comes from a load.
  if (MayFoldVectorLoad(V2))
    CanFoldLoad = true;

  // When V1 is a load, it can be folded later into a store in isel, example:
  //  (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
  //    turns into:
  //  (MOVLPSmr addr:$src1, VR128:$src2)
  // So, recognize this potential and also use MOVLPS or MOVLPD.
  else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
    CanFoldLoad = true;

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  if (CanFoldLoad) {
    if (HasSSE2 && NumElems == 2)
      return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);

    if (NumElems == 4)
      // If we don't care about the second element, proceed to use movss.
      if (SVOp->getMaskElt(1) != -1)
        return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
  }

  // movl and movlp will both match v2i64, but v2i64 is never matched by
  // movl earlier because we make it strict to avoid messing with the movlp load
  // folding logic (see the code above getMOVLP call). Match it here then,
  // this is horrible, but will stay like this until we move all shuffle
  // matching to x86 specific nodes. Note that for the 1st condition all
  // types are matched with movsd.
  if (HasSSE2) {
    // FIXME: isMOVLMask should be checked and matched before getMOVLP,
    // as to remove this logic from here, as much as possible.
    if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT))
      return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
    return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
  }

  assert(VT != MVT::v4i32 && "unsupported shuffle type");

  // Invert the operand order and use SHUFPS to match it.
  return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1,
                              getShuffleSHUFImmediate(SVOp), DAG);
}

static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
                                         SelectionDAG &DAG) {
  SDLoc dl(Load);
  MVT VT = Load->getSimpleValueType(0);
  MVT EVT = VT.getVectorElementType();
  SDValue Addr = Load->getOperand(1);
  SDValue NewAddr = DAG.getNode(
      ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
      DAG.getConstant(Index * EVT.getStoreSize(), Addr.getSimpleValueType()));

  SDValue NewLoad =
      DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
                  DAG.getMachineFunction().getMachineMemOperand(
                      Load->getMemOperand(), 0, EVT.getStoreSize()));
  return NewLoad;
}
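
// E.g. narrowing a v4f32 load with Index == 2 produces an f32 load from
// Addr + 8 (two times the 4-byte store size of the element).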

// It is only safe to call this function if isINSERTPSMask is true for
// this shufflevector mask.
static SDValue getINSERTPS(ShuffleVectorSDNode *SVOp, SDLoc &dl,
                           SelectionDAG &DAG) {
  // Generate an insertps instruction when inserting an f32 from memory onto a
  // v4f32 or when copying a member from one v4f32 to another.
  // We also use it for transferring i32 from one register to another,
  // since it simply copies the same bits.
  // If we're transferring an i32 from memory to a specific element in a
  // register, we output a generic DAG that will match the PINSRD
  // instruction.
  MVT VT = SVOp->getSimpleValueType(0);
  MVT EVT = VT.getVectorElementType();
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  auto Mask = SVOp->getMask();
  assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
         "unsupported vector type for insertps/pinsrd");

  auto FromV1Predicate = [](const int &i) { return i < 4 && i > -1; };
  auto FromV2Predicate = [](const int &i) { return i >= 4; };
  int FromV1 = std::count_if(Mask.begin(), Mask.end(), FromV1Predicate);

  SDValue From;
  SDValue To;
  unsigned DestIndex;
  if (FromV1 == 1) {
    From = V1;
    To = V2;
    DestIndex = std::find_if(Mask.begin(), Mask.end(), FromV1Predicate) -
                Mask.begin();

    // If we have 1 element from each vector, we have to check if we're
    // changing V1's element's place. If so, we're done. Otherwise, we
    // should assume we're changing V2's element's place and behave
    // accordingly.
    int FromV2 = std::count_if(Mask.begin(), Mask.end(), FromV2Predicate);
    assert(DestIndex <= INT32_MAX && "truncated destination index");
    if (FromV1 == FromV2 &&
        static_cast<int>(DestIndex) == Mask[DestIndex] % 4) {
      From = V2;
      To = V1;
      DestIndex =
          std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
    }
  } else {
    assert(std::count_if(Mask.begin(), Mask.end(), FromV2Predicate) == 1 &&
           "More than one element from V1 and from V2, or no elements from one "
           "of the vectors. This case should not have returned true from "
           "isINSERTPSMask");
    From = V2;
    To = V1;
    DestIndex =
        std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
  }

  // Get an index into the source vector in the range [0,4) (the mask is
  // in the range [0,8) because it can address V1 and V2).
  unsigned SrcIndex = Mask[DestIndex] % 4;
  if (MayFoldLoad(From)) {
    // Trivial case, when From comes from a load and is only used by the
    // shuffle. Make it use insertps from the vector that we need from that
    // load.
    SDValue NewLoad =
        NarrowVectorLoadToElement(cast<LoadSDNode>(From), SrcIndex, DAG);
    if (!NewLoad.getNode())
      return SDValue();

    if (EVT == MVT::f32) {
      // Create this as a scalar to vector to match the instruction pattern.
      SDValue LoadScalarToVector =
          DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, NewLoad);
      SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4);
      return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, LoadScalarToVector,
                         InsertpsMask);
    } else { // EVT == MVT::i32
      // If we're getting an i32 from memory, use an INSERT_VECTOR_ELT
      // instruction, to match the PINSRD instruction, which loads an i32 to a
      // certain vector element.
      return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, To, NewLoad,
                         DAG.getConstant(DestIndex, MVT::i32));
    }
  }

  // Vector-element-to-vector
  SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4 | SrcIndex << 6);
  return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, From, InsertpsMask);
}
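
// Worked example: copying element 3 of From into element 1 of To encodes as
// DestIndex << 4 | SrcIndex << 6 == (1 << 4) | (3 << 6) == 0xD0, i.e.
// COUNT_S == 3 and COUNT_D == 1 in the INSERTPS immediate, with ZMask == 0.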

// Reduce a vector shuffle to zext.
static SDValue LowerVectorIntExtend(SDValue Op, const X86Subtarget *Subtarget,
                                    SelectionDAG &DAG) {
  // PMOVZX is only available from SSE41.
  if (!Subtarget->hasSSE41())
    return SDValue();

  MVT VT = Op.getSimpleValueType();

  // Only AVX2 supports 256-bit vector integer extending.
  if (!Subtarget->hasInt256() && VT.is256BitVector())
    return SDValue();

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDLoc DL(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  unsigned NumElems = VT.getVectorNumElements();

  // Extending is a unary operation, and the element type of the source vector
  // won't be equal to or larger than i64.
  if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() ||
      VT.getVectorElementType() == MVT::i64)
    return SDValue();

  // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4.
  unsigned Shift = 1; // Start from 2, i.e. 1 << 1.
  while ((1U << Shift) < NumElems) {
    if (SVOp->getMaskElt(1U << Shift) == 1)
      break;
    Shift += 1;
    // The maximal ratio is 8, i.e. from i8 to i64.
    if (Shift > 3)
      return SDValue();
  }

  // Check the shuffle mask.
  unsigned Mask = (1U << Shift) - 1;
  for (unsigned i = 0; i != NumElems; ++i) {
    int EltIdx = SVOp->getMaskElt(i);
    if ((i & Mask) != 0 && EltIdx != -1)
      return SDValue();
    if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift))
      return SDValue();
  }

  unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift;
  MVT NeVT = MVT::getIntegerVT(NBits);
  MVT NVT = MVT::getVectorVT(NeVT, NumElems >> Shift);

  if (!DAG.getTargetLoweringInfo().isTypeLegal(NVT))
    return SDValue();

  return DAG.getNode(ISD::BITCAST, DL, VT,
                     DAG.getNode(X86ISD::VZEXT, DL, NVT, V1));
}
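
// E.g. a v8i16 shuffle of V1 with mask <0, -1, 1, -1, 2, -1, 3, -1> passes
// the checks above with Shift == 1 and is emitted as
// (v8i16 (bitcast (v4i32 (X86ISD::VZEXT V1)))), i.e. a pmovzxwd.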

static SDValue NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
                                      SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);

  if (isZeroShuffle(SVOp))
    return getZeroVector(VT, Subtarget, DAG, dl);

  // Handle splat operations.
  if (SVOp->isSplat()) {
    // Use vbroadcast whenever the splat comes from a foldable load.
    SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
    if (Broadcast.getNode())
      return Broadcast;
  }

  // Check integer expanding shuffles.
  SDValue NewOp = LowerVectorIntExtend(Op, Subtarget, DAG);
  if (NewOp.getNode())
    return NewOp;

  // If the shuffle can be profitably rewritten as a narrower shuffle, then
  // do it!
  if (VT == MVT::v8i16 || VT == MVT::v16i8 || VT == MVT::v16i16 ||
      VT == MVT::v32i8) {
    SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
    if (NewOp.getNode())
      return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
  } else if (VT.is128BitVector() && Subtarget->hasSSE2()) {
    // FIXME: Figure out a cleaner way to do this.
    if (ISD::isBuildVectorAllZeros(V2.getNode())) {
      SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
      if (NewOp.getNode()) {
        MVT NewVT = NewOp.getSimpleValueType();
        if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
                               NewVT, true, false))
          return getVZextMovL(VT, NewVT, NewOp.getOperand(0), DAG, Subtarget,
                              dl);
      }
    } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
      SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
      if (NewOp.getNode()) {
        MVT NewVT = NewOp.getSimpleValueType();
        if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
          return getVZextMovL(VT, NewVT, NewOp.getOperand(1), DAG, Subtarget,
                              dl);
      }
    }
  }
  return SDValue();
}
12336 X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
12337 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12338 SDValue V1 = Op.getOperand(0);
12339 SDValue V2 = Op.getOperand(1);
12340 MVT VT = Op.getSimpleValueType();
12342 unsigned NumElems = VT.getVectorNumElements();
12343 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
12344 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
12345 bool V1IsSplat = false;
12346 bool V2IsSplat = false;
12347 bool HasSSE2 = Subtarget->hasSSE2();
12348 bool HasFp256 = Subtarget->hasFp256();
12349 bool HasInt256 = Subtarget->hasInt256();
12350 MachineFunction &MF = DAG.getMachineFunction();
12351 bool OptForSize = MF.getFunction()->getAttributes().
12352 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
12354 // Check if we should use the experimental vector shuffle lowering. If so,
12355 // delegate completely to that code path.
12356 if (ExperimentalVectorShuffleLowering)
12357 return lowerVectorShuffle(Op, Subtarget, DAG);
12359 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
12361 if (V1IsUndef && V2IsUndef)
12362 return DAG.getUNDEF(VT);
12364 // When we create a shuffle node we put the UNDEF node to second operand,
12365 // but in some cases the first operand may be transformed to UNDEF.
12366 // In this case we should just commute the node.
12368 return DAG.getCommutedVectorShuffle(*SVOp);
12370 // Vector shuffle lowering takes 3 steps:
12372 // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
12373 // narrowing and commutation of operands should be handled.
12374 // 2) Matching of shuffles with known shuffle masks to x86 target specific
12376 // 3) Rewriting of unmatched masks into new generic shuffle operations,
12377 // so the shuffle can be broken into other shuffles and the legalizer can
12378 // try the lowering again.
12380 // The general idea is that no vector_shuffle operation should be left to
12381 // be matched during isel, all of them must be converted to a target specific
12384 // Normalize the input vectors. Here splats, zeroed vectors, profitable
12385 // narrowing and commutation of operands should be handled. The actual code
12386 // doesn't include all of those, work in progress...
12387 SDValue NewOp = NormalizeVectorShuffle(Op, Subtarget, DAG);
12388 if (NewOp.getNode())
12391 SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end());
12393 // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
12394 // unpckh_undef). Only use pshufd if speed is more important than size.
12395 if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12396 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12397 if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12398 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12400 if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() &&
12401 V2IsUndef && MayFoldVectorLoad(V1))
12402 return getMOVDDup(Op, dl, V1, DAG);
12404 if (isMOVHLPS_v_undef_Mask(M, VT))
12405 return getMOVHighToLow(Op, dl, DAG);
12407 // Use to match splats
12408 if (HasSSE2 && isUNPCKHMask(M, VT, HasInt256) && V2IsUndef &&
12409 (VT == MVT::v2f64 || VT == MVT::v2i64))
12410 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12412 if (isPSHUFDMask(M, VT)) {
12413 // The actual implementation will match the mask in the if above and then
12414 // during isel it can match several different instructions, not only pshufd
12415 // as its name suggests. Sad but true. Emulate that behavior for now...
12416 if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
12417 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);
12419 unsigned TargetMask = getShuffleSHUFImmediate(SVOp);
12421 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
12422 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);
12424 if (HasFp256 && (VT == MVT::v4f32 || VT == MVT::v2f64))
12425 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1, TargetMask,
12426 DAG);
12428 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1,
12429 TargetMask, DAG);
12430 }
12432 if (isPALIGNRMask(M, VT, Subtarget))
12433 return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2,
12434 getShufflePALIGNRImmediate(SVOp),
12435 DAG);
12437 if (isVALIGNMask(M, VT, Subtarget))
12438 return getTargetShuffleNode(X86ISD::VALIGN, dl, VT, V1, V2,
12439 getShuffleVALIGNImmediate(SVOp),
12440 DAG);
12442 // Check if this can be converted into a logical shift.
12443 bool isLeft = false;
12444 unsigned ShAmt = 0;
12445 SDValue ShVal;
12446 bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
12447 if (isShift && ShVal.hasOneUse()) {
12448 // If the shifted value has multiple uses, it may be cheaper to use
12449 // v_set0 + movlhps or movhlps, etc.
12450 MVT EltVT = VT.getVectorElementType();
12451 ShAmt *= EltVT.getSizeInBits();
12452 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12453 }
12455 if (isMOVLMask(M, VT)) {
12456 if (ISD::isBuildVectorAllZeros(V1.getNode()))
12457 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
12458 if (!isMOVLPMask(M, VT)) {
12459 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
12460 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12462 if (VT == MVT::v4i32 || VT == MVT::v4f32)
12463 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12464 }
12465 }
12467 // FIXME: fold these into legal mask.
12468 if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasInt256))
12469 return getMOVLowToHigh(Op, dl, DAG, HasSSE2);
12471 if (isMOVHLPSMask(M, VT))
12472 return getMOVHighToLow(Op, dl, DAG);
12474 if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget))
12475 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);
12477 if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget))
12478 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);
12480 if (isMOVLPMask(M, VT))
12481 return getMOVLP(Op, dl, DAG, HasSSE2);
12483 if (ShouldXformToMOVHLPS(M, VT) ||
12484 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT))
12485 return DAG.getCommutedVectorShuffle(*SVOp);
12487 if (isShift) {
12488 // No better options. Use a vshldq / vsrldq.
12489 MVT EltVT = VT.getVectorElementType();
12490 ShAmt *= EltVT.getSizeInBits();
12491 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12492 }
12494 bool Commuted = false;
12495 // FIXME: This should also accept a bitcast of a splat? Be careful, not
12496 // 1,1,1,1 -> v8i16 though.
12497 BitVector UndefElements;
12498 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V1.getNode()))
12499 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12500 V1IsSplat = true;
12501 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V2.getNode()))
12502 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12503 V2IsSplat = true;
12505 // Canonicalize the splat or undef, if present, to be on the RHS.
12506 if (!V2IsUndef && V1IsSplat && !V2IsSplat) {
12507 CommuteVectorShuffleMask(M, NumElems);
12508 std::swap(V1, V2);
12509 std::swap(V1IsSplat, V2IsSplat);
12510 Commuted = true;
12511 }
12513 if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) {
12514 // Shuffling low element of v1 into undef, just return v1.
12515 if (V2IsUndef)
12516 return V1;
12517 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
12518 // the instruction selector will not match, so get a canonical MOVL with
12519 // swapped operands to undo the commute.
12520 return getMOVL(DAG, dl, VT, V2, V1);
12521 }
12523 if (isUNPCKLMask(M, VT, HasInt256))
12524 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12526 if (isUNPCKHMask(M, VT, HasInt256))
12527 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12529 if (V2IsSplat) {
12530 // Normalize mask so all entries that point to V2 point to its first
12531 // element, then try to match unpck{h|l} again. If match, return a
12532 // new vector_shuffle with the corrected mask.
12533 SmallVector<int, 8> NewMask(M.begin(), M.end());
12534 NormalizeMask(NewMask, NumElems);
12535 if (isUNPCKLMask(NewMask, VT, HasInt256, true))
12536 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12537 if (isUNPCKHMask(NewMask, VT, HasInt256, true))
12538 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12539 }
12541 if (Commuted) {
12542 // Commute it back and try unpck* again.
12543 // FIXME: this seems wrong.
12544 CommuteVectorShuffleMask(M, NumElems);
12545 std::swap(V1, V2);
12546 std::swap(V1IsSplat, V2IsSplat);
12548 if (isUNPCKLMask(M, VT, HasInt256))
12549 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12551 if (isUNPCKHMask(M, VT, HasInt256))
12552 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12553 }
12555 // Normalize the node to match x86 shuffle ops if needed
12556 if (!V2IsUndef && (isSHUFPMask(M, VT, /* Commuted */ true)))
12557 return DAG.getCommutedVectorShuffle(*SVOp);
12559 // The checks below are all present in isShuffleMaskLegal, but they are
12560 // inlined here right now to enable us to directly emit target specific
12561 // nodes, and remove one by one until they don't return Op anymore.
12563 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
12564 SVOp->getSplatIndex() == 0 && V2IsUndef) {
12565 if (VT == MVT::v2f64 || VT == MVT::v2i64)
12566 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12567 }
12569 if (isPSHUFHWMask(M, VT, HasInt256))
12570 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
12571 getShufflePSHUFHWImmediate(SVOp),
12572 DAG);
12574 if (isPSHUFLWMask(M, VT, HasInt256))
12575 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
12576 getShufflePSHUFLWImmediate(SVOp),
12577 DAG);
12579 unsigned MaskValue;
12580 if (isBlendMask(M, VT, Subtarget->hasSSE41(), Subtarget->hasInt256(),
12581 MaskValue))
12582 return LowerVECTOR_SHUFFLEtoBlend(SVOp, MaskValue, Subtarget, DAG);
12584 if (isSHUFPMask(M, VT))
12585 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
12586 getShuffleSHUFImmediate(SVOp), DAG);
12588 if (isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12589 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12590 if (isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12591 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12593 //===--------------------------------------------------------------------===//
12594 // Generate target specific nodes for 128 or 256-bit shuffles only
12595 // supported in the AVX instruction set.
12598 // Handle VMOVDDUPY permutations
12599 if (V2IsUndef && isMOVDDUPYMask(M, VT, HasFp256))
12600 return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);
12602 // Handle VPERMILPS/D* permutations
12603 if (isVPERMILPMask(M, VT)) {
12604 if ((HasInt256 && VT == MVT::v8i32) || VT == MVT::v16i32)
12605 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1,
12606 getShuffleSHUFImmediate(SVOp), DAG);
12607 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1,
12608 getShuffleSHUFImmediate(SVOp), DAG);
12609 }
12611 unsigned Idx;
12612 if (VT.is512BitVector() && isINSERT64x4Mask(M, VT, &Idx))
12613 return Insert256BitVector(V1, Extract256BitVector(V2, 0, DAG, dl),
12614 Idx*(NumElems/2), DAG, dl);
12616 // Handle VPERM2F128/VPERM2I128 permutations
12617 if (isVPERM2X128Mask(M, VT, HasFp256))
12618 return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
12619 V2, getShuffleVPERM2X128Immediate(SVOp), DAG);
12621 if (Subtarget->hasSSE41() && isINSERTPSMask(M, VT))
12622 return getINSERTPS(SVOp, dl, DAG);
12624 unsigned Imm8;
12625 if (V2IsUndef && HasInt256 && isPermImmMask(M, VT, Imm8))
12626 return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, Imm8, DAG);
12628 if ((V2IsUndef && HasInt256 && VT.is256BitVector() && NumElems == 8) ||
12629 VT.is512BitVector()) {
12630 MVT MaskEltVT = MVT::getIntegerVT(VT.getVectorElementType().getSizeInBits());
12631 MVT MaskVectorVT = MVT::getVectorVT(MaskEltVT, NumElems);
12632 SmallVector<SDValue, 16> permclMask;
12633 for (unsigned i = 0; i != NumElems; ++i) {
12634 permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MaskEltVT));
12635 }
12637 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVectorVT, permclMask);
12638 if (V2IsUndef)
12639 // Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32
12640 return DAG.getNode(X86ISD::VPERMV, dl, VT,
12641 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1);
12642 return DAG.getNode(X86ISD::VPERMV3, dl, VT, V1,
12643 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V2);
12644 }
12646 //===--------------------------------------------------------------------===//
12647 // Since no target specific shuffle was selected for this generic one,
12648 // lower it into other known shuffles. FIXME: this isn't true yet, but
12649 // this is the plan.
12652 // Handle v8i16 specifically since SSE can do byte extraction and insertion.
12653 if (VT == MVT::v8i16) {
12654 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG);
12655 if (NewOp.getNode())
12656 return NewOp;
12657 }
12659 if (VT == MVT::v16i16 && Subtarget->hasInt256()) {
12660 SDValue NewOp = LowerVECTOR_SHUFFLEv16i16(Op, DAG);
12661 if (NewOp.getNode())
12662 return NewOp;
12663 }
12665 if (VT == MVT::v16i8) {
12666 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, Subtarget, DAG);
12667 if (NewOp.getNode())
12668 return NewOp;
12669 }
12671 if (VT == MVT::v32i8) {
12672 SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG);
12673 if (NewOp.getNode())
12674 return NewOp;
12675 }
12677 // Handle all 128-bit wide vectors with 4 elements, and match them with
12678 // several different shuffle types.
12679 if (NumElems == 4 && VT.is128BitVector())
12680 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG);
12682 // Handle general 256-bit shuffles
12683 if (VT.is256BitVector())
12684 return LowerVECTOR_SHUFFLE_256(SVOp, DAG);
12686 return SDValue();
12687 }
12689 // This function assumes its argument is a BUILD_VECTOR of constants or
12690 // undef SDNodes. i.e.: ISD::isBuildVectorOfConstantSDNodes(BuildVector) is
12691 // true.
12692 static bool BUILD_VECTORtoBlendMask(BuildVectorSDNode *BuildVector,
12693 unsigned &MaskValue) {
12694 MaskValue = 0;
12695 unsigned NumElems = BuildVector->getNumOperands();
12696 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
12697 unsigned NumLanes = (NumElems - 1) / 8 + 1;
12698 unsigned NumElemsInLane = NumElems / NumLanes;
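// For example, v16i16 gives NumLanes = (16 - 1) / 8 + 1 = 2 and
// NumElemsInLane = 8, while v8i16 and v4i32 stay in a single lane.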
12700 // Blend for v16i16 should be symmetric for both lanes.
12701 for (unsigned i = 0; i < NumElemsInLane; ++i) {
12702 SDValue EltCond = BuildVector->getOperand(i);
12703 SDValue SndLaneEltCond =
12704 (NumLanes == 2) ? BuildVector->getOperand(i + NumElemsInLane) : EltCond;
12706 int Lane1Cond = -1, Lane2Cond = -1;
12707 if (isa<ConstantSDNode>(EltCond))
12708 Lane1Cond = !isZero(EltCond);
12709 if (isa<ConstantSDNode>(SndLaneEltCond))
12710 Lane2Cond = !isZero(SndLaneEltCond);
12712 if (Lane1Cond == Lane2Cond || Lane2Cond < 0)
12713 // Lane1Cond != 0, means we want the first argument.
12714 // Lane1Cond == 0, means we want the second argument.
12715 // The encoding of this argument is 0 for the first argument, 1
12716 // for the second. Therefore, invert the condition.
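// For example, the v4i32 condition <-1, 0, -1, -1> selects LHS, RHS, LHS,
// LHS; inverting each lane condition gives MaskValue = 0b0010.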
12717 MaskValue |= !Lane1Cond << i;
12718 else if (Lane1Cond < 0)
12719 MaskValue |= !Lane2Cond << i;
12720 else
12721 return false;
12722 }
12724 return true;
12725 }
12726 /// \brief Try to lower a VSELECT instruction to an immediate-controlled blend
12727 /// node.
12728 static SDValue lowerVSELECTtoBLENDI(SDValue Op, const X86Subtarget *Subtarget,
12729 SelectionDAG &DAG) {
12730 SDValue Cond = Op.getOperand(0);
12731 SDValue LHS = Op.getOperand(1);
12732 SDValue RHS = Op.getOperand(2);
12733 SDLoc dl(Op);
12734 MVT VT = Op.getSimpleValueType();
12735 MVT EltVT = VT.getVectorElementType();
12736 unsigned NumElems = VT.getVectorNumElements();
12738 // There is no blend with immediate in AVX-512.
12739 if (VT.is512BitVector())
12740 return SDValue();
12742 if (!Subtarget->hasSSE41() || EltVT == MVT::i8)
12743 return SDValue();
12744 if (!Subtarget->hasInt256() && VT == MVT::v16i16)
12745 return SDValue();
12747 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
12748 return SDValue();
12750 // Check the mask for BLEND and build the value.
12751 unsigned MaskValue = 0;
12752 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
12753 return SDValue();
12755 // Convert i32 vectors to floating point if it is not AVX2.
12756 // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
12757 MVT BlendVT = VT;
12758 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
12759 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
12760 NumElems);
12761 LHS = DAG.getNode(ISD::BITCAST, dl, BlendVT, LHS);
12762 RHS = DAG.getNode(ISD::BITCAST, dl, BlendVT, RHS);
12763 }
12765 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, LHS, RHS,
12766 DAG.getConstant(MaskValue, MVT::i32));
12767 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
12770 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
12771 // A vselect where all conditions and data are constants can be optimized into
12772 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
12773 if (ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(0).getNode()) &&
12774 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(1).getNode()) &&
12775 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(2).getNode()))
12776 return SDValue();
12778 SDValue BlendOp = lowerVSELECTtoBLENDI(Op, Subtarget, DAG);
12779 if (BlendOp.getNode())
12780 return BlendOp;
12782 // Some types for vselect were previously set to Expand, not Legal or
12783 // Custom. Return an empty SDValue so we fall-through to Expand, after
12784 // the Custom lowering phase.
12785 MVT VT = Op.getSimpleValueType();
12786 switch (VT.SimpleTy) {
12787 default:
12788 break;
12789 case MVT::v8i16:
12790 case MVT::v16i16:
12791 if (Subtarget->hasBWI() && Subtarget->hasVLX())
12792 break;
12793 return SDValue();
12794 }
12796 // We couldn't create a "Blend with immediate" node.
12797 // This node should still be legal, but we'll have to emit a blendv*
12798 // instruction.
12799 return Op;
12800 }
12802 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
12803 MVT VT = Op.getSimpleValueType();
12804 SDLoc dl(Op);
12806 if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
12807 return SDValue();
12809 if (VT.getSizeInBits() == 8) {
12810 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
12811 Op.getOperand(0), Op.getOperand(1));
12812 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
12813 DAG.getValueType(VT));
12814 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
12817 if (VT.getSizeInBits() == 16) {
12818 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
12819 // If Idx is 0, it's cheaper to do a move instead of a pextrw.
12820 if (Idx == 0)
12821 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
12822 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
12823 DAG.getNode(ISD::BITCAST, dl,
12824 MVT::v4i32,
12825 Op.getOperand(0)),
12826 Op.getOperand(1)));
12827 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
12828 Op.getOperand(0), Op.getOperand(1));
12829 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
12830 DAG.getValueType(VT));
12831 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
12834 if (VT == MVT::f32) {
12835 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
12836 // the result back to FR32 register. It's only worth matching if the
12837 // result has a single use which is a store or a bitcast to i32. And in
12838 // the case of a store, it's not worth it if the index is a constant 0,
12839 // because a MOVSSmr can be used instead, which is smaller and faster.
12840 if (!Op.hasOneUse())
12841 return SDValue();
12842 SDNode *User = *Op.getNode()->use_begin();
12843 if ((User->getOpcode() != ISD::STORE ||
12844 (isa<ConstantSDNode>(Op.getOperand(1)) &&
12845 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
12846 (User->getOpcode() != ISD::BITCAST ||
12847 User->getValueType(0) != MVT::i32))
12848 return SDValue();
12849 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
12850 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
12851 Op.getOperand(0)),
12852 Op.getOperand(1));
12853 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
12854 }
12856 if (VT == MVT::i32 || VT == MVT::i64) {
12857 // ExtractPS/pextrq works with constant index.
12858 if (isa<ConstantSDNode>(Op.getOperand(1)))
12859 return Op;
12860 }
12862 return SDValue();
12863 }
12864 /// Extract one bit from mask vector, like v16i1 or v8i1.
12865 /// AVX-512 feature.
12866 SDValue
12867 X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const {
12868 SDValue Vec = Op.getOperand(0);
12869 SDLoc dl(Vec);
12870 MVT VecVT = Vec.getSimpleValueType();
12871 SDValue Idx = Op.getOperand(1);
12872 MVT EltVT = Op.getSimpleValueType();
12874 assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
12875 assert((VecVT.getVectorNumElements() <= 16 || Subtarget->hasBWI()) &&
12876 "Unexpected vector type in ExtractBitFromMaskVector");
12878 // variable index can't be handled in mask registers,
12879 // extend vector to VR512
12880 if (!isa<ConstantSDNode>(Idx)) {
12881 MVT ExtVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
12882 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Vec);
12883 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
12884 ExtVT.getVectorElementType(), Ext, Idx);
12885 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
12888 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
12889 const TargetRegisterClass* rc = getRegClassFor(VecVT);
12890 if (!Subtarget->hasDQI() && (VecVT.getVectorNumElements() <= 8))
12891 rc = getRegClassFor(MVT::v16i1);
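// The shift pair below isolates one bit: for a 16-bit mask register
// (MaxShift = 15) and IdxVal = 3, shifting left by 12 moves bit 3 into the
// MSB, and shifting right by 15 brings it to bit 0 with all others cleared.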
12892 unsigned MaxShift = rc->getSize()*8 - 1;
12893 Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
12894 DAG.getConstant(MaxShift - IdxVal, MVT::i8));
12895 Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
12896 DAG.getConstant(MaxShift, MVT::i8));
12897 return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec,
12898 DAG.getIntPtrConstant(0));
12899 }
12901 SDValue
12902 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
12903 SelectionDAG &DAG) const {
12904 SDLoc dl(Op);
12905 SDValue Vec = Op.getOperand(0);
12906 MVT VecVT = Vec.getSimpleValueType();
12907 SDValue Idx = Op.getOperand(1);
12909 if (Op.getSimpleValueType() == MVT::i1)
12910 return ExtractBitFromMaskVector(Op, DAG);
12912 if (!isa<ConstantSDNode>(Idx)) {
12913 if (VecVT.is512BitVector() ||
12914 (VecVT.is256BitVector() && Subtarget->hasInt256() &&
12915 VecVT.getVectorElementType().getSizeInBits() == 32)) {
12917 MVT MaskEltVT =
12918 MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
12919 MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
12920 MaskEltVT.getSizeInBits());
12922 Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT);
12923 SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT,
12924 getZeroVector(MaskVT, Subtarget, DAG, dl),
12925 Idx, DAG.getConstant(0, getPointerTy()));
12926 SDValue Perm = DAG.getNode(X86ISD::VPERMV, dl, VecVT, Mask, Vec);
12927 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(),
12928 Perm, DAG.getConstant(0, getPointerTy()));
12929 }
12930 return SDValue();
12931 }
12933 // If this is a 256-bit vector result, first extract the 128-bit vector and
12934 // then extract the element from the 128-bit vector.
12935 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
12937 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
12938 // Get the 128-bit vector.
12939 Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);
12940 MVT EltVT = VecVT.getVectorElementType();
12942 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
12944 // Reduce the index to its position within the 128-bit chunk.
12946 IdxVal -= (IdxVal/ElemsPerChunk)*ElemsPerChunk;
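// For example, extracting element 5 of a v8i32: ElemsPerChunk = 4, the
// extract above selects the upper 128-bit half, and 5 becomes index 1.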
12947 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
12948 DAG.getConstant(IdxVal, MVT::i32));
12951 assert(VecVT.is128BitVector() && "Unexpected vector length");
12953 if (Subtarget->hasSSE41()) {
12954 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
12955 if (Res.getNode())
12956 return Res;
12957 }
12959 MVT VT = Op.getSimpleValueType();
12960 // TODO: handle v16i8.
12961 if (VT.getSizeInBits() == 16) {
12962 SDValue Vec = Op.getOperand(0);
12963 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
12964 if (Idx == 0)
12965 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
12966 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
12967 DAG.getNode(ISD::BITCAST, dl,
12968 MVT::v4i32, Vec),
12969 Op.getOperand(1)));
12970 // Transform it so it match pextrw which produces a 32-bit result.
12971 MVT EltVT = MVT::i32;
12972 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
12973 Op.getOperand(0), Op.getOperand(1));
12974 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
12975 DAG.getValueType(VT));
12976 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
12979 if (VT.getSizeInBits() == 32) {
12980 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
12981 if (Idx == 0)
12982 return Op;
12984 // SHUFPS the element to the lowest double word, then movss.
12985 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
12986 MVT VVT = Op.getOperand(0).getSimpleValueType();
12987 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
12988 DAG.getUNDEF(VVT), Mask);
12989 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
12990 DAG.getIntPtrConstant(0));
12993 if (VT.getSizeInBits() == 64) {
12994 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
12995 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
12996 // to match extract_elt for f64.
12997 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
12998 if (Idx == 0)
12999 return Op;
13001 // UNPCKHPD the element to the lowest double word, then movsd.
13002 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
13003 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
13004 int Mask[2] = { 1, -1 };
13005 MVT VVT = Op.getOperand(0).getSimpleValueType();
13006 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13007 DAG.getUNDEF(VVT), Mask);
13008 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13009 DAG.getIntPtrConstant(0));
13010 }
13012 return SDValue();
13013 }
13015 /// Insert one bit to mask vector, like v16i1 or v8i1.
13016 /// AVX-512 feature.
13017 SDValue
13018 X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
13019 SDLoc dl(Op);
13020 SDValue Vec = Op.getOperand(0);
13021 SDValue Elt = Op.getOperand(1);
13022 SDValue Idx = Op.getOperand(2);
13023 MVT VecVT = Vec.getSimpleValueType();
13025 if (!isa<ConstantSDNode>(Idx)) {
13026 // Non constant index. Extend source and destination,
13027 // insert element and then truncate the result.
13028 MVT ExtVecVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13029 MVT ExtEltVT = (VecVT == MVT::v8i1 ? MVT::i64 : MVT::i32);
13030 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
13031 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVecVT, Vec),
13032 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
13033 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
13036 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13037 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
13038 if (Vec.getOpcode() == ISD::UNDEF)
13039 return DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13040 DAG.getConstant(IdxVal, MVT::i8));
13041 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13042 unsigned MaxShift = rc->getSize()*8 - 1;
13043 EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13044 DAG.getConstant(MaxShift, MVT::i8));
13045 EltInVec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, EltInVec,
13046 DAG.getConstant(MaxShift - IdxVal, MVT::i8));
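// After the two shifts, EltInVec has at most one set bit, at position
// IdxVal, so the OR below merges the new bit into Vec.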
13047 return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
13050 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
13051 SelectionDAG &DAG) const {
13052 MVT VT = Op.getSimpleValueType();
13053 MVT EltVT = VT.getVectorElementType();
13055 if (EltVT == MVT::i1)
13056 return InsertBitToMaskVector(Op, DAG);
13058 SDLoc dl(Op);
13059 SDValue N0 = Op.getOperand(0);
13060 SDValue N1 = Op.getOperand(1);
13061 SDValue N2 = Op.getOperand(2);
13062 if (!isa<ConstantSDNode>(N2))
13063 return SDValue();
13064 auto *N2C = cast<ConstantSDNode>(N2);
13065 unsigned IdxVal = N2C->getZExtValue();
13067 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
13068 // into that, and then insert the subvector back into the result.
13069 if (VT.is256BitVector() || VT.is512BitVector()) {
13070 // Get the desired 128-bit vector half.
13071 SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
13073 // Insert the element into the desired half.
13074 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
13075 unsigned IdxIn128 = IdxVal - (IdxVal / NumEltsIn128) * NumEltsIn128;
13077 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
13078 DAG.getConstant(IdxIn128, MVT::i32));
13080 // Insert the changed part back to the 256-bit vector
13081 return Insert128BitVector(N0, V, IdxVal, DAG, dl);
13083 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
13085 if (Subtarget->hasSSE41()) {
13086 if (EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) {
13087 unsigned Opc;
13088 if (VT == MVT::v8i16) {
13089 Opc = X86ISD::PINSRW;
13090 } else {
13091 assert(VT == MVT::v16i8);
13092 Opc = X86ISD::PINSRB;
13095 // Transform it so it matches pinsr{b,w} which expects a GR32 as its second
13096 // argument.
13097 if (N1.getValueType() != MVT::i32)
13098 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13099 if (N2.getValueType() != MVT::i32)
13100 N2 = DAG.getIntPtrConstant(IdxVal);
13101 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
13102 }
13104 if (EltVT == MVT::f32) {
13105 // Bits [7:6] of the constant are the source select. This will always be
13106 // zero here. The DAG Combiner may combine an extract_elt index into these
13107 // bits. For example (insert (extract, 3), 2) could be matched by putting
13108 // the '3' into bits [7:6] of X86ISD::INSERTPS.
13110 // Bits [5:4] of the constant are the destination select. This is the
13111 // value of the incoming immediate.
13113 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
13114 // combine either bitwise AND or insert of float 0.0 to set these bits.
13115 N2 = DAG.getIntPtrConstant(IdxVal << 4);
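// For example, inserting into element 2 yields IdxVal << 4 = 0x20, i.e.
// destination select bits [5:4] = 2 with source select and zero mask clear.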
13116 // Create this as a scalar to vector..
13117 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
13118 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
13119 }
13121 if (EltVT == MVT::i32 || EltVT == MVT::i64) {
13122 // PINSR* works with constant index.
13123 return Op;
13124 }
13125 }
13127 if (EltVT == MVT::i8)
13128 return SDValue();
13130 if (EltVT.getSizeInBits() == 16) {
13131 // Transform it so it matches pinsrw which expects a 16-bit value in a GR32
13132 // as its second argument.
13133 if (N1.getValueType() != MVT::i32)
13134 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13135 if (N2.getValueType() != MVT::i32)
13136 N2 = DAG.getIntPtrConstant(IdxVal);
13137 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
13138 }
13140 return SDValue();
13141 }
13142 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
13143 SDLoc dl(Op);
13144 MVT OpVT = Op.getSimpleValueType();
13146 // If this is a 256-bit vector result, first insert into a 128-bit
13147 // vector and then insert into the 256-bit vector.
13148 if (!OpVT.is128BitVector()) {
13149 // Insert into a 128-bit vector.
13150 unsigned SizeFactor = OpVT.getSizeInBits()/128;
13151 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
13152 OpVT.getVectorNumElements() / SizeFactor);
13154 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
13156 // Insert the 128-bit vector.
13157 return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
13160 if (OpVT == MVT::v1i64 &&
13161 Op.getOperand(0).getValueType() == MVT::i64)
13162 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
13164 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
13165 assert(OpVT.is128BitVector() && "Expected an SSE type!");
13166 return DAG.getNode(ISD::BITCAST, dl, OpVT,
13167 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
13170 // Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
13171 // a simple subregister reference or explicit instructions to grab
13172 // upper bits of a vector.
13173 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13174 SelectionDAG &DAG) {
13175 SDLoc dl(Op);
13176 SDValue In = Op.getOperand(0);
13177 SDValue Idx = Op.getOperand(1);
13178 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13179 MVT ResVT = Op.getSimpleValueType();
13180 MVT InVT = In.getSimpleValueType();
13182 if (Subtarget->hasFp256()) {
13183 if (ResVT.is128BitVector() &&
13184 (InVT.is256BitVector() || InVT.is512BitVector()) &&
13185 isa<ConstantSDNode>(Idx)) {
13186 return Extract128BitVector(In, IdxVal, DAG, dl);
13188 if (ResVT.is256BitVector() && InVT.is512BitVector() &&
13189 isa<ConstantSDNode>(Idx)) {
13190 return Extract256BitVector(In, IdxVal, DAG, dl);
13191 }
13192 }
13194 return SDValue();
13195 }
13196 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
13197 // simple superregister reference or explicit instructions to insert
13198 // the upper bits of a vector.
13199 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13200 SelectionDAG &DAG) {
13201 if (!Subtarget->hasAVX())
13202 return SDValue();
13204 SDLoc dl(Op);
13205 SDValue Vec = Op.getOperand(0);
13206 SDValue SubVec = Op.getOperand(1);
13207 SDValue Idx = Op.getOperand(2);
13208 MVT OpVT = Op.getSimpleValueType();
13209 MVT SubVecVT = SubVec.getSimpleValueType();
13211 if ((OpVT.is256BitVector() || OpVT.is512BitVector()) &&
13212 SubVecVT.is128BitVector() && isa<ConstantSDNode>(Idx)) {
13213 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13214 return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
13217 if (OpVT.is512BitVector() &&
13218 SubVecVT.is256BitVector() && isa<ConstantSDNode>(Idx)) {
13219 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13220 return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
13221 }
13223 return SDValue();
13224 }
13226 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
13227 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
13228 // one of the above mentioned nodes. It has to be wrapped because otherwise
13229 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
13230 // be used to form an addressing mode. These wrapped nodes will be selected
13231 // during isel.
13232 SDValue
13233 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
13234 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
13236 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13237 // global base reg.
13238 unsigned char OpFlag = 0;
13239 unsigned WrapperKind = X86ISD::Wrapper;
13240 CodeModel::Model M = DAG.getTarget().getCodeModel();
13242 if (Subtarget->isPICStyleRIPRel() &&
13243 (M == CodeModel::Small || M == CodeModel::Kernel))
13244 WrapperKind = X86ISD::WrapperRIP;
13245 else if (Subtarget->isPICStyleGOT())
13246 OpFlag = X86II::MO_GOTOFF;
13247 else if (Subtarget->isPICStyleStubPIC())
13248 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13250 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
13251 CP->getAlignment(),
13252 CP->getOffset(), OpFlag);
13253 SDLoc DL(CP);
13254 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13255 // With PIC, the address is actually $g + Offset.
13256 if (OpFlag) {
13257 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13258 DAG.getNode(X86ISD::GlobalBaseReg,
13259 SDLoc(), getPointerTy()),
13260 Result);
13261 }
13263 return Result;
13264 }
13266 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
13267 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
13269 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13270 // global base reg.
13271 unsigned char OpFlag = 0;
13272 unsigned WrapperKind = X86ISD::Wrapper;
13273 CodeModel::Model M = DAG.getTarget().getCodeModel();
13275 if (Subtarget->isPICStyleRIPRel() &&
13276 (M == CodeModel::Small || M == CodeModel::Kernel))
13277 WrapperKind = X86ISD::WrapperRIP;
13278 else if (Subtarget->isPICStyleGOT())
13279 OpFlag = X86II::MO_GOTOFF;
13280 else if (Subtarget->isPICStyleStubPIC())
13281 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13283 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
13284 OpFlag);
13285 SDLoc DL(JT);
13286 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13288 // With PIC, the address is actually $g + Offset.
13289 if (OpFlag)
13290 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13291 DAG.getNode(X86ISD::GlobalBaseReg,
13292 SDLoc(), getPointerTy()),
13293 Result);
13295 return Result;
13296 }
13298 SDValue
13299 X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
13300 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
13302 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13303 // global base reg.
13304 unsigned char OpFlag = 0;
13305 unsigned WrapperKind = X86ISD::Wrapper;
13306 CodeModel::Model M = DAG.getTarget().getCodeModel();
13308 if (Subtarget->isPICStyleRIPRel() &&
13309 (M == CodeModel::Small || M == CodeModel::Kernel)) {
13310 if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
13311 OpFlag = X86II::MO_GOTPCREL;
13312 WrapperKind = X86ISD::WrapperRIP;
13313 } else if (Subtarget->isPICStyleGOT()) {
13314 OpFlag = X86II::MO_GOT;
13315 } else if (Subtarget->isPICStyleStubPIC()) {
13316 OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
13317 } else if (Subtarget->isPICStyleStubNoDynamic()) {
13318 OpFlag = X86II::MO_DARWIN_NONLAZY;
13321 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);
13323 SDLoc DL(Op);
13324 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13326 // With PIC, the address is actually $g + Offset.
13327 if (DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
13328 !Subtarget->is64Bit()) {
13329 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13330 DAG.getNode(X86ISD::GlobalBaseReg,
13331 SDLoc(), getPointerTy()),
13332 Result);
13333 }
13335 // For symbols that require a load from a stub to get the address, emit the
13336 // load.
13337 if (isGlobalStubReference(OpFlag))
13338 Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result,
13339 MachinePointerInfo::getGOT(), false, false, false, 0);
13341 return Result;
13342 }
13344 SDValue
13345 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
13346 // Create the TargetBlockAddressAddress node.
13347 unsigned char OpFlags =
13348 Subtarget->ClassifyBlockAddressReference();
13349 CodeModel::Model M = DAG.getTarget().getCodeModel();
13350 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
13351 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
13352 SDLoc dl(Op);
13353 SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset,
13354 OpFlags);
13356 if (Subtarget->isPICStyleRIPRel() &&
13357 (M == CodeModel::Small || M == CodeModel::Kernel))
13358 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13359 else
13360 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13362 // With PIC, the address is actually $g + Offset.
13363 if (isGlobalRelativeToPICBase(OpFlags)) {
13364 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13365 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13366 Result);
13367 }
13369 return Result;
13370 }
13372 SDValue
13373 X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
13374 int64_t Offset, SelectionDAG &DAG) const {
13375 // Create the TargetGlobalAddress node, folding in the constant
13376 // offset if it is legal.
13377 unsigned char OpFlags =
13378 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget());
13379 CodeModel::Model M = DAG.getTarget().getCodeModel();
13380 SDValue Result;
13381 if (OpFlags == X86II::MO_NO_FLAG &&
13382 X86::isOffsetSuitableForCodeModel(Offset, M)) {
13383 // A direct static reference to a global.
13384 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
13385 } else {
13387 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
13388 }
13390 if (Subtarget->isPICStyleRIPRel() &&
13391 (M == CodeModel::Small || M == CodeModel::Kernel))
13392 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13393 else
13394 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13396 // With PIC, the address is actually $g + Offset.
13397 if (isGlobalRelativeToPICBase(OpFlags)) {
13398 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13399 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13400 Result);
13401 }
13403 // For globals that require a load from a stub to get the address, emit the
13404 // load.
13405 if (isGlobalStubReference(OpFlags))
13406 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
13407 MachinePointerInfo::getGOT(), false, false, false, 0);
13409 // If there was a non-zero offset that we didn't fold, create an explicit
13410 // addition for it.
13411 if (Offset != 0)
13412 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
13413 DAG.getConstant(Offset, getPointerTy()));
13415 return Result;
13416 }
13418 SDValue
13419 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
13420 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
13421 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
13422 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
13423 }
13425 static SDValue
13426 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
13427 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
13428 unsigned char OperandFlags, bool LocalDynamic = false) {
13429 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13430 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13431 SDLoc dl(GA);
13432 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13433 GA->getValueType(0),
13434 GA->getOffset(),
13435 OperandFlags);
13437 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
13438 : X86ISD::TLSADDR;
13440 if (InFlag) {
13441 SDValue Ops[] = { Chain, TGA, *InFlag };
13442 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13443 } else {
13444 SDValue Ops[] = { Chain, TGA };
13445 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13446 }
13448 // TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
13449 MFI->setAdjustsStack(true);
13450 MFI->setHasCalls(true);
13452 SDValue Flag = Chain.getValue(1);
13453 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
13454 }
13456 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
13457 static SDValue
13458 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13459 const EVT PtrVT) {
13460 SDValue InFlag;
13461 SDLoc dl(GA); // ? function entry point might be better
13462 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13463 DAG.getNode(X86ISD::GlobalBaseReg,
13464 SDLoc(), PtrVT), InFlag);
13465 InFlag = Chain.getValue(1);
13467 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
13470 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
13471 static SDValue
13472 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13473 const EVT PtrVT) {
13474 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
13475 X86::RAX, X86II::MO_TLSGD);
13478 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
13479 SelectionDAG &DAG,
13480 const EVT PtrVT,
13481 bool is64Bit) {
13482 SDLoc dl(GA);
13484 // Get the start address of the TLS block for this module.
13485 X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
13486 .getInfo<X86MachineFunctionInfo>();
13487 MFI->incNumLocalDynamicTLSAccesses();
13489 SDValue Base;
13490 if (is64Bit) {
13491 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
13492 X86II::MO_TLSLD, /*LocalDynamic=*/true);
13493 } else {
13494 SDValue InFlag;
13495 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13496 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
13497 InFlag = Chain.getValue(1);
13498 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
13499 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
13500 }
13502 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
13503 // of Base.
13505 // Build x@dtpoff.
13506 unsigned char OperandFlags = X86II::MO_DTPOFF;
13507 unsigned WrapperKind = X86ISD::Wrapper;
13508 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13509 GA->getValueType(0),
13510 GA->getOffset(), OperandFlags);
13511 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13513 // Add x@dtpoff with the base.
13514 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
13517 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
13518 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13519 const EVT PtrVT, TLSModel::Model model,
13520 bool is64Bit, bool isPIC) {
13521 SDLoc dl(GA);
13523 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
13524 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
13525 is64Bit ? 257 : 256));
13527 SDValue ThreadPointer =
13528 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0),
13529 MachinePointerInfo(Ptr), false, false, false, 0);
13531 unsigned char OperandFlags = 0;
13532 // Most TLS accesses are not RIP relative, even on x86-64. One exception is
13533 // the initial-exec model on x86-64, where the GOT entry is RIP relative.
13534 unsigned WrapperKind = X86ISD::Wrapper;
13535 if (model == TLSModel::LocalExec) {
13536 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
13537 } else if (model == TLSModel::InitialExec) {
13538 if (is64Bit) {
13539 OperandFlags = X86II::MO_GOTTPOFF;
13540 WrapperKind = X86ISD::WrapperRIP;
13541 } else {
13542 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
13543 }
13544 } else {
13545 llvm_unreachable("Unexpected model");
13546 }
13548 // emit "addl x@ntpoff,%eax" (local exec)
13549 // or "addl x@indntpoff,%eax" (initial exec)
13550 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
13551 SDValue TGA =
13552 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
13553 GA->getOffset(), OperandFlags);
13554 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13556 if (model == TLSModel::InitialExec) {
13557 if (isPIC && !is64Bit) {
13558 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
13559 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
13560 Offset);
13561 }
13563 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
13564 MachinePointerInfo::getGOT(), false, false, false, 0);
13565 }
13567 // The address of the thread local variable is the add of the thread
13568 // pointer with the offset of the variable.
13569 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
13570 }
13572 SDValue
13573 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
13575 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
13576 const GlobalValue *GV = GA->getGlobal();
13578 if (Subtarget->isTargetELF()) {
13579 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
13581 switch (model) {
13582 case TLSModel::GeneralDynamic:
13583 if (Subtarget->is64Bit())
13584 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
13585 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
13586 case TLSModel::LocalDynamic:
13587 return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(),
13588 Subtarget->is64Bit());
13589 case TLSModel::InitialExec:
13590 case TLSModel::LocalExec:
13591 return LowerToTLSExecModel(
13592 GA, DAG, getPointerTy(), model, Subtarget->is64Bit(),
13593 DAG.getTarget().getRelocationModel() == Reloc::PIC_);
13594 }
13595 llvm_unreachable("Unknown TLS model.");
13596 }
13598 if (Subtarget->isTargetDarwin()) {
13599 // Darwin only has one model of TLS. Lower to that.
13600 unsigned char OpFlag = 0;
13601 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
13602 X86ISD::WrapperRIP : X86ISD::Wrapper;
13604 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13605 // global base reg.
13606 bool PIC32 = (DAG.getTarget().getRelocationModel() == Reloc::PIC_) &&
13607 !Subtarget->is64Bit();
13608 if (PIC32)
13609 OpFlag = X86II::MO_TLVP_PIC_BASE;
13610 else
13611 OpFlag = X86II::MO_TLVP;
13612 SDLoc DL(Op);
13613 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
13614 GA->getValueType(0),
13615 GA->getOffset(), OpFlag);
13616 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13618 // With PIC32, the address is actually $g + Offset.
13619 if (PIC32)
13620 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13621 DAG.getNode(X86ISD::GlobalBaseReg,
13622 SDLoc(), getPointerTy()),
13623 Offset);
13625 // Lowering the machine isd will make sure everything is in the right
13626 // location.
13627 SDValue Chain = DAG.getEntryNode();
13628 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13629 SDValue Args[] = { Chain, Offset };
13630 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
13632 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
13633 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13634 MFI->setAdjustsStack(true);
13636 // And our return value (tls address) is in the standard call return value
13637 // location.
13638 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
13639 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
13640 Chain.getValue(1));
13641 }
13643 if (Subtarget->isTargetKnownWindowsMSVC() ||
13644 Subtarget->isTargetWindowsGNU()) {
13645 // Just use the implicit TLS architecture
13646 // Need to generate something similar to:
13647 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
13648 // ; from TEB
13649 // mov ecx, dword [rel _tls_index]: Load index (from C runtime)
13650 // mov rcx, qword [rdx+rcx*8]
13651 // mov eax, .tls$:tlsvar
13652 // [rax+rcx] contains the address
13653 // Windows 64bit: gs:0x58
13654 // Windows 32bit: fs:__tls_array
13656 SDLoc dl(GA);
13657 SDValue Chain = DAG.getEntryNode();
13659 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
13660 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
13661 // use its literal value of 0x2C.
13662 Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
13663 ? Type::getInt8PtrTy(*DAG.getContext(),
13664 256)
13665 : Type::getInt32PtrTy(*DAG.getContext(),
13666 257));
13668 SDValue TlsArray =
13669 Subtarget->is64Bit()
13670 ? DAG.getIntPtrConstant(0x58)
13671 : (Subtarget->isTargetWindowsGNU()
13672 ? DAG.getIntPtrConstant(0x2C)
13673 : DAG.getExternalSymbol("_tls_array", getPointerTy()));
13675 SDValue ThreadPointer =
13676 DAG.getLoad(getPointerTy(), dl, Chain, TlsArray,
13677 MachinePointerInfo(Ptr), false, false, false, 0);
13679 // Load the _tls_index variable
13680 SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
13681 if (Subtarget->is64Bit())
13682 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain,
13683 IDX, MachinePointerInfo(), MVT::i32,
13684 false, false, false, 0);
13686 IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
13687 false, false, false, 0);
13689 SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
13690 getPointerTy());
13691 IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);
13693 SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
13694 res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(),
13695 false, false, false, 0);
13697 // Get the offset of start of .tls section
13698 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13699 GA->getValueType(0),
13700 GA->getOffset(), X86II::MO_SECREL);
13701 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA);
13703 // The address of the thread local variable is the add of the thread
13704 // pointer with the offset of the variable.
13705 return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset);
13708 llvm_unreachable("TLS not implemented for this target.");
13709 }
13711 /// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
13712 /// and take a 2 x i32 value to shift plus a shift amount.
13713 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
13714 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
13715 MVT VT = Op.getSimpleValueType();
13716 unsigned VTBits = VT.getSizeInBits();
13717 SDLoc dl(Op);
13718 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
13719 SDValue ShOpLo = Op.getOperand(0);
13720 SDValue ShOpHi = Op.getOperand(1);
13721 SDValue ShAmt = Op.getOperand(2);
13722 // X86ISD::SHLD and X86ISD::SHRD have defined overflow behavior but the
13723 // generic ISD nodes haven't. Insert an AND to be safe; it's optimized away
13724 // during isel.
13725 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
13726 DAG.getConstant(VTBits - 1, MVT::i8));
13727 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
13728 DAG.getConstant(VTBits - 1, MVT::i8))
13729 : DAG.getConstant(0, VT);
13731 SDValue Tmp2, Tmp3;
13732 if (Op.getOpcode() == ISD::SHL_PARTS) {
13733 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
13734 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
13736 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
13737 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
13738 }
13740 // If the shift amount is larger than or equal to the width of a part, we
13741 // can't rely on the results of shld/shrd. Insert a test and select the
13742 // appropriate values for large shift amounts.
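// Worked example (SHL_PARTS of an i64 by 40 on 32-bit): the AND against 32
// is non-zero, so the CMOVs below pick Hi = Lo << (40 & 31) = Lo << 8 and
// Lo = 0 rather than the SHLD/SHL pair computed above.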
13743 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
13744 DAG.getConstant(VTBits, MVT::i8));
13745 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
13746 AndNode, DAG.getConstant(0, MVT::i8));
13748 SDValue Lo, Hi;
13749 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
13750 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
13751 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
13753 if (Op.getOpcode() == ISD::SHL_PARTS) {
13754 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
13755 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
13756 } else {
13757 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
13758 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
13759 }
13761 SDValue Ops[2] = { Lo, Hi };
13762 return DAG.getMergeValues(Ops, dl);
13763 }
13765 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
13766 SelectionDAG &DAG) const {
13767 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
13768 SDLoc dl(Op);
13770 if (SrcVT.isVector()) {
13771 if (SrcVT.getVectorElementType() == MVT::i1) {
13772 MVT IntegerVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
13773 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
13774 DAG.getNode(ISD::SIGN_EXTEND, dl, IntegerVT,
13775 Op.getOperand(0)));
13776 }
13777 return SDValue();
13778 }
13780 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
13781 "Unknown SINT_TO_FP to lower!");
13783 // These are really Legal; return the operand so the caller accepts it as
13784 // Legal.
13785 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
13786 return Op;
13787 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
13788 Subtarget->is64Bit()) {
13789 return Op;
13790 }
13792 unsigned Size = SrcVT.getSizeInBits()/8;
13793 MachineFunction &MF = DAG.getMachineFunction();
13794 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
13795 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
13796 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
13797 StackSlot,
13798 MachinePointerInfo::getFixedStack(SSFI),
13799 false, false, 0);
13800 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
13801 }
13803 SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
13804 SDValue StackSlot,
13805 SelectionDAG &DAG) const {
13806 // Build the FILD
13807 SDLoc DL(Op);
13808 SDVTList Tys;
13809 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
13810 if (useSSE)
13811 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
13812 else
13813 Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
13815 unsigned ByteSize = SrcVT.getSizeInBits()/8;
13817 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
13818 MachineMemOperand *MMO;
13819 if (FI) {
13820 int SSFI = FI->getIndex();
13821 MMO =
13822 DAG.getMachineFunction()
13823 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
13824 MachineMemOperand::MOLoad, ByteSize, ByteSize);
13825 } else {
13826 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
13827 StackSlot = StackSlot.getOperand(1);
13828 }
13829 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
13830 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
13831 X86ISD::FILD, DL,
13832 Tys, Ops, SrcVT, MMO);
13834 if (useSSE) {
13835 Chain = Result.getValue(1);
13836 SDValue InFlag = Result.getValue(2);
13838 // FIXME: Currently the FST is flagged to the FILD_FLAG. This
13839 // shouldn't be necessary except that RFP cannot be live across
13840 // multiple blocks. When stackifier is fixed, they can be uncoupled.
13841 MachineFunction &MF = DAG.getMachineFunction();
13842 unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
13843 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
13844 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
13845 Tys = DAG.getVTList(MVT::Other);
13846 SDValue Ops[] = {
13847 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
13848 };
13849 MachineMemOperand *MMO =
13850 DAG.getMachineFunction()
13851 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
13852 MachineMemOperand::MOStore, SSFISize, SSFISize);
13854 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
13855 Ops, Op.getValueType(), MMO);
13856 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
13857 MachinePointerInfo::getFixedStack(SSFI),
13858 false, false, false, 0);
13859 }
13861 return Result;
13862 }
13864 // LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
13865 SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
13866 SelectionDAG &DAG) const {
13867 // This algorithm is not obvious. Here is what we're trying to output:
13868 /*
13869 movq %rax, %xmm0
13870 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
13871 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
13872 #ifdef __SSE3__
13873 haddpd %xmm0, %xmm0
13874 #else
13875 pshufd $0x4e, %xmm0, %xmm1
13876 addpd %xmm1, %xmm0
13877 #endif
13878 */
13879 SDLoc dl(Op);
13881 LLVMContext *Context = DAG.getContext();
13883 // Build some magic constants.
13884 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
13885 Constant *C0 = ConstantDataVector::get(*Context, CV0);
13886 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);
13888 SmallVector<Constant*,2> CV1;
13889 CV1.push_back(
13890 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
13891 APInt(64, 0x4330000000000000ULL))));
13892 CV1.push_back(
13893 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
13894 APInt(64, 0x4530000000000000ULL))));
13895 Constant *C1 = ConstantVector::get(CV1);
13896 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);
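// Why the magic constants work: or'ing the low 32 bits of the input into the
// mantissa of 0x1.0p52 (0x4330000000000000) yields exactly 2^52 + lo, and
// likewise 0x1.0p84 (0x4530000000000000) yields 2^84 + hi * 2^32; once the
// subtract removes both biases, the horizontal add produces hi * 2^32 + lo.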
13898 // Load the 64-bit value into an XMM register.
13899 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
13900 Op.getOperand(0));
13901 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
13902 MachinePointerInfo::getConstantPool(),
13903 false, false, false, 16);
13904 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32,
13905 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1),
13906 CLod0);
13908 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
13909 MachinePointerInfo::getConstantPool(),
13910 false, false, false, 16);
13911 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1);
13912 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
13914 SDValue Result;
13915 if (Subtarget->hasSSE3()) {
13916 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
13917 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
13918 } else {
13919 SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub);
13920 SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
13921 S2F, 0x4E, DAG);
13922 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
13923 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle),
13924 Sub);
13925 }
13927 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
13928 DAG.getIntPtrConstant(0));
13929 }
13931 // LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
13932 SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
13933 SelectionDAG &DAG) const {
13934 SDLoc dl(Op);
13935 // FP constant to bias correct the final result.
13936 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
13937 MVT::f64);
13939 // Load the 32-bit value into an XMM register.
13940 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
13941 Op.getOperand(0));
13943 // Zero out the upper parts of the register.
13944 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
13946 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
13947 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
13948 DAG.getIntPtrConstant(0));
13950 // Or the load with the bias.
13951 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
13952 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
13953 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
13954 MVT::v2f64, Load)),
13955 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
13956 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
13957 MVT::v2f64, Bias)));
13958 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
13959 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
13960 DAG.getIntPtrConstant(0));
13962 // Subtract the bias.
13963 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
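// This is the same bias trick as above: (0x4330000000000000 | x) read as a
// double is exactly 2^52 + x for any 32-bit x, so subtracting the 2^52 bias
// leaves x converted to double without rounding.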
13965 // Handle final rounding.
13966 EVT DestVT = Op.getValueType();
13968 if (DestVT.bitsLT(MVT::f64))
13969 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
13970 DAG.getIntPtrConstant(0));
13971 if (DestVT.bitsGT(MVT::f64))
13972 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
13974 // Otherwise the result is already f64; no rounding needed.
13975 return Sub;
13976 }
13978 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
13979 const X86Subtarget &Subtarget) {
13980 // The algorithm is the following:
13981 // #ifdef __SSE4_1__
13982 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
13983 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
13984 // (uint4) 0x53000000, 0xaa);
13985 // #else
13986 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
13987 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
13988 // #endif
13989 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
13990 // return (float4) lo + fhi;
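// Why this works: as IEEE-754 bit patterns, lo is exactly 2^23 + (v & 0xffff)
// and hi is exactly 2^39 + (v >> 16) * 2^16, so lo + (hi - (2^39 + 2^23))
// reconstructs v, with a single rounding in the final add.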
13992 SDLoc DL(Op);
13993 SDValue V = Op->getOperand(0);
13994 EVT VecIntVT = V.getValueType();
13995 bool Is128 = VecIntVT == MVT::v4i32;
13996 EVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
13997 // If we convert to something other than the supported type, e.g., to v4f64,
13998 // abort early.
13999 if (VecFloatVT != Op->getValueType(0))
14000 return SDValue();
14002 unsigned NumElts = VecIntVT.getVectorNumElements();
14003 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
14004 "Unsupported custom type");
14005 assert(NumElts <= 8 && "The size of the constant array must be fixed");
14007 // In the #ifdef/#else code, we have in common:
14008 // - The vector of constants:
14009 // -- 0x4b000000
14010 // -- 0x53000000
14011 // - A shift:
14012 // -- v >> 16
14014 // Create the splat vector for 0x4b000000.
14015 SDValue CstLow = DAG.getConstant(0x4b000000, MVT::i32);
14016 SDValue CstLowArray[] = {CstLow, CstLow, CstLow, CstLow,
14017 CstLow, CstLow, CstLow, CstLow};
14018 SDValue VecCstLow = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14019 makeArrayRef(&CstLowArray[0], NumElts));
14020 // Create the splat vector for 0x53000000.
14021 SDValue CstHigh = DAG.getConstant(0x53000000, MVT::i32);
14022 SDValue CstHighArray[] = {CstHigh, CstHigh, CstHigh, CstHigh,
14023 CstHigh, CstHigh, CstHigh, CstHigh};
14024 SDValue VecCstHigh = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14025 makeArrayRef(&CstHighArray[0], NumElts));
14027 // Create the right shift.
14028 SDValue CstShift = DAG.getConstant(16, MVT::i32);
14029 SDValue CstShiftArray[] = {CstShift, CstShift, CstShift, CstShift,
14030 CstShift, CstShift, CstShift, CstShift};
14031 SDValue VecCstShift = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14032 makeArrayRef(&CstShiftArray[0], NumElts));
14033 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
14035 SDValue Low, High;
14036 if (Subtarget.hasSSE41()) {
14037 EVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
14038 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14039 SDValue VecCstLowBitcast =
14040 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstLow);
14041 SDValue VecBitcast = DAG.getNode(ISD::BITCAST, DL, VecI16VT, V);
14042 // Low will be bitcasted right away, so do not bother bitcasting back to its
14043 // original type.
14044 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
14045 VecCstLowBitcast, DAG.getConstant(0xaa, MVT::i32));
14046 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14047 // (uint4) 0x53000000, 0xaa);
14048 SDValue VecCstHighBitcast =
14049 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstHigh);
14050 SDValue VecShiftBitcast =
14051 DAG.getNode(ISD::BITCAST, DL, VecI16VT, HighShift);
14052 // High will be bitcasted right away, so do not bother bitcasting back to
14053 // its original type.
14054 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
14055 VecCstHighBitcast, DAG.getConstant(0xaa, MVT::i32));
14056 } else {
14057 SDValue CstMask = DAG.getConstant(0xffff, MVT::i32);
14058 SDValue VecCstMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, CstMask,
14059 CstMask, CstMask, CstMask);
14060 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14061 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
14062 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
14064 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14065 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
14066 }
14068 // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
14069 SDValue CstFAdd = DAG.getConstantFP(
14070 APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), MVT::f32);
14071 SDValue CstFAddArray[] = {CstFAdd, CstFAdd, CstFAdd, CstFAdd,
14072 CstFAdd, CstFAdd, CstFAdd, CstFAdd};
14073 SDValue VecCstFAdd = DAG.getNode(ISD::BUILD_VECTOR, DL, VecFloatVT,
14074 makeArrayRef(&CstFAddArray[0], NumElts));
14076 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14077 SDValue HighBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, High);
14078 SDValue FHigh =
14079 DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
14080 // return (float4) lo + fhi;
14081 SDValue LowBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, Low);
14082 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
14083 }
14085 SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
14086 SelectionDAG &DAG) const {
14087 SDValue N0 = Op.getOperand(0);
14088 MVT SVT = N0.getSimpleValueType();
14089 SDLoc dl(Op);
14091 switch (SVT.SimpleTy) {
14092 default:
14093 llvm_unreachable("Custom UINT_TO_FP is not supported!");
14094 case MVT::v4i8:
14095 case MVT::v4i16:
14096 case MVT::v8i8:
14097 case MVT::v8i16: {
14098 MVT NVT = MVT::getVectorVT(MVT::i32, SVT.getVectorNumElements());
14099 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14100 DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
14101 }
14102 case MVT::v4i32:
14103 case MVT::v8i32:
14104 return lowerUINT_TO_FP_vXi32(Op, DAG, *Subtarget);
14105 }
14106 llvm_unreachable(nullptr);
14107 }
14109 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
14110 SelectionDAG &DAG) const {
14111 SDValue N0 = Op.getOperand(0);
14112 SDLoc dl(Op);
14114 if (Op.getValueType().isVector())
14115 return lowerUINT_TO_FP_vec(Op, DAG);
14117 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
14118 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
14119 // the optimization here.
14120 if (DAG.SignBitIsZero(N0))
14121 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
14123 MVT SrcVT = N0.getSimpleValueType();
14124 MVT DstVT = Op.getSimpleValueType();
14125 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
14126 return LowerUINT_TO_FP_i64(Op, DAG);
14127 if (SrcVT == MVT::i32 && X86ScalarSSEf64)
14128 return LowerUINT_TO_FP_i32(Op, DAG);
14129 if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
14130 return SDValue();
14132 // Make a 64-bit buffer, and use it to build an FILD.
14133 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
14134 if (SrcVT == MVT::i32) {
14135 SDValue WordOff = DAG.getConstant(4, getPointerTy());
14136 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
14137 getPointerTy(), StackSlot, WordOff);
14138 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14139 StackSlot, MachinePointerInfo(),
14140 false, false, 0);
14141 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
14142 OffsetSlot, MachinePointerInfo(),
14143 false, false, 0);
14144 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
14145 return Fild;
14146 }
14148 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
14149 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14150 StackSlot, MachinePointerInfo(),
14151 false, false, 0);
14152 // For i64 source, we need to add the appropriate power of 2 if the input
14153 // was negative. This is the same as the optimization in
14154 // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
14155 // we must be careful to do the computation in x87 extended precision, not
14156 // in SSE. (The generic code can't know it's OK to do this, or how to.)
14157 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
14158 MachineMemOperand *MMO =
14159 DAG.getMachineFunction()
14160 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14161 MachineMemOperand::MOLoad, 8, 8);
14163 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
14164 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
14165 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
14166 MVT::i64, MMO);
14168 APInt FF(32, 0x5F800000ULL);
14170 // Check whether the sign bit is set.
14171 SDValue SignSet = DAG.getSetCC(dl,
14172 getSetCCResultType(*DAG.getContext(), MVT::i64),
14173 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
14174 ISD::SETLT);
14176 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
14177 SDValue FudgePtr = DAG.getConstantPool(
14178 ConstantInt::get(*DAG.getContext(), FF.zext(64)),
14179 getPointerTy());
14181 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
14182 SDValue Zero = DAG.getIntPtrConstant(0);
14183 SDValue Four = DAG.getIntPtrConstant(4);
14184 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
14185 Zero, Four);
14186 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);
14188 // Load the value out, extending it from f32 to f80.
14189 // FIXME: Avoid the extend by constructing the right constant pool?
14190 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
14191 FudgePtr, MachinePointerInfo::getConstantPool(),
14192 MVT::f32, false, false, false, 4);
14193 // Extend everything to 80 bits to force it to be done on x87.
14194 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
14195 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
14196 }
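// Editorial note on the fudge constant above (hedged): 0x5F800000 is the
// IEEE-754 single for 2^64. FILD reads the i64 buffer as signed, so a
// value with the sign bit set comes back as (input - 2^64); the SELECT
// picks the 2^64 slot of the constant pool when SignSet is true, and the
// x87 FADD at f80 restores the intended unsigned value before FP_ROUND.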
14198 std::pair<SDValue,SDValue>
14199 X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
14200 bool IsSigned, bool IsReplace) const {
14201 SDLoc DL(Op);
14203 EVT DstTy = Op.getValueType();
14205 if (!IsSigned && !isIntegerTypeFTOL(DstTy)) {
14206 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
14207 DstTy = MVT::i64;
14208 }
14210 assert(DstTy.getSimpleVT() <= MVT::i64 &&
14211 DstTy.getSimpleVT() >= MVT::i16 &&
14212 "Unknown FP_TO_INT to lower!");
14214 // These are really Legal.
14215 if (DstTy == MVT::i32 &&
14216 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14217 return std::make_pair(SDValue(), SDValue());
14218 if (Subtarget->is64Bit() &&
14219 DstTy == MVT::i64 &&
14220 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14221 return std::make_pair(SDValue(), SDValue());
14223 // We lower FP->int64 either into FISTP64 followed by a load from a temporary
14224 // stack slot, or into the FTOL runtime function.
14225 MachineFunction &MF = DAG.getMachineFunction();
14226 unsigned MemSize = DstTy.getSizeInBits()/8;
14227 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14228 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14230 unsigned Opc;
14231 if (!IsSigned && isIntegerTypeFTOL(DstTy))
14232 Opc = X86ISD::WIN_FTOL;
14233 else
14234 switch (DstTy.getSimpleVT().SimpleTy) {
14235 default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
14236 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
14237 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
14238 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
14239 }
14241 SDValue Chain = DAG.getEntryNode();
14242 SDValue Value = Op.getOperand(0);
14243 EVT TheVT = Op.getOperand(0).getValueType();
14244 // FIXME This causes a redundant load/store if the SSE-class value is already
14245 // in memory, such as if it is on the callstack.
14246 if (isScalarFPTypeInSSEReg(TheVT)) {
14247 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
14248 Chain = DAG.getStore(Chain, DL, Value, StackSlot,
14249 MachinePointerInfo::getFixedStack(SSFI),
14250 false, false, 0);
14251 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
14252 SDValue Ops[] = {
14253 Chain, StackSlot, DAG.getValueType(TheVT)
14254 };
14256 MachineMemOperand *MMO =
14257 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14258 MachineMemOperand::MOLoad, MemSize, MemSize);
14259 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
14260 Chain = Value.getValue(1);
14261 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14262 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14263 }
14265 MachineMemOperand *MMO =
14266 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14267 MachineMemOperand::MOStore, MemSize, MemSize);
14269 if (Opc != X86ISD::WIN_FTOL) {
14270 // Build the FP_TO_INT*_IN_MEM
14271 SDValue Ops[] = { Chain, Value, StackSlot };
14272 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
14273 Ops, DstTy, MMO);
14274 return std::make_pair(FIST, StackSlot);
14275 } else {
14276 SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL,
14277 DAG.getVTList(MVT::Other, MVT::Glue),
14278 Chain, Value);
14279 SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX,
14280 MVT::i32, ftol.getValue(1));
14281 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX,
14282 MVT::i32, eax.getValue(2));
14283 SDValue Ops[] = { eax, edx };
14284 SDValue pair = IsReplace
14285 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops)
14286 : DAG.getMergeValues(Ops, DL);
14287 return std::make_pair(pair, SDValue());
14288 }
14289 }
14291 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
14292 const X86Subtarget *Subtarget) {
14293 MVT VT = Op->getSimpleValueType(0);
14294 SDValue In = Op->getOperand(0);
14295 MVT InVT = In.getSimpleValueType();
14296 SDLoc dl(Op);
14298 // Optimize vectors in AVX mode:
14299 //
14300 // v8i16 -> v8i32
14301 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32.
14302 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
14303 // Concat upper and lower parts.
14304 //
14305 // v4i32 -> v4i64
14306 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64.
14307 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
14308 // Concat upper and lower parts.
14311 if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) &&
14312 ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) &&
14313 ((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
14314 return SDValue();
14316 if (Subtarget->hasInt256())
14317 return DAG.getNode(X86ISD::VZEXT, dl, VT, In);
14319 SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
14320 SDValue Undef = DAG.getUNDEF(InVT);
14321 bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
14322 SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14323 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14325 MVT HVT = MVT::getVectorVT(VT.getVectorElementType(),
14326 VT.getVectorNumElements()/2);
14328 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo);
14329 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi);
14331 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
14332 }
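// Editorial example of the unpack pattern above (hedged, illustrative):
// zero-extending v8i16 {a,...,h} without AVX2 unpacks In against a zero
// vector, giving {a,0,b,0,c,0,d,0} and {e,0,f,0,g,0,h,0}; bitcast to
// v4i32, each half already holds zero-extended 32-bit lanes, and the
// CONCAT_VECTORS rebuilds the v8i32 result (undef replaces the zero
// vector for ANY_EXTEND, where the high bits do not matter).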
14334 static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
14335 SelectionDAG &DAG) {
14336 MVT VT = Op->getSimpleValueType(0);
14337 SDValue In = Op->getOperand(0);
14338 MVT InVT = In.getSimpleValueType();
14339 SDLoc DL(Op);
14340 unsigned int NumElts = VT.getVectorNumElements();
14341 if (NumElts != 8 && NumElts != 16)
14342 return SDValue();
14344 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
14345 return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
14347 EVT ExtVT = (NumElts == 8)? MVT::v8i64 : MVT::v16i32;
14348 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14349 // Now we have only mask extension
14350 assert(InVT.getVectorElementType() == MVT::i1);
14351 SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
14352 const Constant *C = cast<ConstantSDNode>(Cst)->getConstantIntValue();
14353 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
14354 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14355 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14356 MachinePointerInfo::getConstantPool(),
14357 false, false, false, Alignment);
14359 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, DL, ExtVT, In, Ld);
14360 if (VT.is512BitVector())
14361 return Brcst;
14362 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Brcst);
14363 }
14365 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14366 SelectionDAG &DAG) {
14367 if (Subtarget->hasFp256()) {
14368 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14369 if (Res.getNode())
14370 return Res;
14371 }
14373 return SDValue();
14374 }
14376 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14377 SelectionDAG &DAG) {
14378 SDLoc DL(Op);
14379 MVT VT = Op.getSimpleValueType();
14380 SDValue In = Op.getOperand(0);
14381 MVT SVT = In.getSimpleValueType();
14383 if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
14384 return LowerZERO_EXTEND_AVX512(Op, DAG);
14386 if (Subtarget->hasFp256()) {
14387 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14388 if (Res.getNode())
14389 return Res;
14390 }
14392 assert(!VT.is256BitVector() || !SVT.is128BitVector() ||
14393 VT.getVectorNumElements() != SVT.getVectorNumElements());
14394 return SDValue();
14395 }
14397 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
14398 SDLoc DL(Op);
14399 MVT VT = Op.getSimpleValueType();
14400 SDValue In = Op.getOperand(0);
14401 MVT InVT = In.getSimpleValueType();
14403 if (VT == MVT::i1) {
14404 assert((InVT.isInteger() && (InVT.getSizeInBits() <= 64)) &&
14405 "Invalid scalar TRUNCATE operation");
14406 if (InVT.getSizeInBits() >= 32)
14407 return SDValue();
14408 In = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, In);
14409 return DAG.getNode(ISD::TRUNCATE, DL, VT, In);
14410 }
14411 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
14412 "Invalid TRUNCATE operation");
14414 if (InVT.is512BitVector() || VT.getVectorElementType() == MVT::i1) {
14415 if (VT.getVectorElementType().getSizeInBits() >= 8)
14416 return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
14418 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
14419 unsigned NumElts = InVT.getVectorNumElements();
14420 assert ((NumElts == 8 || NumElts == 16) && "Unexpected vector type");
14421 if (InVT.getSizeInBits() < 512) {
14422 MVT ExtVT = (NumElts == 16)? MVT::v16i32 : MVT::v8i64;
14423 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
14424 InVT = ExtVT;
14425 }
14427 SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
14428 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14429 SDValue CP = DAG.getConstantPool(C, getPointerTy());
14430 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14431 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14432 MachinePointerInfo::getConstantPool(),
14433 false, false, false, Alignment);
14434 SDValue OneV = DAG.getNode(X86ISD::VBROADCAST, DL, InVT, Ld);
14435 SDValue And = DAG.getNode(ISD::AND, DL, InVT, OneV, In);
14436 return DAG.getNode(X86ISD::TESTM, DL, VT, And, And);
14437 }
14439 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
14440 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
14441 if (Subtarget->hasInt256()) {
14442 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
14443 In = DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, In);
14444 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32),
14445 ShufMask);
14446 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
14447 DAG.getIntPtrConstant(0));
14448 }
14450 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14451 DAG.getIntPtrConstant(0));
14452 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14453 DAG.getIntPtrConstant(2));
14454 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14455 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14456 static const int ShufMask[] = {0, 2, 4, 6};
14457 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
14460 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
14461 // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
14462 if (Subtarget->hasInt256()) {
14463 In = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, In);
14465 SmallVector<SDValue,32> pshufbMask;
14466 for (unsigned i = 0; i < 2; ++i) {
14467 pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8));
14468 pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8));
14469 pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8));
14470 pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8));
14471 pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8));
14472 pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8));
14473 pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8));
14474 pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8));
14475 for (unsigned j = 0; j < 8; ++j)
14476 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
14477 }
14478 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
14479 In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
14480 In = DAG.getNode(ISD::BITCAST, DL, MVT::v4i64, In);
14482 static const int ShufMask[] = {0, 2, -1, -1};
14483 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
14484 ShufMask);
14485 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14486 DAG.getIntPtrConstant(0));
14487 return DAG.getNode(ISD::BITCAST, DL, VT, In);
14488 }
14490 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14491 DAG.getIntPtrConstant(0));
14493 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14494 DAG.getIntPtrConstant(4));
14496 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpLo);
14497 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpHi);
14499 // The PSHUFB mask:
14500 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
14501 -1, -1, -1, -1, -1, -1, -1, -1};
14503 SDValue Undef = DAG.getUNDEF(MVT::v16i8);
14504 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, Undef, ShufMask1);
14505 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, Undef, ShufMask1);
14507 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14508 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14510 // The MOVLHPS Mask:
14511 static const int ShufMask2[] = {0, 1, 4, 5};
14512 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
14513 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, res);
14514 }
14516 // Handle truncation of V256 to V128 using shuffles.
14517 if (!VT.is128BitVector() || !InVT.is256BitVector())
14518 return SDValue();
14520 assert(Subtarget->hasFp256() && "256-bit vector without AVX!");
14522 unsigned NumElems = VT.getVectorNumElements();
14523 MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
14525 SmallVector<int, 16> MaskVec(NumElems * 2, -1);
14526 // Prepare truncation shuffle mask
14527 for (unsigned i = 0; i != NumElems; ++i)
14528 MaskVec[i] = i * 2;
14529 SDValue V = DAG.getVectorShuffle(NVT, DL,
14530 DAG.getNode(ISD::BITCAST, DL, NVT, In),
14531 DAG.getUNDEF(NVT), &MaskVec[0]);
14532 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
14533 DAG.getIntPtrConstant(0));
14534 }
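// Editorial instance of the generic mask above (hedged): truncating
// v16i16 -> v16i8 bitcasts In to v32i8 and shuffles with the mask
// {0,2,4,...,30,-1,...}, keeping the low byte of every 16-bit lane
// (little endian); the low 128-bit subvector is then the v16i8 result.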
14536 SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
14537 SelectionDAG &DAG) const {
14538 assert(!Op.getSimpleValueType().isVector());
14540 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14541 /*IsSigned=*/ true, /*IsReplace=*/ false);
14542 SDValue FIST = Vals.first, StackSlot = Vals.second;
14543 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
14544 if (!FIST.getNode()) return Op;
14546 if (StackSlot.getNode())
14547 // Load the result.
14548 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14549 FIST, StackSlot, MachinePointerInfo(),
14550 false, false, false, 0);
14552 // The node is the result.
14553 return FIST;
14554 }
14556 SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
14557 SelectionDAG &DAG) const {
14558 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14559 /*IsSigned=*/ false, /*IsReplace=*/ false);
14560 SDValue FIST = Vals.first, StackSlot = Vals.second;
14561 assert(FIST.getNode() && "Unexpected failure");
14563 if (StackSlot.getNode())
14564 // Load the result.
14565 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14566 FIST, StackSlot, MachinePointerInfo(),
14567 false, false, false, 0);
14569 // The node is the result.
14570 return FIST;
14571 }
14573 static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
14574 SDLoc DL(Op);
14575 MVT VT = Op.getSimpleValueType();
14576 SDValue In = Op.getOperand(0);
14577 MVT SVT = In.getSimpleValueType();
14579 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
14581 return DAG.getNode(X86ISD::VFPEXT, DL, VT,
14582 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
14583 In, DAG.getUNDEF(SVT)));
14584 }
14586 /// The only differences between FABS and FNEG are the mask and the logic op.
14587 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
14588 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
14589 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
14590 "Wrong opcode for lowering FABS or FNEG.");
14592 bool IsFABS = (Op.getOpcode() == ISD::FABS);
14594 // If this is a FABS and it has an FNEG user, bail out to fold the combination
14595 // into an FNABS. We'll lower the FABS after that if it is still in use.
14596 if (IsFABS)
14597 for (SDNode *User : Op->uses())
14598 if (User->getOpcode() == ISD::FNEG)
14599 return Op;
14601 SDValue Op0 = Op.getOperand(0);
14602 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
14604 SDLoc dl(Op);
14605 MVT VT = Op.getSimpleValueType();
14606 // Assume scalar op for initialization; update for vector if needed.
14607 // Note that there are no scalar bitwise logical SSE/AVX instructions, so we
14608 // generate a 16-byte vector constant and logic op even for the scalar case.
14609 // Using a 16-byte mask allows folding the load of the mask with
14610 // the logic op, so it can save (~4 bytes) on code size.
14611 MVT EltVT = VT;
14612 unsigned NumElts = VT == MVT::f64 ? 2 : 4;
14613 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
14614 // decide if we should generate a 16-byte constant mask when we only need 4 or
14615 // 8 bytes for the scalar case.
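// Editorial note (hedged): concretely, for f64 FABS the splat mask is
// 2 x 0x7FFFFFFFFFFFFFFF and the op is AND (clear the sign bit); for
// FNEG it is 2 x 0x8000000000000000 with XOR (flip the sign bit); FNABS
// uses the sign-bit mask with OR, forcing the sign bit on.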
14616 if (VT.isVector()) {
14617 EltVT = VT.getVectorElementType();
14618 NumElts = VT.getVectorNumElements();
14619 }
14621 unsigned EltBits = EltVT.getSizeInBits();
14622 LLVMContext *Context = DAG.getContext();
14623 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
14624 APInt MaskElt =
14625 IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignBit(EltBits);
14626 Constant *C = ConstantInt::get(*Context, MaskElt);
14627 C = ConstantVector::getSplat(NumElts, C);
14628 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14629 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy());
14630 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
14631 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
14632 MachinePointerInfo::getConstantPool(),
14633 false, false, false, Alignment);
14635 if (VT.isVector()) {
14636 // For a vector, cast operands to a vector type, perform the logic op,
14637 // and cast the result back to the original value type.
14638 MVT VecVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
14639 SDValue MaskCasted = DAG.getNode(ISD::BITCAST, dl, VecVT, Mask);
14640 SDValue Operand = IsFNABS ?
14641 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0.getOperand(0)) :
14642 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0);
14643 unsigned BitOp = IsFABS ? ISD::AND : IsFNABS ? ISD::OR : ISD::XOR;
14644 return DAG.getNode(ISD::BITCAST, dl, VT,
14645 DAG.getNode(BitOp, dl, VecVT, Operand, MaskCasted));
14646 }
14648 // If not vector, then scalar.
14649 unsigned BitOp = IsFABS ? X86ISD::FAND : IsFNABS ? X86ISD::FOR : X86ISD::FXOR;
14650 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
14651 return DAG.getNode(BitOp, dl, VT, Operand, Mask);
14652 }
14654 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
14655 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14656 LLVMContext *Context = DAG.getContext();
14657 SDValue Op0 = Op.getOperand(0);
14658 SDValue Op1 = Op.getOperand(1);
14659 SDLoc dl(Op);
14660 MVT VT = Op.getSimpleValueType();
14661 MVT SrcVT = Op1.getSimpleValueType();
14663 // If second operand is smaller, extend it first.
14664 if (SrcVT.bitsLT(VT)) {
14665 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
14666 SrcVT = VT;
14667 }
14668 // And if it is bigger, shrink it first.
14669 if (SrcVT.bitsGT(VT)) {
14670 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1));
14671 SrcVT = VT;
14672 }
14674 // At this point the operands and the result should have the same
14675 // type, and that won't be f80 since that is not custom lowered.
14677 const fltSemantics &Sem =
14678 VT == MVT::f64 ? APFloat::IEEEdouble : APFloat::IEEEsingle;
14679 const unsigned SizeInBits = VT.getSizeInBits();
14681 SmallVector<Constant *, 4> CV(
14682 VT == MVT::f64 ? 2 : 4,
14683 ConstantFP::get(*Context, APFloat(Sem, APInt(SizeInBits, 0))));
14685 // First, clear all bits but the sign bit from the second operand (sign).
14686 CV[0] = ConstantFP::get(*Context,
14687 APFloat(Sem, APInt::getHighBitsSet(SizeInBits, 1)));
14688 Constant *C = ConstantVector::get(CV);
14689 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
14690 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
14691 MachinePointerInfo::getConstantPool(),
14692 false, false, false, 16);
14693 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);
14695 // Next, clear the sign bit from the first operand (magnitude).
14696 // If it's a constant, we can clear it here.
14697 if (ConstantFPSDNode *Op0CN = dyn_cast<ConstantFPSDNode>(Op0)) {
14698 APFloat APF = Op0CN->getValueAPF();
14699 // If the magnitude is a positive zero, the sign bit alone is enough.
14700 if (APF.isPosZero())
14701 return SignBit;
14702 APF.clearSign();
14703 CV[0] = ConstantFP::get(*Context, APF);
14704 } else {
14705 CV[0] = ConstantFP::get(
14706 *Context,
14707 APFloat(Sem, APInt::getLowBitsSet(SizeInBits, SizeInBits - 1)));
14708 }
14709 C = ConstantVector::get(CV);
14710 CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
14711 SDValue Val = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
14712 MachinePointerInfo::getConstantPool(),
14713 false, false, false, 16);
14714 // If the magnitude operand wasn't a constant, we need to AND out the sign.
14715 if (!isa<ConstantFPSDNode>(Op0))
14716 Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Val);
14718 // OR the magnitude value with the sign bit.
14719 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
14720 }
14722 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
14723 SDValue N0 = Op.getOperand(0);
14724 SDLoc dl(Op);
14725 MVT VT = Op.getSimpleValueType();
14727 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
14728 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
14729 DAG.getConstant(1, VT));
14730 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
14731 }
14733 // Check whether an OR'd tree is PTEST-able.
14734 static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
14735 SelectionDAG &DAG) {
14736 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
14738 if (!Subtarget->hasSSE41())
14739 return SDValue();
14741 if (!Op->hasOneUse())
14742 return SDValue();
14744 SDNode *N = Op.getNode();
14745 SDLoc DL(N);
14747 SmallVector<SDValue, 8> Opnds;
14748 DenseMap<SDValue, unsigned> VecInMap;
14749 SmallVector<SDValue, 8> VecIns;
14750 EVT VT = MVT::Other;
14752 // Recognize a special case where a vector is casted into wide integer to
14753 // test all 0s.
14754 Opnds.push_back(N->getOperand(0));
14755 Opnds.push_back(N->getOperand(1));
14757 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
14758 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
14759 // BFS traverse all OR'd operands.
14760 if (I->getOpcode() == ISD::OR) {
14761 Opnds.push_back(I->getOperand(0));
14762 Opnds.push_back(I->getOperand(1));
14763 // Re-evaluate the number of nodes to be traversed.
14764 e += 2; // 2 more nodes (LHS and RHS) are pushed.
14765 continue;
14766 }
14768 // Quit if this is not an EXTRACT_VECTOR_ELT.
14769 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
14770 return SDValue();
14772 // Quit if the index is not a constant.
14773 SDValue Idx = I->getOperand(1);
14774 if (!isa<ConstantSDNode>(Idx))
14775 return SDValue();
14777 SDValue ExtractedFromVec = I->getOperand(0);
14778 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
14779 if (M == VecInMap.end()) {
14780 VT = ExtractedFromVec.getValueType();
14781 // Quit if not 128/256-bit vector.
14782 if (!VT.is128BitVector() && !VT.is256BitVector())
14783 return SDValue();
14784 // Quit if not the same type.
14785 if (VecInMap.begin() != VecInMap.end() &&
14786 VT != VecInMap.begin()->first.getValueType())
14787 return SDValue();
14788 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
14789 VecIns.push_back(ExtractedFromVec);
14791 M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
14792 }
14794 assert((VT.is128BitVector() || VT.is256BitVector()) &&
14795 "Not extracted from 128-/256-bit vector.");
14797 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
14799 for (DenseMap<SDValue, unsigned>::const_iterator
14800 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
14801 // Quit if not all elements are used.
14802 if (I->second != FullMask)
14803 return SDValue();
14804 }
14806 EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
14808 // Cast all vectors into TestVT for PTEST.
14809 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
14810 VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]);
14812 // If more than one full vector is evaluated, OR them first before PTEST.
14813 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
14814 // Each iteration will OR 2 nodes and append the result until there is only
14815 // 1 node left, i.e. the final OR'd value of all vectors.
14816 SDValue LHS = VecIns[Slot];
14817 SDValue RHS = VecIns[Slot + 1];
14818 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
14819 }
14821 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
14822 VecIns.back(), VecIns.back());
14823 }
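// Editorial sketch of the pattern this matches (hedged): an OR-tree such
// as (or (extractelt v,0), (or (extractelt v,1), ... (extractelt v,3)))
// that reads every lane of a v4i32 collapses to PTEST v,v; PTEST sets ZF
// iff all 128/256 bits are zero, so a whole-vector compare against zero
// becomes a single flag-producing instruction.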
14825 /// \brief return true if \c Op has a use that doesn't just read flags.
14826 static bool hasNonFlagsUse(SDValue Op) {
14827 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
14828 ++UI) {
14829 SDNode *User = *UI;
14830 unsigned UOpNo = UI.getOperandNo();
14831 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
14832 // Look past the truncate.
14833 UOpNo = User->use_begin().getOperandNo();
14834 User = *User->use_begin();
14835 }
14837 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
14838 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
14839 return true;
14840 }
14841 return false;
14842 }
14844 /// Emit nodes that will be selected as "test Op0,Op0", or something
14845 /// equivalent.
14846 SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
14847 SelectionDAG &DAG) const {
14848 if (Op.getValueType() == MVT::i1)
14849 // KORTEST instruction should be selected
14850 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
14851 DAG.getConstant(0, Op.getValueType()));
14853 // CF and OF aren't always set the way we want. Determine which
14854 // of these we need.
14855 bool NeedCF = false;
14856 bool NeedOF = false;
14857 switch (X86CC) {
14858 default: break;
14859 case X86::COND_A: case X86::COND_AE:
14860 case X86::COND_B: case X86::COND_BE:
14861 NeedCF = true;
14862 break;
14863 case X86::COND_G: case X86::COND_GE:
14864 case X86::COND_L: case X86::COND_LE:
14865 case X86::COND_O: case X86::COND_NO: {
14866 // Check if we really need to set the
14867 // Overflow flag. If NoSignedWrap is present
14868 // that is not actually needed.
14869 switch (Op->getOpcode()) {
14870 case ISD::ADD:
14871 case ISD::SUB:
14872 case ISD::MUL:
14873 case ISD::SHL: {
14874 const BinaryWithFlagsSDNode *BinNode =
14875 cast<BinaryWithFlagsSDNode>(Op.getNode());
14876 if (BinNode->hasNoSignedWrap())
14877 break;
14878 }
14879 default:
14880 NeedOF = true;
14881 break;
14882 }
14883 break;
14884 }
14885 }
14886 // See if we can use the EFLAGS value from the operand instead of
14887 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
14888 // we prove that the arithmetic won't overflow, we can't use OF or CF.
14889 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
14890 // Emit a CMP with 0, which is the TEST pattern.
14891 //if (Op.getValueType() == MVT::i1)
14892 // return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op,
14893 // DAG.getConstant(0, MVT::i1));
14894 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
14895 DAG.getConstant(0, Op.getValueType()));
14897 unsigned Opcode = 0;
14898 unsigned NumOperands = 0;
14900 // Truncate operations may prevent the merge of the SETCC instruction
14901 // and the arithmetic instruction before it. Attempt to truncate the operands
14902 // of the arithmetic instruction and use a reduced bit-width instruction.
14903 bool NeedTruncation = false;
14904 SDValue ArithOp = Op;
14905 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
14906 SDValue Arith = Op->getOperand(0);
14907 // Both the trunc and the arithmetic op need to have one user each.
14908 if (Arith->hasOneUse())
14909 switch (Arith.getOpcode()) {
14910 default: break;
14911 case ISD::ADD:
14912 case ISD::SUB:
14913 case ISD::AND:
14914 case ISD::OR:
14915 case ISD::XOR:
14916 NeedTruncation = true;
14917 ArithOp = Arith;
14918 break;
14919 }
14920 }
14922 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
14923 // which may be the result of a CAST. We use the variable 'Op', which is the
14924 // non-casted variable when we check for possible users.
14925 switch (ArithOp.getOpcode()) {
14926 case ISD::ADD:
14927 // Due to an isel shortcoming, be conservative if this add is likely to be
14928 // selected as part of a load-modify-store instruction. When the root node
14929 // in a match is a store, isel doesn't know how to remap non-chain non-flag
14930 // uses of other nodes in the match, such as the ADD in this case. This
14931 // leads to the ADD being left around and reselected, with the result being
14932 // two adds in the output. Alas, even if none of our users are stores, that
14933 // doesn't prove we're O.K. Ergo, if we have any parents that aren't
14934 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
14935 // climbing the DAG back to the root, and it doesn't seem to be worth the
14936 // effort.
14937 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
14938 UE = Op.getNode()->use_end(); UI != UE; ++UI)
14939 if (UI->getOpcode() != ISD::CopyToReg &&
14940 UI->getOpcode() != ISD::SETCC &&
14941 UI->getOpcode() != ISD::STORE)
14942 goto default_case;
14944 if (ConstantSDNode *C =
14945 dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
14946 // An add of one will be selected as an INC.
14947 if (C->getAPIntValue() == 1 && !Subtarget->slowIncDec()) {
14948 Opcode = X86ISD::INC;
14949 NumOperands = 1;
14950 break;
14951 }
14953 // An add of negative one (subtract of one) will be selected as a DEC.
14954 if (C->getAPIntValue().isAllOnesValue() && !Subtarget->slowIncDec()) {
14955 Opcode = X86ISD::DEC;
14956 NumOperands = 1;
14957 break;
14958 }
14959 }
14961 // Otherwise use a regular EFLAGS-setting add.
14962 Opcode = X86ISD::ADD;
14963 NumOperands = 2;
14964 break;
14965 case ISD::SHL:
14966 case ISD::SRL:
14967 // If we have a constant logical shift that's only used in a comparison
14968 // against zero turn it into an equivalent AND. This allows turning it into
14969 // a TEST instruction later.
14970 if ((X86CC == X86::COND_E || X86CC == X86::COND_NE) && Op->hasOneUse() &&
14971 isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
14972 EVT VT = Op.getValueType();
14973 unsigned BitWidth = VT.getSizeInBits();
14974 unsigned ShAmt = Op->getConstantOperandVal(1);
14975 if (ShAmt >= BitWidth) // Avoid undefined shifts.
14976 break;
14977 APInt Mask = ArithOp.getOpcode() == ISD::SRL
14978 ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
14979 : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
14980 if (!Mask.isSignedIntN(32)) // Avoid large immediates.
14981 break;
14982 SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
14983 DAG.getConstant(Mask, VT));
14984 DAG.ReplaceAllUsesWith(Op, New);
14985 Op = New;
14986 }
14987 break;
14989 case ISD::AND:
14990 // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
14991 // because a TEST instruction will be better.
14992 if (!hasNonFlagsUse(Op))
14993 break;
14994 // FALL THROUGH
14995 case ISD::SUB:
14996 case ISD::OR:
14997 case ISD::XOR:
14998 // Due to the ISEL shortcoming noted above, be conservative if this op is
14999 // likely to be selected as part of a load-modify-store instruction.
15000 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15001 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15002 if (UI->getOpcode() == ISD::STORE)
15003 goto default_case;
15005 // Otherwise use a regular EFLAGS-setting instruction.
15006 switch (ArithOp.getOpcode()) {
15007 default: llvm_unreachable("unexpected operator!");
15008 case ISD::SUB: Opcode = X86ISD::SUB; break;
15009 case ISD::XOR: Opcode = X86ISD::XOR; break;
15010 case ISD::AND: Opcode = X86ISD::AND; break;
15011 case ISD::OR: {
15012 if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
15013 SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG);
15014 if (EFLAGS.getNode())
15015 return EFLAGS;
15016 }
15017 Opcode = X86ISD::OR;
15018 break;
15019 }
15020 }
15022 NumOperands = 2;
15023 break;
15024 case X86ISD::ADD:
15025 case X86ISD::SUB:
15026 case X86ISD::INC:
15027 case X86ISD::DEC:
15028 case X86ISD::OR:
15029 case X86ISD::XOR:
15030 case X86ISD::AND:
15031 return SDValue(Op.getNode(), 1);
15032 default:
15033 default_case:
15034 break;
15035 }
15037 // If we found that truncation is beneficial, perform the truncation and
15038 // update 'Op'.
15039 if (NeedTruncation) {
15040 EVT VT = Op.getValueType();
15041 SDValue WideVal = Op->getOperand(0);
15042 EVT WideVT = WideVal.getValueType();
15043 unsigned ConvertedOp = 0;
15044 // Use a target machine opcode to prevent further DAGCombine
15045 // optimizations that may separate the arithmetic operations
15046 // from the setcc node.
15047 switch (WideVal.getOpcode()) {
15048 default: break;
15049 case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
15050 case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
15051 case ISD::AND: ConvertedOp = X86ISD::AND; break;
15052 case ISD::OR: ConvertedOp = X86ISD::OR; break;
15053 case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
15054 }
15056 if (ConvertedOp) {
15057 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15058 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
15059 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
15060 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
15061 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1);
15062 }
15063 }
15064 }
15066 if (Opcode == 0)
15067 // Emit a CMP with 0, which is the TEST pattern.
15068 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15069 DAG.getConstant(0, Op.getValueType()));
15071 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
15072 SmallVector<SDValue, 4> Ops;
15073 for (unsigned i = 0; i != NumOperands; ++i)
15074 Ops.push_back(Op.getOperand(i));
15076 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
15077 DAG.ReplaceAllUsesWith(Op, New);
15078 return SDValue(New.getNode(), 1);
15079 }
15081 /// Emit nodes that will be selected as "cmp Op0,Op1", or something
15082 /// equivalent.
15083 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
15084 SDLoc dl, SelectionDAG &DAG) const {
15085 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) {
15086 if (C->getAPIntValue() == 0)
15087 return EmitTest(Op0, X86CC, dl, DAG);
15089 if (Op0.getValueType() == MVT::i1)
15090 llvm_unreachable("Unexpected comparison operation for MVT::i1 operands");
15091 }
15093 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
15094 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
15095 // Do the comparison at i32 if it's smaller, except on Atom.
15096 // This avoids subregister aliasing issues. Keep the smaller reference
15097 // if we're optimizing for size, however, as that'll allow better folding
15098 // of memory operations.
15099 if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
15100 !DAG.getMachineFunction().getFunction()->getAttributes().hasAttribute(
15101 AttributeSet::FunctionIndex, Attribute::MinSize) &&
15102 !Subtarget->isAtom()) {
15103 unsigned ExtendOp =
15104 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
15105 Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
15106 Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
15108 // Use SUB instead of CMP to enable CSE between SUB and CMP.
15109 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
15110 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs,
15111 Op0, Op1);
15112 return SDValue(Sub.getNode(), 1);
15113 }
15114 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
15115 }
15117 /// Convert a comparison if required by the subtarget.
15118 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
15119 SelectionDAG &DAG) const {
15120 // If the subtarget does not support the FUCOMI instruction, floating-point
15121 // comparisons have to be converted.
15122 if (Subtarget->hasCMov() ||
15123 Cmp.getOpcode() != X86ISD::CMP ||
15124 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
15125 !Cmp.getOperand(1).getValueType().isFloatingPoint())
15126 return Cmp;
15128 // The instruction selector will select an FUCOM instruction instead of
15129 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
15130 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
15131 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
15132 SDLoc dl(Cmp);
15133 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
15134 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
15135 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
15136 DAG.getConstant(8, MVT::i8));
15137 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
15138 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
15139 }
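// Editorial walk-through of the sequence above (hedged): FUCOM writes the
// condition bits C0/C2/C3 into FPSW bits 8, 10 and 14; FNSTSW16r copies
// FPSW into AX, the SRL by 8 plus TRUNCATE move those bits into an i8
// (the eventual AH), and SAHF loads AH into EFLAGS so the ordinary
// SETcc/Jcc patterns can test the comparison result.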
15141 /// The minimum architected relative accuracy is 2^-12. We need one
15142 /// Newton-Raphson step to have a good float result (24 bits of precision).
15143 SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
15144 DAGCombinerInfo &DCI,
15145 unsigned &RefinementSteps,
15146 bool &UseOneConstNR) const {
15147 // FIXME: We should use instruction latency models to calculate the cost of
15148 // each potential sequence, but this is very hard to do reliably because
15149 // at least Intel's Core* chips have variable timing based on the number of
15150 // significant digits in the divisor and/or sqrt operand.
15151 if (!Subtarget->useSqrtEst())
15152 return SDValue();
15154 EVT VT = Op.getValueType();
15156 // SSE1 has rsqrtss and rsqrtps.
15157 // TODO: Add support for AVX512 (v16f32).
15158 // It is likely not profitable to do this for f64 because a double-precision
15159 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
15160 // instructions: convert to single, rsqrtss, convert back to double, refine
15161 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
15162 // along with FMA, this could be a throughput win.
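// Editorial note (hedged): the refinement applied by the generic combiner
// to this estimate is the Newton-Raphson step
// x1 = x0 * (1.5 - 0.5 * a * x0 * x0),
// which roughly doubles the ~12 bits of RSQRT{SS,PS} accuracy to the ~24
// bits a float carries; hence RefinementSteps = 1 below.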
15163 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15164 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15165 RefinementSteps = 1;
15166 UseOneConstNR = false;
15167 return DCI.DAG.getNode(X86ISD::FRSQRT, SDLoc(Op), VT, Op);
15168 }
15169 return SDValue();
15170 }
15172 /// The minimum architected relative accuracy is 2^-12. We need one
15173 /// Newton-Raphson step to have a good float result (24 bits of precision).
15174 SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
15175 DAGCombinerInfo &DCI,
15176 unsigned &RefinementSteps) const {
15177 // FIXME: We should use instruction latency models to calculate the cost of
15178 // each potential sequence, but this is very hard to do reliably because
15179 // at least Intel's Core* chips have variable timing based on the number of
15180 // significant digits in the divisor.
15181 if (!Subtarget->useReciprocalEst())
15182 return SDValue();
15184 EVT VT = Op.getValueType();
15186 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
15187 // TODO: Add support for AVX512 (v16f32).
15188 // It is likely not profitable to do this for f64 because a double-precision
15189 // reciprocal estimate with refinement on x86 prior to FMA requires
15190 // 15 instructions: convert to single, rcpss, convert back to double, refine
15191 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
15192 // along with FMA, this could be a throughput win.
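// Editorial note (hedged): the corresponding Newton-Raphson step for the
// reciprocal is x1 = x0 * (2 - a * x0), applied
// ReciprocalEstimateRefinementSteps times by target-independent code.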
15193 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15194 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15195 RefinementSteps = ReciprocalEstimateRefinementSteps;
15196 return DCI.DAG.getNode(X86ISD::FRCP, SDLoc(Op), VT, Op);
15197 }
15198 return SDValue();
15199 }
15201 static bool isAllOnes(SDValue V) {
15202 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
15203 return C && C->isAllOnesValue();
15204 }
15206 /// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
15207 /// if it's possible.
15208 SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
15209 SDLoc dl, SelectionDAG &DAG) const {
15210 SDValue Op0 = And.getOperand(0);
15211 SDValue Op1 = And.getOperand(1);
15212 if (Op0.getOpcode() == ISD::TRUNCATE)
15213 Op0 = Op0.getOperand(0);
15214 if (Op1.getOpcode() == ISD::TRUNCATE)
15215 Op1 = Op1.getOperand(0);
15217 SDValue LHS, RHS;
15218 if (Op1.getOpcode() == ISD::SHL)
15219 std::swap(Op0, Op1);
15220 if (Op0.getOpcode() == ISD::SHL) {
15221 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0)))
15222 if (And00C->getZExtValue() == 1) {
15223 // If we looked past a truncate, check that it's only truncating away
15224 // sign bits.
15225 unsigned BitWidth = Op0.getValueSizeInBits();
15226 unsigned AndBitWidth = And.getValueSizeInBits();
15227 if (BitWidth > AndBitWidth) {
15228 APInt Zeros, Ones;
15229 DAG.computeKnownBits(Op0, Zeros, Ones);
15230 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
15231 return SDValue();
15232 }
15233 LHS = Op1;
15234 RHS = Op0.getOperand(1);
15235 }
15236 } else if (Op1.getOpcode() == ISD::Constant) {
15237 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
15238 uint64_t AndRHSVal = AndRHS->getZExtValue();
15239 SDValue AndLHS = Op0;
15241 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
15242 LHS = AndLHS.getOperand(0);
15243 RHS = AndLHS.getOperand(1);
15244 }
15246 // Use BT if the immediate can't be encoded in a TEST instruction.
15247 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
15248 LHS = AndLHS;
15249 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType());
15250 }
15251 }
15253 if (LHS.getNode()) {
15254 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
15255 // instruction. Since the shift amount is in-range-or-undefined, we know
15256 // that doing a bittest on the i32 value is ok. We extend to i32 because
15257 // the encoding for the i16 version is larger than the i32 version.
15258 // Also promote i16 to i32 for performance / code size reason.
15259 if (LHS.getValueType() == MVT::i8 ||
15260 LHS.getValueType() == MVT::i16)
15261 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
15263 // If the operand types disagree, extend the shift amount to match. Since
15264 // BT ignores high bits (like shifts) we can use anyextend.
15265 if (LHS.getValueType() != RHS.getValueType())
15266 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
15268 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
15269 X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
15270 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15271 DAG.getConstant(Cond, MVT::i8), BT);
15272 }
15274 return SDValue();
15275 }
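// Editorial example of the transform above (hedged): for
// "(x & (1 << n)) != 0" the shift form matches and emits BT x, n with
// COND_B (CF set iff the tested bit is one); for "x & (1ULL << 40)" the
// constant form fires because TEST cannot encode the 64-bit immediate,
// and BT is emitted with the bit index 40 instead.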
15277 /// \brief Turns an ISD::CondCode into a value suitable for SSE floating-point
15278 /// mask CC.
15279 static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
15280 SDValue &Op1) {
15281 unsigned SSECC;
15282 bool Swap = false;
15284 // SSE Condition code mapping:
15285 // 0 - EQ
15286 // 1 - LT
15287 // 2 - LE
15288 // 3 - UNORD
15289 // 4 - NEQ
15290 // 5 - NLT
15291 // 6 - NLE
15292 // 7 - ORD
15293 switch (SetCCOpcode) {
15294 default: llvm_unreachable("Unexpected SETCC condition");
15295 case ISD::SETOEQ:
15296 case ISD::SETEQ: SSECC = 0; break;
15297 case ISD::SETOGT:
15298 case ISD::SETGT: Swap = true; // Fallthrough
15299 case ISD::SETLT:
15300 case ISD::SETOLT: SSECC = 1; break;
15301 case ISD::SETOGE:
15302 case ISD::SETGE: Swap = true; // Fallthrough
15303 case ISD::SETLE:
15304 case ISD::SETOLE: SSECC = 2; break;
15305 case ISD::SETUO: SSECC = 3; break;
15306 case ISD::SETUNE:
15307 case ISD::SETNE: SSECC = 4; break;
15308 case ISD::SETULE: Swap = true; // Fallthrough
15309 case ISD::SETUGE: SSECC = 5; break;
15310 case ISD::SETULT: Swap = true; // Fallthrough
15311 case ISD::SETUGT: SSECC = 6; break;
15312 case ISD::SETO: SSECC = 7; break;
15313 case ISD::SETUEQ:
15314 case ISD::SETONE: SSECC = 8; break;
15315 }
15316 if (Swap)
15317 std::swap(Op0, Op1);
15319 return SSECC;
15320 }
15322 // Lower256IntVSETCC - Break a 256-bit integer VSETCC into two new 128-bit
15323 // ones, and then concatenate the result back.
15324 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
15325 MVT VT = Op.getSimpleValueType();
15327 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
15328 "Unsupported value type for operation");
15330 unsigned NumElems = VT.getVectorNumElements();
15331 SDLoc dl(Op);
15332 SDValue CC = Op.getOperand(2);
15334 // Extract the LHS vectors
15335 SDValue LHS = Op.getOperand(0);
15336 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
15337 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
15339 // Extract the RHS vectors
15340 SDValue RHS = Op.getOperand(1);
15341 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
15342 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
15344 // Issue the operation on the smaller types and concatenate the result back
15345 MVT EltVT = VT.getVectorElementType();
15346 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
15347 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
15348 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
15349 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
15350 }
15352 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
15353 const X86Subtarget *Subtarget) {
15354 SDValue Op0 = Op.getOperand(0);
15355 SDValue Op1 = Op.getOperand(1);
15356 SDValue CC = Op.getOperand(2);
15357 MVT VT = Op.getSimpleValueType();
15358 SDLoc dl(Op);
15360 assert(Op0.getValueType().getVectorElementType().getSizeInBits() >= 8 &&
15361 Op.getValueType().getScalarType() == MVT::i1 &&
15362 "Cannot set masked compare for this operation");
15364 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15365 unsigned Opc = 0;
15366 bool Unsigned = false;
15367 bool Swap = false;
15368 unsigned SSECC;
15369 switch (SetCCOpcode) {
15370 default: llvm_unreachable("Unexpected SETCC condition");
15371 case ISD::SETNE: SSECC = 4; break;
15372 case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
15373 case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
15374 case ISD::SETLT: Swap = true; //fall-through
15375 case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
15376 case ISD::SETULT: SSECC = 1; Unsigned = true; break;
15377 case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
15378 case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
15379 case ISD::SETULE: Unsigned = true; //fall-through
15380 case ISD::SETLE: SSECC = 2; break;
15381 }
15383 if (Swap)
15384 std::swap(Op0, Op1);
15385 if (Opc)
15386 return DAG.getNode(Opc, dl, VT, Op0, Op1);
15387 Opc = Unsigned ? X86ISD::CMPMU : X86ISD::CMPM;
15388 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15389 DAG.getConstant(SSECC, MVT::i8));
15390 }
15392 /// \brief Try to turn a VSETULT into a VSETULE by modifying its second
15393 /// operand \p Op1. If non-trivial (for example because it's not constant)
15394 /// return an empty value.
15395 static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
15396 {
15397 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
15398 if (!BV)
15399 return SDValue();
15401 MVT VT = Op1.getSimpleValueType();
15402 MVT EVT = VT.getVectorElementType();
15403 unsigned n = VT.getVectorNumElements();
15404 SmallVector<SDValue, 8> ULTOp1;
15406 for (unsigned i = 0; i < n; ++i) {
15407 ConstantSDNode *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
15408 if (!Elt || Elt->isOpaque() || Elt->getValueType(0) != EVT)
15409 return SDValue();
15411 // Avoid underflow.
15412 APInt Val = Elt->getAPIntValue();
15413 if (Val == 0)
15414 return SDValue();
15416 ULTOp1.push_back(DAG.getConstant(Val - 1, EVT));
15417 }
15419 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
15420 }
15422 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
15423 SelectionDAG &DAG) {
15424 SDValue Op0 = Op.getOperand(0);
15425 SDValue Op1 = Op.getOperand(1);
15426 SDValue CC = Op.getOperand(2);
15427 MVT VT = Op.getSimpleValueType();
15428 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15429 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
15430 SDLoc dl(Op);
15432 if (isFP) {
15433 #ifndef NDEBUG
15434 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
15435 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
15436 #endif
15438 unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
15439 unsigned Opc = X86ISD::CMPP;
15440 if (Subtarget->hasAVX512() && VT.getVectorElementType() == MVT::i1) {
15441 assert(VT.getVectorNumElements() <= 16);
15442 Opc = X86ISD::CMPM;
15443 }
15444 // In the two special cases we can't handle, emit two comparisons.
15445 if (SSECC == 8) {
15446 unsigned CC0, CC1;
15447 unsigned CombineOpc;
15448 if (SetCCOpcode == ISD::SETUEQ) {
15449 CC0 = 3; CC1 = 0; CombineOpc = ISD::OR;
15450 } else {
15451 assert(SetCCOpcode == ISD::SETONE);
15452 CC0 = 7; CC1 = 4; CombineOpc = ISD::AND;
15453 }
15455 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15456 DAG.getConstant(CC0, MVT::i8));
15457 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15458 DAG.getConstant(CC1, MVT::i8));
15459 return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
15460 }
15461 // Handle all other FP comparisons here.
15462 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15463 DAG.getConstant(SSECC, MVT::i8));
15464 }
15466 // Break 256-bit integer vector compare into smaller ones.
15467 if (VT.is256BitVector() && !Subtarget->hasInt256())
15468 return Lower256IntVSETCC(Op, DAG);
15470 bool MaskResult = (VT.getVectorElementType() == MVT::i1);
15471 EVT OpVT = Op1.getValueType();
15472 if (Subtarget->hasAVX512()) {
15473 if (Op1.getValueType().is512BitVector() ||
15474 (Subtarget->hasBWI() && Subtarget->hasVLX()) ||
15475 (MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32))
15476 return LowerIntVSETCC_AVX512(Op, DAG, Subtarget);
15478 // In the AVX-512 architecture, setcc returns a mask with i1 elements,
15479 // but there is no compare instruction for i8 and i16 elements in KNL.
15480 // We are not talking about 512-bit operands in this case, these
15481 // types are illegal.
15482 if (MaskResult &&
15483 (OpVT.getVectorElementType().getSizeInBits() < 32 &&
15484 OpVT.getVectorElementType().getSizeInBits() >= 8))
15485 return DAG.getNode(ISD::TRUNCATE, dl, VT,
15486 DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
15487 }
15489 // We are handling one of the integer comparisons here. Since SSE only has
15490 // GT and EQ comparisons for integer, swapping operands and multiple
15491 // operations may be required for some comparisons.
15492 unsigned Opc;
15493 bool Swap = false, Invert = false, FlipSigns = false, MinMax = false;
15494 bool Subus = false;
15496 switch (SetCCOpcode) {
15497 default: llvm_unreachable("Unexpected SETCC condition");
15498 case ISD::SETNE: Invert = true;
15499 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
15500 case ISD::SETLT: Swap = true;
15501 case ISD::SETGT: Opc = X86ISD::PCMPGT; break;
15502 case ISD::SETGE: Swap = true;
15503 case ISD::SETLE: Opc = X86ISD::PCMPGT;
15504 Invert = true; break;
15505 case ISD::SETULT: Swap = true;
15506 case ISD::SETUGT: Opc = X86ISD::PCMPGT;
15507 FlipSigns = true; break;
15508 case ISD::SETUGE: Swap = true;
15509 case ISD::SETULE: Opc = X86ISD::PCMPGT;
15510 FlipSigns = true; Invert = true; break;
15513 // Special case: Use min/max operations for SETULE/SETUGE
15514 MVT VET = VT.getVectorElementType();
15515 bool hasMinMax =
15516 (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
15517 || (Subtarget->hasSSE2() && (VET == MVT::i8));
15519 if (hasMinMax) {
15520 switch (SetCCOpcode) {
15521 default: break;
15522 case ISD::SETULE: Opc = X86ISD::UMIN; MinMax = true; break;
15523 case ISD::SETUGE: Opc = X86ISD::UMAX; MinMax = true; break;
15524 }
15526 if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
15527 }
15529 bool hasSubus = Subtarget->hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
15530 if (!MinMax && hasSubus) {
15531 // As another special case, use PSUBUS[BW] when it's profitable. E.g. for
15533 // t = psubus Op0, Op1
15534 // pcmpeq t, <0..0>
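// Editorial note on why PSUBUS works here (hedged): unsigned x <= y holds
// iff the saturating difference x -u y is 0. E.g. for i8 lanes, 5 <= 9
// gives psubus(5,9) = 0 while 9 <= 5 gives psubus(9,5) = 4, so comparing
// the PSUBUS result against an all-zeros vector implements SETULE without
// flipping sign bits or inverting the result.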
15535 switch (SetCCOpcode) {
15536 default: break;
15537 case ISD::SETULT: {
15538 // If the comparison is against a constant we can turn this into a
15539 // setule. With psubus, setule does not require a swap. This is
15540 // beneficial because the constant in the register is no longer
15541 // clobbered as the destination, so it can be hoisted out of a loop.
15542 // Only do this pre-AVX since vpcmp* is no longer destructive.
15543 if (Subtarget->hasAVX())
15544 break;
15545 SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
15546 if (ULEOp1.getNode()) {
15547 Op1 = ULEOp1;
15548 Subus = true; Invert = false; Swap = false;
15549 }
15550 break;
15551 }
15552 // Psubus is better than flip-sign because it requires no inversion.
15553 case ISD::SETUGE: Subus = true; Invert = false; Swap = true; break;
15554 case ISD::SETULE: Subus = true; Invert = false; Swap = false; break;
15555 }
15557 if (Subus) {
15558 Opc = X86ISD::SUBUS;
15559 FlipSigns = false;
15560 Invert = false;
15561 }
15562 }
15563 if (Swap)
15564 std::swap(Op0, Op1);
15566 // Check that the operation in question is available (most are plain SSE2,
15567 // but PCMPGTQ and PCMPEQQ have different requirements).
15568 if (VT == MVT::v2i64) {
15569 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) {
15570 assert(Subtarget->hasSSE2() && "Don't know how to lower!");
15572 // First cast everything to the right type.
15573 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15574 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15576 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15577 // bits of the inputs before performing those operations. The lower
15578 // compare is always unsigned.
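      // (Illustrative sketch: flipping the sign bits turns an unsigned
      // compare into a signed one, e.g. 0x00000000 <u 0xFFFFFFFF becomes
      // 0x80000000 <s 0x7FFFFFFF, which PCMPGTD can evaluate directly.)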
15579       SDValue SB;
15580       if (FlipSigns) {
15581         SB = DAG.getConstant(0x80000000U, MVT::v4i32);
15582       } else {
15583         SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32);
15584         SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32);
15585         SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
15586                          Sign, Zero, Sign, Zero);
15587       }
15588 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
15589 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
15591 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
15592 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
15593 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
15595 // Create masks for only the low parts/high parts of the 64 bit integers.
15596 static const int MaskHi[] = { 1, 1, 3, 3 };
15597 static const int MaskLo[] = { 0, 0, 2, 2 };
15598 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
15599 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
15600 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
15602 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
15603 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
15605       if (Invert)
15606         Result = DAG.getNOT(dl, Result, MVT::v4i32);
15608 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
15611 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) {
15612 // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
15613 // pcmpeqd + pshufd + pand.
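      // For example, for each 64-bit lane of x == y (sketch):
      //   e = PCMPEQD x, y            ; 32-bit lane equality
      //   s = PSHUFD e, <1,0,3,2>     ; swap the dwords within each pair
      //   r = PAND e, s               ; both halves must match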
15614 assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!");
15616 // First cast everything to the right type.
15617 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15618 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15621 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
15623 // Make sure the lower and upper halves are both all-ones.
15624 static const int Mask[] = { 1, 0, 3, 2 };
15625 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
15626 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
15628       if (Invert)
15629         Result = DAG.getNOT(dl, Result, MVT::v4i32);
15631 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
15635 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15636 // bits of the inputs before performing those operations.
15637   if (FlipSigns) {
15638     EVT EltVT = VT.getVectorElementType();
15639     SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT);
15640     Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB);
15641     Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB);
15642   }
15644 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
15646   // If the logical-not of the result is required, perform that now.
15647   if (Invert)
15648     Result = DAG.getNOT(dl, Result, VT);
15650   if (MinMax)
15651     Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
15653   if (Subus)
15654     Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
15655                          getZeroVector(VT, Subtarget, DAG, dl));
15657   return Result;
15658 }
15660 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
15662 MVT VT = Op.getSimpleValueType();
15664 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
15666 assert(((!Subtarget->hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
15667 && "SetCC type must be 8-bit or 1-bit integer");
15668 SDValue Op0 = Op.getOperand(0);
15669 SDValue Op1 = Op.getOperand(1);
15670   SDLoc dl(Op);
15671   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
15673 // Optimize to BT if possible.
15674 // Lower (X & (1 << N)) == 0 to BT(X, N).
15675 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
15676 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
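  // For instance, (x & (1 << 5)) != 0 becomes BT x, 5 (sketch): BT copies
  // bit 5 of x into CF, and the result is then read back with a SETCC on
  // the carry condition.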
15677 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
15678 Op1.getOpcode() == ISD::Constant &&
15679 cast<ConstantSDNode>(Op1)->isNullValue() &&
15680 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15681 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
15682     if (NewSetCC.getNode()) {
15683       if (VT == MVT::i1)
15684         return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
15685       return NewSetCC;
15686     }
15687   }
15689   // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms
15690   // of these.
15691 if (Op1.getOpcode() == ISD::Constant &&
15692 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
15693 cast<ConstantSDNode>(Op1)->isNullValue()) &&
15694 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15696 // If the input is a setcc, then reuse the input setcc or use a new one with
15697 // the inverted condition.
15698 if (Op0.getOpcode() == X86ISD::SETCC) {
15699 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
15700 bool Invert = (CC == ISD::SETNE) ^
15701         cast<ConstantSDNode>(Op1)->isNullValue();
15702       if (!Invert)
15703         return Op0;
15705       CCode = X86::GetOppositeBranchCondition(CCode);
15706 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15707 DAG.getConstant(CCode, MVT::i8),
15708 Op0.getOperand(1));
15709       if (VT == MVT::i1)
15710         return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
15711       return SetCC;
15712     }
15713   }
15714 if ((Op0.getValueType() == MVT::i1) && (Op1.getOpcode() == ISD::Constant) &&
15715 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1) &&
15716 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15718 ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
15719 return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, MVT::i1), NewCC);
15722 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
15723 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
15724   if (X86CC == X86::COND_INVALID)
15725     return SDValue();
15727 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
15728 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
15729 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15730 DAG.getConstant(X86CC, MVT::i8), EFLAGS);
15731   if (VT == MVT::i1)
15732     return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
15733   return SetCC;
15734 }
15736 // isX86LogicalCmp - Return true if opcode is an X86 logical comparison.
15737 static bool isX86LogicalCmp(SDValue Op) {
15738 unsigned Opc = Op.getNode()->getOpcode();
15739 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
15740       Opc == X86ISD::SAHF)
15741     return true;
15742 if (Op.getResNo() == 1 &&
15743 (Opc == X86ISD::ADD ||
15744 Opc == X86ISD::SUB ||
15745 Opc == X86ISD::ADC ||
15746 Opc == X86ISD::SBB ||
15747 Opc == X86ISD::SMUL ||
15748 Opc == X86ISD::UMUL ||
15749 Opc == X86ISD::INC ||
15750 Opc == X86ISD::DEC ||
15751 Opc == X86ISD::OR ||
15752 Opc == X86ISD::XOR ||
15753        Opc == X86ISD::AND))
15754     return true;
15756   if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
15757     return true;
15759   return false;
15760 }
15762 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
15763   if (V.getOpcode() != ISD::TRUNCATE)
15764     return false;
15766 SDValue VOp0 = V.getOperand(0);
15767 unsigned InBits = VOp0.getValueSizeInBits();
15768 unsigned Bits = V.getValueSizeInBits();
15769 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
15772 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
15773 bool addTest = true;
15774 SDValue Cond = Op.getOperand(0);
15775 SDValue Op1 = Op.getOperand(1);
15776 SDValue Op2 = Op.getOperand(2);
15777   SDLoc DL(Op);
15778   EVT VT = Op1.getValueType();
15779   SDValue CC;
15781 // Lower fp selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
15782 // are available. Otherwise fp cmovs get lowered into a less efficient branch
15783 // sequence later on.
15784 if (Cond.getOpcode() == ISD::SETCC &&
15785 ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
15786 (Subtarget->hasSSE1() && VT == MVT::f32)) &&
15787 VT == Cond.getOperand(0).getValueType() && Cond->hasOneUse()) {
15788 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
15789 int SSECC = translateX86FSETCC(
15790 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
15792     if (SSECC != 8) {
15793       if (Subtarget->hasAVX512()) {
15794 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
15795 DAG.getConstant(SSECC, MVT::i8));
15796 return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
15798 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
15799 DAG.getConstant(SSECC, MVT::i8));
15800 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
15801 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
15802 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
15806 if (Cond.getOpcode() == ISD::SETCC) {
15807 SDValue NewCond = LowerSETCC(Cond, DAG);
15808     if (NewCond.getNode())
15809       Cond = NewCond;
15810   }
15812 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
15813 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
15814 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
15815 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
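  // Worked example for (select (x == 0), -1, y), as lowered below (sketch):
  //   CMP x, 1 sets CF exactly when x == 0 (unsigned x < 1);
  //   SETCC_CARRY (an sbb reg,reg idiom) widens CF into 0 or -1;
  //   OR-ing that mask with y then yields -1 when x == 0 and y otherwise.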
15816 if (Cond.getOpcode() == X86ISD::SETCC &&
15817 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
15818 isZero(Cond.getOperand(1).getOperand(1))) {
15819 SDValue Cmp = Cond.getOperand(1);
15821 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
15823 if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
15824 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
15825 SDValue Y = isAllOnes(Op2) ? Op1 : Op2;
15827 SDValue CmpOp0 = Cmp.getOperand(0);
15828 // Apply further optimizations for special cases
15829 // (select (x != 0), -1, 0) -> neg & sbb
15830 // (select (x == 0), 0, -1) -> neg & sbb
15831 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y))
15832 if (YC->isNullValue() &&
15833 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) {
15834 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
15835 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
15836                                       DAG.getConstant(0, CmpOp0.getValueType()),
15837                                       CmpOp0);
15838             SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
15839                                       DAG.getConstant(X86::COND_B, MVT::i8),
15840                                       SDValue(Neg.getNode(), 1));
15841             return Res;
15842           }
15844 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
15845 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
15846 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
15848 SDValue Res = // Res = 0 or -1.
15849 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
15850 DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
15852 if (isAllOnes(Op1) != (CondCode == X86::COND_E))
15853 Res = DAG.getNOT(DL, Res, Res.getValueType());
15855 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
15856 if (!N2C || !N2C->isNullValue())
15857         Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
15858       return Res;
15859     }
15860   }
15862 // Look past (and (setcc_carry (cmp ...)), 1).
15863 if (Cond.getOpcode() == ISD::AND &&
15864 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
15865 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
15866 if (C && C->getAPIntValue() == 1)
15867 Cond = Cond.getOperand(0);
15870 // If condition flag is set by a X86ISD::CMP, then use it as the condition
15871 // setting operand in place of the X86ISD::SETCC.
15872 unsigned CondOpcode = Cond.getOpcode();
15873 if (CondOpcode == X86ISD::SETCC ||
15874 CondOpcode == X86ISD::SETCC_CARRY) {
15875 CC = Cond.getOperand(0);
15877 SDValue Cmp = Cond.getOperand(1);
15878 unsigned Opc = Cmp.getOpcode();
15879 MVT VT = Op.getSimpleValueType();
15881 bool IllegalFPCMov = false;
15882 if (VT.isFloatingPoint() && !VT.isVector() &&
15883 !isScalarFPTypeInSSEReg(VT)) // FPStack?
15884 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
15886 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
15887         Opc == X86ISD::BT) { // FIXME
15888       Cond = Cmp;
15889       addTest = false;
15890     }
15891 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
15892 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
15893 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
15894 Cond.getOperand(0).getValueType() != MVT::i8)) {
15895 SDValue LHS = Cond.getOperand(0);
15896 SDValue RHS = Cond.getOperand(1);
15897     unsigned X86Opcode;
15898     unsigned X86Cond;
15899     SDVTList VTs;
15900 switch (CondOpcode) {
15901 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
15902 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
15903 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
15904 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
15905 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
15906 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
15907 default: llvm_unreachable("unexpected overflowing operator");
15909 if (CondOpcode == ISD::UMULO)
15910       VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
15911                           MVT::i32);
15912     else
15913       VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
15915 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
15917 if (CondOpcode == ISD::UMULO)
15918       Cond = X86Op.getValue(2);
15919     else
15920       Cond = X86Op.getValue(1);
15922     CC = DAG.getConstant(X86Cond, MVT::i8);
15923     addTest = false;
15924   }
15926   if (addTest) {
15928     // Look past the truncate if the high bits are known zero.
15928 if (isTruncWithZeroHighBitsInput(Cond, DAG))
15929 Cond = Cond.getOperand(0);
15931     // We know the result of the AND is compared against zero. Try to match
15932     // it to BT.
15933 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
15934 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG);
15935 if (NewSetCC.getNode()) {
15936 CC = NewSetCC.getOperand(0);
15937       Cond = NewSetCC.getOperand(1);
15938       addTest = false;
15939     }
15940   }
15941   }
15943   if (addTest) {
15944 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
15945     Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
15946   }
15948 // a < b ? -1 : 0 -> RES = ~setcc_carry
15949 // a < b ? 0 : -1 -> RES = setcc_carry
15950 // a >= b ? -1 : 0 -> RES = setcc_carry
15951 // a >= b ? 0 : -1 -> RES = ~setcc_carry
15952 if (Cond.getOpcode() == X86ISD::SUB) {
15953 Cond = ConvertCmpIfNecessary(Cond, DAG);
15954 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
15956 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
15957 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
15958 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
15959 DAG.getConstant(X86::COND_B, MVT::i8), Cond);
15960 if (isAllOnes(Op1) != (CondCode == X86::COND_B))
15961         return DAG.getNOT(DL, Res, Res.getValueType());
15962       return Res;
15963     }
15964   }
15966 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate
15967 // widen the cmov and push the truncate through. This avoids introducing a new
15968 // branch during isel and doesn't add any extensions.
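  // E.g. (i8 (select c, (trunc i32 a), (trunc i32 b))) becomes
  // (trunc (i32 (cmov c, a, b))), reusing the wider registers (sketch).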
15969 if (Op.getValueType() == MVT::i8 &&
15970 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
15971 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
15972 if (T1.getValueType() == T2.getValueType() &&
15973 // Blacklist CopyFromReg to avoid partial register stalls.
15974 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
15975 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
15976 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond);
15977 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
15981 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
15982 // condition is true.
15983 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
15984 SDValue Ops[] = { Op2, Op1, CC, Cond };
15985 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops);
15988 static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, const X86Subtarget *Subtarget,
15989 SelectionDAG &DAG) {
15990 MVT VT = Op->getSimpleValueType(0);
15991 SDValue In = Op->getOperand(0);
15992 MVT InVT = In.getSimpleValueType();
15993 MVT VTElt = VT.getVectorElementType();
15994   MVT InVTElt = InVT.getVectorElementType();
15995   SDLoc dl(Op);
15997   // SKX processor
15998 if ((InVTElt == MVT::i1) &&
15999 (((Subtarget->hasBWI() && Subtarget->hasVLX() &&
16000 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() <= 16)) ||
16002 ((Subtarget->hasBWI() && VT.is512BitVector() &&
16003 VTElt.getSizeInBits() <= 16)) ||
16005 ((Subtarget->hasDQI() && Subtarget->hasVLX() &&
16006 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) ||
16008 ((Subtarget->hasDQI() && VT.is512BitVector() &&
16009 VTElt.getSizeInBits() >= 32))))
16010 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16012 unsigned int NumElts = VT.getVectorNumElements();
16014   if (NumElts != 8 && NumElts != 16)
16015     return SDValue();
16017 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) {
16018 if (In.getOpcode() == X86ISD::VSEXT || In.getOpcode() == X86ISD::VZEXT)
16019 return DAG.getNode(In.getOpcode(), dl, VT, In.getOperand(0));
16020 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16023 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16024 assert (InVT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
16026 MVT ExtVT = (NumElts == 8) ? MVT::v8i64 : MVT::v16i32;
16027 Constant *C = ConstantInt::get(*DAG.getContext(),
16028 APInt::getAllOnesValue(ExtVT.getScalarType().getSizeInBits()));
16030 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
16031 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
16032 SDValue Ld = DAG.getLoad(ExtVT.getScalarType(), dl, DAG.getEntryNode(), CP,
16033 MachinePointerInfo::getConstantPool(),
16034 false, false, false, Alignment);
16035 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, dl, ExtVT, In, Ld);
16036 if (VT.is512BitVector())
16037     return Brcst;
16038   return DAG.getNode(X86ISD::VTRUNC, dl, VT, Brcst);
16041 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
16042 SelectionDAG &DAG) {
16043 MVT VT = Op->getSimpleValueType(0);
16044 SDValue In = Op->getOperand(0);
16045   MVT InVT = In.getSimpleValueType();
16046   SDLoc dl(Op);
16048 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
16049 return LowerSIGN_EXTEND_AVX512(Op, Subtarget, DAG);
16051 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) &&
16052 (VT != MVT::v8i32 || InVT != MVT::v8i16) &&
16053       (VT != MVT::v16i16 || InVT != MVT::v16i8))
16054     return SDValue();
16056 if (Subtarget->hasInt256())
16057 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16059   // Optimize vectors in AVX mode:
16060   // sign extend v8i16 to v8i32 and v4i32 to v4i64.
16063   // Divide the input vector into two parts;
16064   // for v4i32 the shuffle masks will be { 0, 1, -1, -1 } and { 2, 3, -1, -1 },
16065   // use the vpmovsx instruction to extend v4i32 -> v2i64 and v8i16 -> v4i32,
16066   // then concatenate the halves back to the original VT.
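  // For instance, for v8i16 -> v8i32 on AVX1 this produces (sketch):
  //   lo = shuffle in, undef, <0,1,2,3,-1,-1,-1,-1>
  //   hi = shuffle in, undef, <4,5,6,7,-1,-1,-1,-1>
  //   result = concat (vpmovsxwd lo), (vpmovsxwd hi)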
16068 unsigned NumElems = InVT.getVectorNumElements();
16069 SDValue Undef = DAG.getUNDEF(InVT);
16071 SmallVector<int,8> ShufMask1(NumElems, -1);
16072   for (unsigned i = 0; i != NumElems/2; ++i)
16073     ShufMask1[i] = i;
16075 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]);
16077 SmallVector<int,8> ShufMask2(NumElems, -1);
16078 for (unsigned i = 0; i != NumElems/2; ++i)
16079 ShufMask2[i] = i + NumElems/2;
16081 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]);
16083 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(),
16084 VT.getVectorNumElements()/2);
16086 OpLo = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpLo);
16087 OpHi = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpHi);
16089 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
16092 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
16093 // may emit an illegal shuffle but the expansion is still better than scalar
16094 // code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
16095 // we'll emit a shuffle and an arithmetic shift.
16096 // TODO: It is possible to support ZExt by zeroing the undef values during
16097 // the shuffle phase or after the shuffle.
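// For example, a v4i8 -> v4i32 sextload without SSE4.1 can be done as
// (sketch): load the four bytes as one i32 scalar, shuffle each byte into
// the high byte of its own 32-bit lane, and arithmetic-shift every lane
// right by 24 so the sign bits are replicated down.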
16098 static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
16099 SelectionDAG &DAG) {
16100 MVT RegVT = Op.getSimpleValueType();
16101 assert(RegVT.isVector() && "We only custom lower vector sext loads.");
16102 assert(RegVT.isInteger() &&
16103 "We only custom lower integer vector sext loads.");
16105 // Nothing useful we can do without SSE2 shuffles.
16106 assert(Subtarget->hasSSE2() && "We only custom lower sext loads with SSE2.");
16108   LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
16109   SDLoc dl(Ld);
16110 EVT MemVT = Ld->getMemoryVT();
16111 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16112 unsigned RegSz = RegVT.getSizeInBits();
16114 ISD::LoadExtType Ext = Ld->getExtensionType();
16116 assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
16117 && "Only anyext and sext are currently implemented.");
16118 assert(MemVT != RegVT && "Cannot extend to the same type");
16119 assert(MemVT.isVector() && "Must load a vector from memory");
16121 unsigned NumElems = RegVT.getVectorNumElements();
16122 unsigned MemSz = MemVT.getSizeInBits();
16123 assert(RegSz > MemSz && "Register size must be greater than the mem size");
16125 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256()) {
16126 // The only way in which we have a legal 256-bit vector result but not the
16127 // integer 256-bit operations needed to directly lower a sextload is if we
16128 // have AVX1 but not AVX2. In that case, we can always emit a sextload to
16129 // a 128-bit vector and a normal sign_extend to 256-bits that should get
16130 // correctly legalized. We do this late to allow the canonical form of
16131 // sextload to persist throughout the rest of the DAG combiner -- it wants
16132 // to fold together any extensions it can, and so will fuse a sign_extend
16133 // of an sextload into a sextload targeting a wider value.
16134     SDValue Load;
16135     if (MemSz == 128) {
16136 // Just switch this to a normal load.
16137 assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
16138                                        "it must be a legal 128-bit vector "
16139                                        "type!");
16140 Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
16141 Ld->getPointerInfo(), Ld->isVolatile(), Ld->isNonTemporal(),
16142                          Ld->isInvariant(), Ld->getAlignment());
16143     } else {
16144       assert(MemSz < 128 &&
16145 "Can't extend a type wider than 128 bits to a 256 bit vector!");
16146 // Do an sext load to a 128-bit vector type. We want to use the same
16147 // number of elements, but elements half as wide. This will end up being
16148 // recursively lowered by this routine, but will succeed as we definitely
16149 // have all the necessary features if we're using AVX1.
16150       EVT HalfEltVT =
16151           EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
16152 EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
16153       Load =
16154           DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
16155 Ld->getPointerInfo(), MemVT, Ld->isVolatile(),
16156 Ld->isNonTemporal(), Ld->isInvariant(),
16157                          Ld->getAlignment());
16158     }
16160 // Replace chain users with the new chain.
16161 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
16162 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
16164 // Finally, do a normal sign-extend to the desired register.
16165 return DAG.getSExtOrTrunc(Load, dl, RegVT);
16168 // All sizes must be a power of two.
16169 assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
16170 "Non-power-of-two elements are not custom lowered!");
16172 // Attempt to load the original value using scalar loads.
16173 // Find the largest scalar type that divides the total loaded size.
16174 MVT SclrLoadTy = MVT::i8;
16175 for (MVT Tp : MVT::integer_valuetypes()) {
16176     if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
16177       SclrLoadTy = Tp;
16178     }
16179   }
16181   // On 32-bit systems we can't store 64-bit integers; try bitcasting to f64.
16182   if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
16183       (64 <= MemSz))
16184     SclrLoadTy = MVT::f64;
16186 // Calculate the number of scalar loads that we need to perform
16187 // in order to load our vector from memory.
16188 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
16190 assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
16191 "Can only lower sext loads with a single scalar load!");
16193   unsigned loadRegSize = RegSz;
16194   if (Ext == ISD::SEXTLOAD && RegSz == 256)
16195     loadRegSize = 128;
16197 // Represent our vector as a sequence of elements which are the
16198 // largest scalar that we can load.
16199 EVT LoadUnitVecVT = EVT::getVectorVT(
16200       *DAG.getContext(), SclrLoadTy, loadRegSize / SclrLoadTy.getSizeInBits());
16202 // Represent the data using the same element type that is stored in
16203   // memory. In practice, we "widen" MemVT.
16204   EVT WideVecVT =
16205       EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
16206                        loadRegSize / MemVT.getScalarType().getSizeInBits());
16208 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
16209 "Invalid vector type");
16211 // We can't shuffle using an illegal type.
16212 assert(TLI.isTypeLegal(WideVecVT) &&
16213 "We only lower types that form legal widened vector types");
16215 SmallVector<SDValue, 8> Chains;
16216 SDValue Ptr = Ld->getBasePtr();
16217 SDValue Increment =
16218 DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, TLI.getPointerTy());
16219 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
16221 for (unsigned i = 0; i < NumLoads; ++i) {
16222 // Perform a single load.
16223 SDValue ScalarLoad =
16224 DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
16225 Ld->isVolatile(), Ld->isNonTemporal(), Ld->isInvariant(),
16226 Ld->getAlignment());
16227 Chains.push_back(ScalarLoad.getValue(1));
16228 // Create the first element type using SCALAR_TO_VECTOR in order to avoid
16229 // another round of DAGCombining.
16230     if (i == 0)
16231       Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
16232     else
16233       Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
16234 ScalarLoad, DAG.getIntPtrConstant(i));
16236 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
16239 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
16241 // Bitcast the loaded value to a vector of the original element type, in
16242 // the size of the target vector type.
16243 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res);
16244 unsigned SizeRatio = RegSz / MemSz;
16246 if (Ext == ISD::SEXTLOAD) {
16247 // If we have SSE4.1, we can directly emit a VSEXT node.
16248 if (Subtarget->hasSSE41()) {
16249 SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec);
16250       DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16251       return Sext;
16252     }
16254 // Otherwise we'll shuffle the small elements in the high bits of the
16255 // larger type and perform an arithmetic shift. If the shift is not legal
16256 // it's better to scalarize.
16257 assert(TLI.isOperationLegalOrCustom(ISD::SRA, RegVT) &&
16258 "We can't implement a sext load without an arithmetic right shift!");
16260 // Redistribute the loaded elements into the different locations.
16261 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16262 for (unsigned i = 0; i != NumElems; ++i)
16263 ShuffleVec[i * SizeRatio + SizeRatio - 1] = i;
16265 SDValue Shuff = DAG.getVectorShuffle(
16266 WideVecVT, dl, SlicedVec, DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16268 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16270 // Build the arithmetic shift.
16271 unsigned Amt = RegVT.getVectorElementType().getSizeInBits() -
16272 MemVT.getVectorElementType().getSizeInBits();
16273     Shuff =
16274         DAG.getNode(ISD::SRA, dl, RegVT, Shuff, DAG.getConstant(Amt, RegVT));
16276     DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16277     return Shuff;
16278   }
16280 // Redistribute the loaded elements into the different locations.
16281 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16282 for (unsigned i = 0; i != NumElems; ++i)
16283 ShuffleVec[i * SizeRatio] = i;
16285 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
16286 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16288 // Bitcast to the requested type.
16289 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16290   DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16291   return Shuff;
16292 }
16294 // isAndOrOfSetCCs - Return true if node is an ISD::AND or
16295 // ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart
16296 // from the AND / OR.
16297 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
16298 Opc = Op.getOpcode();
16299   if (Opc != ISD::OR && Opc != ISD::AND)
16300     return false;
16301 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16302 Op.getOperand(0).hasOneUse() &&
16303 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
16304 Op.getOperand(1).hasOneUse());
16307 // isXor1OfSetCC - Return true if node is an ISD::XOR of a X86ISD::SETCC and
16308 // 1 and that the SETCC node has a single use.
16309 static bool isXor1OfSetCC(SDValue Op) {
16310 if (Op.getOpcode() != ISD::XOR)
16312 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
16313 if (N1C && N1C->getAPIntValue() == 1) {
16314 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16315            Op.getOperand(0).hasOneUse();
16316   }
16317   return false;
16318 }
16320 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
16321 bool addTest = true;
16322 SDValue Chain = Op.getOperand(0);
16323 SDValue Cond = Op.getOperand(1);
16324   SDValue Dest = Op.getOperand(2);
16325   SDLoc dl(Op);
16326   SDValue CC;
16327 bool Inverted = false;
16329 if (Cond.getOpcode() == ISD::SETCC) {
16330 // Check for setcc([su]{add,sub,mul}o == 0).
16331 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
16332 isa<ConstantSDNode>(Cond.getOperand(1)) &&
16333 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() &&
16334 Cond.getOperand(0).getResNo() == 1 &&
16335 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
16336 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
16337 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
16338 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
16339 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
16340 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
16341       Inverted = true;
16342       Cond = Cond.getOperand(0);
16343     } else {
16344       SDValue NewCond = LowerSETCC(Cond, DAG);
16345       if (NewCond.getNode())
16346         Cond = NewCond;
16347     }
16349   }
16350 // FIXME: LowerXALUO doesn't handle these!!
16351 else if (Cond.getOpcode() == X86ISD::ADD ||
16352 Cond.getOpcode() == X86ISD::SUB ||
16353 Cond.getOpcode() == X86ISD::SMUL ||
16354 Cond.getOpcode() == X86ISD::UMUL)
16355 Cond = LowerXALUO(Cond, DAG);
16358   // Look past (and (setcc_carry (cmp ...)), 1).
16359 if (Cond.getOpcode() == ISD::AND &&
16360 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16361 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16362 if (C && C->getAPIntValue() == 1)
16363 Cond = Cond.getOperand(0);
16366 // If condition flag is set by a X86ISD::CMP, then use it as the condition
16367 // setting operand in place of the X86ISD::SETCC.
16368 unsigned CondOpcode = Cond.getOpcode();
16369 if (CondOpcode == X86ISD::SETCC ||
16370 CondOpcode == X86ISD::SETCC_CARRY) {
16371 CC = Cond.getOperand(0);
16373 SDValue Cmp = Cond.getOperand(1);
16374 unsigned Opc = Cmp.getOpcode();
16375 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
16376     if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
16377       Cond = Cmp;
16378       addTest = false;
16379     } else {
16380       switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
16381       default: break;
16382       case X86::COND_O:
16383       case X86::COND_B:
16384         // These can only come from an arithmetic instruction with overflow,
16385         // e.g. SADDO, UADDO.
16386         Cond = Cond.getNode()->getOperand(1);
16387         addTest = false;
16388         break;
16389       }
16390     }
16391   }
16392 CondOpcode = Cond.getOpcode();
16393 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16394 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16395 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16396 Cond.getOperand(0).getValueType() != MVT::i8)) {
16397 SDValue LHS = Cond.getOperand(0);
16398 SDValue RHS = Cond.getOperand(1);
16399     unsigned X86Opcode;
16400     unsigned X86Cond;
16401     SDVTList VTs;
16402 // Keep this in sync with LowerXALUO, otherwise we might create redundant
16403     // instructions that can't be removed afterwards (i.e. X86ISD::ADD and
16404     // X86ISD::INC).
16405 switch (CondOpcode) {
16406 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16407     case ISD::SADDO:
16408       if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16409         if (C->isOne()) {
16410           X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
16411           break;
16412         }
16413       X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16414 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16415     case ISD::SSUBO:
16416       if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16417         if (C->isOne()) {
16418           X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
16419           break;
16420         }
16421       X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16422 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16423 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16424 default: llvm_unreachable("unexpected overflowing operator");
16425     }
16426     if (Inverted)
16427       X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
16428 if (CondOpcode == ISD::UMULO)
16429       VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16430                           MVT::i32);
16431     else
16432       VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16434 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);
16436 if (CondOpcode == ISD::UMULO)
16437       Cond = X86Op.getValue(2);
16438     else
16439       Cond = X86Op.getValue(1);
16441     CC = DAG.getConstant(X86Cond, MVT::i8);
16442     addTest = false;
16443   } else {
16444     unsigned CondOpc;
16445     if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
16446 SDValue Cmp = Cond.getOperand(0).getOperand(1);
16447 if (CondOpc == ISD::OR) {
16448 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
16449 // two branches instead of an explicit OR instruction with a
16451 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16452 isX86LogicalCmp(Cmp)) {
16453 CC = Cond.getOperand(0).getOperand(0);
16454 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16455 Chain, Dest, CC, Cmp);
16456         CC = Cond.getOperand(1).getOperand(0);
16457         Cond = Cmp;
16458         addTest = false;
16459       }
16460 } else { // ISD::AND
16461 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
16462 // two branches instead of an explicit AND instruction with a
16463 // separate test. However, we only do this if this block doesn't
16464 // have a fall-through edge, because this requires an explicit
16465 // jmp when the condition is false.
16466 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16467 isX86LogicalCmp(Cmp) &&
16468 Op.getNode()->hasOneUse()) {
16469 X86::CondCode CCode =
16470 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16471 CCode = X86::GetOppositeBranchCondition(CCode);
16472 CC = DAG.getConstant(CCode, MVT::i8);
16473 SDNode *User = *Op.getNode()->use_begin();
16474 // Look for an unconditional branch following this conditional branch.
16475 // We need this because we need to reverse the successors in order
16476 // to implement FCMP_OEQ.
16477 if (User->getOpcode() == ISD::BR) {
16478 SDValue FalseBB = User->getOperand(1);
16479           SDNode *NewBR =
16480             DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16481           assert(NewBR == User); (void)NewBR;
16482           Dest = FalseBB;
16485 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16486 Chain, Dest, CC, Cmp);
16487 X86::CondCode CCode =
16488 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
16489 CCode = X86::GetOppositeBranchCondition(CCode);
16490           CC = DAG.getConstant(CCode, MVT::i8);
16491           Cond = Cmp;
16492           addTest = false;
16493         }
16494       }
16495     }
16496 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
16497 // Recognize for xorb (setcc), 1 patterns. The xor inverts the condition.
16498 // It should be transformed during dag combiner except when the condition
16499 // is set by a arithmetics with overflow node.
16500 X86::CondCode CCode =
16501 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16502 CCode = X86::GetOppositeBranchCondition(CCode);
16503 CC = DAG.getConstant(CCode, MVT::i8);
16504 Cond = Cond.getOperand(0).getOperand(1);
16506 } else if (Cond.getOpcode() == ISD::SETCC &&
16507 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
16508 // For FCMP_OEQ, we can emit
16509 // two branches instead of an explicit AND instruction with a
16510 // separate test. However, we only do this if this block doesn't
16511 // have a fall-through edge, because this requires an explicit
16512 // jmp when the condition is false.
16513 if (Op.getNode()->hasOneUse()) {
16514 SDNode *User = *Op.getNode()->use_begin();
16515 // Look for an unconditional branch following this conditional branch.
16516 // We need this because we need to reverse the successors in order
16517 // to implement FCMP_OEQ.
16518 if (User->getOpcode() == ISD::BR) {
16519 SDValue FalseBB = User->getOperand(1);
16520         SDNode *NewBR =
16521           DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16522         assert(NewBR == User); (void)NewBR;
16523         Dest = FalseBB;
16526 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16527 Cond.getOperand(0), Cond.getOperand(1));
16528 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16529 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16530 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16531 Chain, Dest, CC, Cmp);
16532         CC = DAG.getConstant(X86::COND_P, MVT::i8);
16533         Cond = Cmp;
16534         addTest = false;
16535       }
16536     }
16537 } else if (Cond.getOpcode() == ISD::SETCC &&
16538 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
16539 // For FCMP_UNE, we can emit
16540 // two branches instead of an explicit AND instruction with a
16541 // separate test. However, we only do this if this block doesn't
16542 // have a fall-through edge, because this requires an explicit
16543 // jmp when the condition is false.
16544 if (Op.getNode()->hasOneUse()) {
16545 SDNode *User = *Op.getNode()->use_begin();
16546 // Look for an unconditional branch following this conditional branch.
16547 // We need this because we need to reverse the successors in order
16548 // to implement FCMP_UNE.
16549 if (User->getOpcode() == ISD::BR) {
16550 SDValue FalseBB = User->getOperand(1);
16551         SDNode *NewBR =
16552           DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16553         assert(NewBR == User); (void)NewBR;
16554         Dest = FalseBB;
16556 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16557 Cond.getOperand(0), Cond.getOperand(1));
16558 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16559 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16560 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16561 Chain, Dest, CC, Cmp);
16562         CC = DAG.getConstant(X86::COND_NP, MVT::i8);
16563         Cond = Cmp;
16564         addTest = false;
16565       }
16566     }
16567   }
16570   if (addTest) {
16572     // Look past the truncate if the high bits are known zero.
16573 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16574 Cond = Cond.getOperand(0);
16576     // We know the result of the AND is compared against zero. Try to match
16577     // it to BT.
16578 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16579 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
16580 if (NewSetCC.getNode()) {
16581 CC = NewSetCC.getOperand(0);
16582       Cond = NewSetCC.getOperand(1);
16583       addTest = false;
16584     }
16585   }
16586   }
16588   if (addTest) {
16589 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
16590 CC = DAG.getConstant(X86Cond, MVT::i8);
16591     Cond = EmitTest(Cond, X86Cond, dl, DAG);
16592   }
16593 Cond = ConvertCmpIfNecessary(Cond, DAG);
16594 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16595 Chain, Dest, CC, Cond);
16598 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
16599 // Calls to _alloca are needed to probe the stack when allocating more than 4k
16600 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
16601 // that the guard pages used by the OS virtual memory manager are allocated in
16602 // correct sequence.
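// For example, an 8K allocation probes the stack at SP-4096 and SP-8192 so
// that each guard page is touched, and therefore committed, in address
// order (sketch).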
16604 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
16605 SelectionDAG &DAG) const {
16606 MachineFunction &MF = DAG.getMachineFunction();
16607 bool SplitStack = MF.shouldSplitStack();
16608   bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMachO()) ||
16609                SplitStack;
16610   SDLoc dl(Op);
16612   if (!Lower) {
16613 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16614 SDNode* Node = Op.getNode();
16616 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
16617 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
16618 " not tell us which reg is the stack pointer!");
16619 EVT VT = Node->getValueType(0);
16620 SDValue Tmp1 = SDValue(Node, 0);
16621 SDValue Tmp2 = SDValue(Node, 1);
16622 SDValue Tmp3 = Node->getOperand(2);
16623 SDValue Chain = Tmp1.getOperand(0);
16625 // Chain the dynamic stack allocation so that it doesn't modify the stack
16626 // pointer when other instructions are using the stack.
16627     Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true),
16628                                  SDLoc(Node));
16630 SDValue Size = Tmp2.getOperand(1);
16631 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
16632 Chain = SP.getValue(1);
16633 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
16634 const TargetFrameLowering &TFI = *DAG.getSubtarget().getFrameLowering();
16635 unsigned StackAlign = TFI.getStackAlignment();
16636 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
16637 if (Align > StackAlign)
16638 Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
16639 DAG.getConstant(-(uint64_t)Align, VT));
16640 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
16642 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
16643                                DAG.getIntPtrConstant(0, true), SDValue(),
16644                                SDLoc(Node));
16646 SDValue Ops[2] = { Tmp1, Tmp2 };
16647     return DAG.getMergeValues(Ops, dl);
16648   }
16651 SDValue Chain = Op.getOperand(0);
16652 SDValue Size = Op.getOperand(1);
16653 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
16654 EVT VT = Op.getNode()->getValueType(0);
16656 bool Is64Bit = Subtarget->is64Bit();
16657 EVT SPTy = getPointerTy();
16659   if (SplitStack) {
16660     MachineRegisterInfo &MRI = MF.getRegInfo();
16662     if (Is64Bit) {
16663       // The 64-bit implementation of segmented stacks needs to clobber both
16664       // r10 and r11, so it cannot be used together with nested parameters.
16665 const Function *F = MF.getFunction();
16667       for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
16668            I != E; ++I)
16669 if (I->hasNestAttr())
16670 report_fatal_error("Cannot use segmented stacks with functions that "
16671                              "have nested arguments.");
16672     }
16674 const TargetRegisterClass *AddrRegClass =
16675 getRegClassFor(getPointerTy());
16676 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
16677 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
16678 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
16679 DAG.getRegister(Vreg, SPTy));
16680 SDValue Ops1[2] = { Value, Chain };
16681     return DAG.getMergeValues(Ops1, dl);
16682   }
16684   const unsigned Reg = (Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX);
16685   SDValue Flag;
16686   Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
16687 Flag = Chain.getValue(1);
16688 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
16690 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
16692 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
16693 DAG.getSubtarget().getRegisterInfo());
16694 unsigned SPReg = RegInfo->getStackRegister();
16695 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
16696 Chain = SP.getValue(1);
16698   if (Align) {
16699     SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
16700 DAG.getConstant(-(uint64_t)Align, VT));
16701     Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
16702   }
16704 SDValue Ops1[2] = { SP, Chain };
16705 return DAG.getMergeValues(Ops1, dl);
16709 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
16710 MachineFunction &MF = DAG.getMachineFunction();
16711 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
16713   const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
16714   SDLoc DL(Op);
16716 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
16717 // vastart just stores the address of the VarArgsFrameIndex slot into the
16718 // memory location argument.
16719     SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
16720                                    getPointerTy());
16721 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
16722 MachinePointerInfo(SV), false, false, 0);
16723   }
16724   // __va_list_tag:
16725   //   gp_offset         (0 - 6 * 8)
16726   //   fp_offset         (48 - 48 + 8 * 16)
16727   //   overflow_arg_area (points to parameters coming in memory).
16728   //   reg_save_area
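  // In C terms the layout stored below is, per the System V AMD64 ABI
  // (sketch):
  //   struct __va_list_tag {
  //     unsigned gp_offset;       // byte offset 0
  //     unsigned fp_offset;       // byte offset 4
  //     void *overflow_arg_area;  // byte offset 8
  //     void *reg_save_area;      // byte offset 16
  //   };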
16730 SmallVector<SDValue, 8> MemOps;
16731 SDValue FIN = Op.getOperand(1);
16733 SDValue Store = DAG.getStore(Op.getOperand(0), DL,
16734                                DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
16735                                                MVT::i32),
16736 FIN, MachinePointerInfo(SV), false, false, 0);
16737 MemOps.push_back(Store);
16739   // Store fp_offset.
16740   FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
16741 FIN, DAG.getIntPtrConstant(4));
16742 Store = DAG.getStore(Op.getOperand(0), DL,
16743                        DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
16744                                        MVT::i32),
16745 FIN, MachinePointerInfo(SV, 4), false, false, 0);
16746 MemOps.push_back(Store);
16748 // Store ptr to overflow_arg_area
16749 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
16750 FIN, DAG.getIntPtrConstant(4));
16751   SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
16752                                     getPointerTy());
16753 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
16754                        MachinePointerInfo(SV, 8),
16755                        false, false, 0);
16756 MemOps.push_back(Store);
16758 // Store ptr to reg_save_area.
16759 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
16760 FIN, DAG.getIntPtrConstant(8));
16761   SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
16762                                     getPointerTy());
16763 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN,
16764 MachinePointerInfo(SV, 16), false, false, 0);
16765 MemOps.push_back(Store);
16766 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
16769 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
16770 assert(Subtarget->is64Bit() &&
16771 "LowerVAARG only handles 64-bit va_arg!");
16772 assert((Subtarget->isTargetLinux() ||
16773 Subtarget->isTargetDarwin()) &&
16774 "Unhandled target in LowerVAARG");
16775 assert(Op.getNode()->getNumOperands() == 4);
16776 SDValue Chain = Op.getOperand(0);
16777 SDValue SrcPtr = Op.getOperand(1);
16778 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
16779   unsigned Align = Op.getConstantOperandVal(3);
16780   SDLoc dl(Op);
16782 EVT ArgVT = Op.getNode()->getValueType(0);
16783 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
16784 uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy);
16786   uint8_t ArgMode;
16787   // Decide which area this value should be read from.
16788 // TODO: Implement the AMD64 ABI in its entirety. This simple
16789 // selection mechanism works only for the basic types.
16790 if (ArgVT == MVT::f80) {
16791 llvm_unreachable("va_arg for f80 not yet implemented");
16792 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
16793 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
16794 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
16795 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
16796   } else {
16797     llvm_unreachable("Unhandled argument type in LowerVAARG");
16798   }
16800 if (ArgMode == 2) {
16801 // Sanity Check: Make sure using fp_offset makes sense.
16802 assert(!DAG.getTarget().Options.UseSoftFloat &&
16803 !(DAG.getMachineFunction()
16804 .getFunction()->getAttributes()
16805 .hasAttribute(AttributeSet::FunctionIndex,
16806 Attribute::NoImplicitFloat)) &&
16807            Subtarget->hasSSE1());
16808   }
16810 // Insert VAARG_64 node into the DAG
16811 // VAARG_64 returns two values: Variable Argument Address, Chain
16812 SmallVector<SDValue, 11> InstOps;
16813 InstOps.push_back(Chain);
16814 InstOps.push_back(SrcPtr);
16815 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32));
16816 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8));
16817 InstOps.push_back(DAG.getConstant(Align, MVT::i32));
16818 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
16819 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
16820 VTs, InstOps, MVT::i64,
16821 MachinePointerInfo(SV),
16822                                           /*Align=*/0,
16823                                           /*Volatile=*/false,
16824                                           /*ReadMem=*/true,
16825 /*WriteMem=*/true);
16826 Chain = VAARG.getValue(1);
16828 // Load the next argument and return it
16829   return DAG.getLoad(ArgVT, dl,
16830                      Chain,
16831                      VAARG,
16832 MachinePointerInfo(),
16833 false, false, false, 0);
16836 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
16837 SelectionDAG &DAG) {
16838 // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
16839 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
16840 SDValue Chain = Op.getOperand(0);
16841 SDValue DstPtr = Op.getOperand(1);
16842 SDValue SrcPtr = Op.getOperand(2);
16843 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
16844 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
16846   SDLoc DL(Op);
16847   return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
16848                        DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
16849                        /*AlwaysInline=*/false,
16850 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
16853 // getTargetVShiftByConstNode - Handle vector element shifts where the shift
16854 // amount is a constant. Takes immediate version of shift as input.
16855 static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
16856 SDValue SrcOp, uint64_t ShiftAmt,
16857 SelectionDAG &DAG) {
16858 MVT ElementType = VT.getVectorElementType();
16860   // Fold this packed shift into its first operand if ShiftAmt is 0.
16861   if (ShiftAmt == 0)
16862     return SrcOp;
16864 // Check for ShiftAmt >= element width
16865 if (ShiftAmt >= ElementType.getSizeInBits()) {
16866 if (Opc == X86ISD::VSRAI)
16867       ShiftAmt = ElementType.getSizeInBits() - 1;
16868     else
16869       return DAG.getConstant(0, VT);
16870   }
16872 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
16873 && "Unknown target vector shift-by-constant node");
16875 // Fold this packed vector shift into a build vector if SrcOp is a
16876   // vector of Constants or UNDEFs, and SrcOp's value type is the same as VT.
16877 if (VT == SrcOp.getSimpleValueType() &&
16878 ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
16879 SmallVector<SDValue, 8> Elts;
16880 unsigned NumElts = SrcOp->getNumOperands();
16881 ConstantSDNode *ND;
16883     switch (Opc) {
16884     default: llvm_unreachable(nullptr);
16885 case X86ISD::VSHLI:
16886 for (unsigned i=0; i!=NumElts; ++i) {
16887 SDValue CurrentOp = SrcOp->getOperand(i);
16888 if (CurrentOp->getOpcode() == ISD::UNDEF) {
16889           Elts.push_back(CurrentOp);
16890           continue;
16891         }
16892 ND = cast<ConstantSDNode>(CurrentOp);
16893 const APInt &C = ND->getAPIntValue();
16894         Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), ElementType));
16895       }
16896       break;
16897 case X86ISD::VSRLI:
16898 for (unsigned i=0; i!=NumElts; ++i) {
16899 SDValue CurrentOp = SrcOp->getOperand(i);
16900 if (CurrentOp->getOpcode() == ISD::UNDEF) {
16901           Elts.push_back(CurrentOp);
16902           continue;
16903         }
16904 ND = cast<ConstantSDNode>(CurrentOp);
16905 const APInt &C = ND->getAPIntValue();
16906         Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), ElementType));
16907       }
16908       break;
16909 case X86ISD::VSRAI:
16910 for (unsigned i=0; i!=NumElts; ++i) {
16911 SDValue CurrentOp = SrcOp->getOperand(i);
16912 if (CurrentOp->getOpcode() == ISD::UNDEF) {
16913           Elts.push_back(CurrentOp);
16914           continue;
16915         }
16916 ND = cast<ConstantSDNode>(CurrentOp);
16917 const APInt &C = ND->getAPIntValue();
16918         Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), ElementType));
16919       }
16920       break;
16921     }
16923     return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
16924   }
16926 return DAG.getNode(Opc, dl, VT, SrcOp, DAG.getConstant(ShiftAmt, MVT::i8));
16929 // getTargetVShiftNode - Handle vector element shifts where the shift amount
16930 // may or may not be a constant. Takes immediate version of shift as input.
16931 static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
16932 SDValue SrcOp, SDValue ShAmt,
16933 SelectionDAG &DAG) {
16934 MVT SVT = ShAmt.getSimpleValueType();
16935 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
16937 // Catch shift-by-constant.
16938 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
16939 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
16940 CShAmt->getZExtValue(), DAG);
16942   // Change the opcode to the non-immediate version.
16943   switch (Opc) {
16944   default: llvm_unreachable("Unknown target vector shift node");
16945 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
16946 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
16947   case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
16948   }
16950 const X86Subtarget &Subtarget =
16951 DAG.getTarget().getSubtarget<X86Subtarget>();
16952 if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
16953 ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
16954 // Let the shuffle legalizer expand this shift amount node.
16955 SDValue Op0 = ShAmt.getOperand(0);
16956 Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
16957 ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, &Subtarget, DAG);
16958   } else {
16959     // Need to build a vector containing the shift amount.
16960     // SSE/AVX packed shifts only use the lower 64 bits of the shift count.
16961 SmallVector<SDValue, 4> ShOps;
16962 ShOps.push_back(ShAmt);
16963 if (SVT == MVT::i32) {
16964 ShOps.push_back(DAG.getConstant(0, SVT));
16965       ShOps.push_back(DAG.getUNDEF(SVT));
16966     }
16967 ShOps.push_back(DAG.getUNDEF(SVT));
16969 MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
16970     ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
16971   }
16973 // The return type has to be a 128-bit type with the same element
16974 // type as the input type.
16975 MVT EltVT = VT.getVectorElementType();
16976 EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());
16978 ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt);
16979 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
16982 /// \brief Return (and \p Op, \p Mask) for compare instructions or
16983 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
16984 /// necessary casting for \p Mask when lowering masking intrinsics.
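/// For example (sketch of the two shapes produced below): a masked compare
/// intrinsic becomes
///   (and (pcmpeqm %a, %b), %mask-bits)
/// while any other masked operation becomes
///   (vselect %mask-bits, (op ...), %passthru)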
16985 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
16986 SDValue PreservedSrc,
16987 const X86Subtarget *Subtarget,
16988 SelectionDAG &DAG) {
16989 EVT VT = Op.getValueType();
16990 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(),
16991 MVT::i1, VT.getVectorNumElements());
16992 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
16993 Mask.getValueType().getSizeInBits());
16995   SDLoc dl(Op);
16996   assert(MaskVT.isSimple() && "invalid mask type");
16998   if (isAllOnes(Mask))
16999     return Op;
17001   // When MaskVT is v2i1 or v4i1, only the low 2 or 4 elements
17002   // are extracted by the EXTRACT_SUBVECTOR below.
17003 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17004 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17005 DAG.getIntPtrConstant(0));
17007   switch (Op.getOpcode()) {
17008   default: break;
17009   case X86ISD::PCMPEQM:
17010   case X86ISD::PCMPGTM:
17011   case X86ISD::CMPM:
17012   case X86ISD::CMPMU:
17013     return DAG.getNode(ISD::AND, dl, VT, Op, VMask);
17014   }
17015 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17016 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17017 return DAG.getNode(ISD::VSELECT, dl, VT, VMask, Op, PreservedSrc);
17020 /// \brief Creates an SDNode for a predicated scalar operation.
17021 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
17022 /// The mask is coming in as MVT::i8 and it should be truncated
17023 /// to MVT::i1 while lowering masking intrinsics.
17024 /// The main difference between ScalarMaskingNode and VectorMaskingNode is using
17025 /// "X86select" instead of "vselect". We just can't create the "vselect" node for
17026 /// a scalar instruction.
17027 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
17028 SDValue PreservedSrc,
17029 const X86Subtarget *Subtarget,
17030 SelectionDAG &DAG) {
17031   if (isAllOnes(Mask))
17032     return Op;
17034 EVT VT = Op.getValueType();
17035   SDLoc dl(Op);
17036   // The mask should be of type MVT::i1.
17037 SDValue IMask = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Mask);
17039 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17040 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17041 return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc);
17044 static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17045 SelectionDAG &DAG) {
17046   SDLoc dl(Op);
17047   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17048 EVT VT = Op.getValueType();
17049 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
17050   if (IntrData) {
17051     switch (IntrData->Type) {
17052 case INTR_TYPE_1OP:
17053 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
17054 case INTR_TYPE_2OP:
17055       return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17056                          Op.getOperand(2));
17057 case INTR_TYPE_3OP:
17058 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17059 Op.getOperand(2), Op.getOperand(3));
17060 case INTR_TYPE_1OP_MASK_RM: {
17061 SDValue Src = Op.getOperand(1);
17062 SDValue Src0 = Op.getOperand(2);
17063 SDValue Mask = Op.getOperand(3);
17064 SDValue RoundingMode = Op.getOperand(4);
17065       return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
17066                                               RoundingMode),
17067                                   Mask, Src0, Subtarget, DAG);
17068     }
17069 case INTR_TYPE_SCALAR_MASK_RM: {
17070 SDValue Src1 = Op.getOperand(1);
17071 SDValue Src2 = Op.getOperand(2);
17072 SDValue Src0 = Op.getOperand(3);
17073 SDValue Mask = Op.getOperand(4);
17074 SDValue RoundingMode = Op.getOperand(5);
17075       return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
17076                                               RoundingMode),
17077                                   Mask, Src0, Subtarget, DAG);
17078     }
17079 case INTR_TYPE_2OP_MASK: {
17080 SDValue Mask = Op.getOperand(4);
17081 SDValue PassThru = Op.getOperand(3);
17082 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17083 if (IntrWithRoundingModeOpcode != 0) {
17084 unsigned Round = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
17085 if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION) {
17086 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17087 dl, Op.getValueType(),
17088 Op.getOperand(1), Op.getOperand(2),
17089 Op.getOperand(3), Op.getOperand(5)),
17090                                     Mask, PassThru, Subtarget, DAG);
17091         }
17092       }
17093       return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
17094                                               Op.getOperand(1),
17095                                               Op.getOperand(2)),
17096                                   Mask, PassThru, Subtarget, DAG);
17097     }
17098 case FMA_OP_MASK: {
17099 SDValue Src1 = Op.getOperand(1);
17100 SDValue Src2 = Op.getOperand(2);
17101 SDValue Src3 = Op.getOperand(3);
17102 SDValue Mask = Op.getOperand(4);
17103 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17104 if (IntrWithRoundingModeOpcode != 0) {
17105 SDValue Rnd = Op.getOperand(5);
17106 if (cast<ConstantSDNode>(Rnd)->getZExtValue() !=
17107 X86::STATIC_ROUNDING::CUR_DIRECTION)
17108 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17109 dl, Op.getValueType(),
17110 Src1, Src2, Src3, Rnd),
17111                                       Mask, Src1, Subtarget, DAG);
17112       }
17113       return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
17114                                               dl, Op.getValueType(),
17115                                               Src1, Src2, Src3),
17116                                   Mask, Src1, Subtarget, DAG);
17117     }
17119 case CMP_MASK_CC: {
17120 // Comparison intrinsics with masks.
17121 // Example of transformation:
17122 // (i8 (int_x86_avx512_mask_pcmpeq_q_128
17123 // (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
17125 // (v8i1 (insert_subvector undef,
17126 // (v2i1 (and (PCMPEQM %a, %b),
17127 // (extract_subvector
17128 // (v8i1 (bitcast %mask)), 0))), 0))))
17129 EVT VT = Op.getOperand(1).getValueType();
17130 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17131 VT.getVectorNumElements());
17132 SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3);
17133 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17134 Mask.getValueType().getSizeInBits());
17135 SDValue Cmp;
17136 if (IntrData->Type == CMP_MASK_CC) {
17137 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17138 Op.getOperand(2), Op.getOperand(3));
17139 } else {
17140 assert(IntrData->Type == CMP_MASK && "Unexpected intrinsic type!");
17141 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17142 Op.getOperand(2));
17143 }
17144 SDValue CmpMask = getVectorMaskingNode(Cmp, Mask,
17145 DAG.getTargetConstant(0, MaskVT),
17146 Subtarget, DAG);
17147 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
17148 DAG.getUNDEF(BitcastVT), CmpMask,
17149 DAG.getIntPtrConstant(0));
17150 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
17152 case COMI: { // Comparison intrinsics
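// Shape of the lowering below, sketched for a hypothetical comiss-style
// intrinsic whose IntrData carries CC == SETEQ (illustrative only):
//   (i32 zext (i8 (X86setcc COND_E, (X86comi %lhs, %rhs))))
// The flags-producing compare feeds a setcc byte that is widened to i32.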
17153 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
17154 SDValue LHS = Op.getOperand(1);
17155 SDValue RHS = Op.getOperand(2);
17156 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
17157 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
17158 SDValue Cond = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
17159 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17160 DAG.getConstant(X86CC, MVT::i8), Cond);
17161 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17162 }
17163 case VSHIFT:
17164 return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
17165 Op.getOperand(1), Op.getOperand(2), DAG);
17166 case VSHIFT_MASK:
17167 return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl,
17168 Op.getSimpleValueType(),
17169 Op.getOperand(1),
17170 Op.getOperand(2), DAG),
17171 Op.getOperand(4), Op.getOperand(3), Subtarget,
17172 DAG);
17173 case COMPRESS_EXPAND_IN_REG: {
17174 SDValue Mask = Op.getOperand(3);
17175 SDValue DataToCompress = Op.getOperand(1);
17176 SDValue PassThru = Op.getOperand(2);
17177 if (isAllOnes(Mask)) // return data as is
17178 return Op.getOperand(1);
17179 EVT VT = Op.getValueType();
17180 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17181 VT.getVectorNumElements());
17182 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17183 Mask.getValueType().getSizeInBits());
17185 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17186 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17187 DAG.getIntPtrConstant(0));
17189 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToCompress,
17190 DAG.getUNDEF(VT));
17191 }
17192 case BLEND: {
17193 SDValue Mask = Op.getOperand(3);
17194 EVT VT = Op.getValueType();
17195 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17196 VT.getVectorNumElements());
17197 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17198 Mask.getValueType().getSizeInBits());
17200 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17201 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17202 DAG.getIntPtrConstant(0));
17203 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1),
17204 Op.getOperand(2));
17205 }
17206 default:
17207 break;
17208 }
17209 }
17211 switch (IntNo) {
17212 default: return SDValue(); // Don't custom lower most intrinsics.
17214 case Intrinsic::x86_avx512_mask_valign_q_512:
17215 case Intrinsic::x86_avx512_mask_valign_d_512:
17216 // Vector source operands are swapped.
17217 return getVectorMaskingNode(DAG.getNode(X86ISD::VALIGN, dl,
17218 Op.getValueType(), Op.getOperand(2),
17221 Op.getOperand(5), Op.getOperand(4),
17224 // ptest and testp intrinsics. The intrinsics these come from are designed to
17225 // return an integer value, not just an instruction, so lower them to the ptest
17226 // or testp pattern and a setcc for the result.
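// For example (a sketch, not emitted verbatim): a ptestz intrinsic becomes
//   (i32 zext (i8 (X86setcc COND_E, (X86ptest %lhs, %rhs))))
// matching the C-level contract of _mm_testz_si128, which returns 1 exactly
// when (lhs & rhs) == 0, since PTEST sets ZF in that case.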
17227 case Intrinsic::x86_sse41_ptestz:
17228 case Intrinsic::x86_sse41_ptestc:
17229 case Intrinsic::x86_sse41_ptestnzc:
17230 case Intrinsic::x86_avx_ptestz_256:
17231 case Intrinsic::x86_avx_ptestc_256:
17232 case Intrinsic::x86_avx_ptestnzc_256:
17233 case Intrinsic::x86_avx_vtestz_ps:
17234 case Intrinsic::x86_avx_vtestc_ps:
17235 case Intrinsic::x86_avx_vtestnzc_ps:
17236 case Intrinsic::x86_avx_vtestz_pd:
17237 case Intrinsic::x86_avx_vtestc_pd:
17238 case Intrinsic::x86_avx_vtestnzc_pd:
17239 case Intrinsic::x86_avx_vtestz_ps_256:
17240 case Intrinsic::x86_avx_vtestc_ps_256:
17241 case Intrinsic::x86_avx_vtestnzc_ps_256:
17242 case Intrinsic::x86_avx_vtestz_pd_256:
17243 case Intrinsic::x86_avx_vtestc_pd_256:
17244 case Intrinsic::x86_avx_vtestnzc_pd_256: {
17245 bool IsTestPacked = false;
17246 unsigned X86CC;
17247 switch (IntNo) {
17248 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
17249 case Intrinsic::x86_avx_vtestz_ps:
17250 case Intrinsic::x86_avx_vtestz_pd:
17251 case Intrinsic::x86_avx_vtestz_ps_256:
17252 case Intrinsic::x86_avx_vtestz_pd_256:
17253 IsTestPacked = true; // Fallthrough
17254 case Intrinsic::x86_sse41_ptestz:
17255 case Intrinsic::x86_avx_ptestz_256:
17257 X86CC = X86::COND_E;
17258 break;
17259 case Intrinsic::x86_avx_vtestc_ps:
17260 case Intrinsic::x86_avx_vtestc_pd:
17261 case Intrinsic::x86_avx_vtestc_ps_256:
17262 case Intrinsic::x86_avx_vtestc_pd_256:
17263 IsTestPacked = true; // Fallthrough
17264 case Intrinsic::x86_sse41_ptestc:
17265 case Intrinsic::x86_avx_ptestc_256:
17267 X86CC = X86::COND_B;
17268 break;
17269 case Intrinsic::x86_avx_vtestnzc_ps:
17270 case Intrinsic::x86_avx_vtestnzc_pd:
17271 case Intrinsic::x86_avx_vtestnzc_ps_256:
17272 case Intrinsic::x86_avx_vtestnzc_pd_256:
17273 IsTestPacked = true; // Fallthrough
17274 case Intrinsic::x86_sse41_ptestnzc:
17275 case Intrinsic::x86_avx_ptestnzc_256:
17277 X86CC = X86::COND_A;
17278 break;
17279 }
17281 SDValue LHS = Op.getOperand(1);
17282 SDValue RHS = Op.getOperand(2);
17283 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
17284 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
17285 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17286 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
17287 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17289 case Intrinsic::x86_avx512_kortestz_w:
17290 case Intrinsic::x86_avx512_kortestc_w: {
17291 unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz_w)? X86::COND_E: X86::COND_B;
17292 SDValue LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(1));
17293 SDValue RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(2));
17294 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17295 SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
17296 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test);
17297 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17300 case Intrinsic::x86_sse42_pcmpistria128:
17301 case Intrinsic::x86_sse42_pcmpestria128:
17302 case Intrinsic::x86_sse42_pcmpistric128:
17303 case Intrinsic::x86_sse42_pcmpestric128:
17304 case Intrinsic::x86_sse42_pcmpistrio128:
17305 case Intrinsic::x86_sse42_pcmpestrio128:
17306 case Intrinsic::x86_sse42_pcmpistris128:
17307 case Intrinsic::x86_sse42_pcmpestris128:
17308 case Intrinsic::x86_sse42_pcmpistriz128:
17309 case Intrinsic::x86_sse42_pcmpestriz128: {
17310 unsigned Opcode;
17311 unsigned X86CC;
17312 switch (IntNo) {
17313 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
17314 case Intrinsic::x86_sse42_pcmpistria128:
17315 Opcode = X86ISD::PCMPISTRI;
17316 X86CC = X86::COND_A;
17317 break;
17318 case Intrinsic::x86_sse42_pcmpestria128:
17319 Opcode = X86ISD::PCMPESTRI;
17320 X86CC = X86::COND_A;
17321 break;
17322 case Intrinsic::x86_sse42_pcmpistric128:
17323 Opcode = X86ISD::PCMPISTRI;
17324 X86CC = X86::COND_B;
17325 break;
17326 case Intrinsic::x86_sse42_pcmpestric128:
17327 Opcode = X86ISD::PCMPESTRI;
17328 X86CC = X86::COND_B;
17329 break;
17330 case Intrinsic::x86_sse42_pcmpistrio128:
17331 Opcode = X86ISD::PCMPISTRI;
17332 X86CC = X86::COND_O;
17333 break;
17334 case Intrinsic::x86_sse42_pcmpestrio128:
17335 Opcode = X86ISD::PCMPESTRI;
17336 X86CC = X86::COND_O;
17337 break;
17338 case Intrinsic::x86_sse42_pcmpistris128:
17339 Opcode = X86ISD::PCMPISTRI;
17340 X86CC = X86::COND_S;
17341 break;
17342 case Intrinsic::x86_sse42_pcmpestris128:
17343 Opcode = X86ISD::PCMPESTRI;
17344 X86CC = X86::COND_S;
17345 break;
17346 case Intrinsic::x86_sse42_pcmpistriz128:
17347 Opcode = X86ISD::PCMPISTRI;
17348 X86CC = X86::COND_E;
17349 break;
17350 case Intrinsic::x86_sse42_pcmpestriz128:
17351 Opcode = X86ISD::PCMPESTRI;
17352 X86CC = X86::COND_E;
17353 break;
17354 }
17355 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17356 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17357 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps);
17358 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17359 DAG.getConstant(X86CC, MVT::i8),
17360 SDValue(PCMP.getNode(), 1));
17361 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17364 case Intrinsic::x86_sse42_pcmpistri128:
17365 case Intrinsic::x86_sse42_pcmpestri128: {
17366 unsigned Opcode;
17367 if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
17368 Opcode = X86ISD::PCMPISTRI;
17369 else
17370 Opcode = X86ISD::PCMPESTRI;
17372 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17373 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17374 return DAG.getNode(Opcode, dl, VTs, NewOps);
17379 static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17380 SDValue Src, SDValue Mask, SDValue Base,
17381 SDValue Index, SDValue ScaleOp, SDValue Chain,
17382 const X86Subtarget *Subtarget) {
17383 SDLoc dl(Op);
17384 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17385 assert(C && "Invalid scale type");
17386 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17387 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17388 Index.getSimpleValueType().getVectorNumElements());
17389 SDValue MaskInReg;
17390 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17391 if (MaskC)
17392 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17393 else
17394 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17395 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
17396 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17397 SDValue Segment = DAG.getRegister(0, MVT::i32);
17398 if (Src.getOpcode() == ISD::UNDEF)
17399 Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
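// The operands below follow X86 memory-operand form. As a sketch, lane i of
// the gather reads from roughly:
//   addr(i) = Base + Index[i] * Scale + Disp   // Segment is 0 here
// with MaskInReg gating which lanes are loaded; masked-off lanes keep Src.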
17400 SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17401 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17402 SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
17403 return DAG.getMergeValues(RetOps, dl);
17406 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17407 SDValue Src, SDValue Mask, SDValue Base,
17408 SDValue Index, SDValue ScaleOp, SDValue Chain) {
17410 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17411 assert(C && "Invalid scale type");
17412 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17413 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17414 SDValue Segment = DAG.getRegister(0, MVT::i32);
17415 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17416 Index.getSimpleValueType().getVectorNumElements());
17417 SDValue MaskInReg;
17418 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17419 if (MaskC)
17420 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17421 else
17422 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17423 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
17424 SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
17425 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17426 return SDValue(Res, 1);
17429 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17430 SDValue Mask, SDValue Base, SDValue Index,
17431 SDValue ScaleOp, SDValue Chain) {
17433 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17434 assert(C && "Invalid scale type");
17435 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17436 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17437 SDValue Segment = DAG.getRegister(0, MVT::i32);
17438 EVT MaskVT =
17439 MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
17440 SDValue MaskInReg;
17441 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17442 if (MaskC)
17443 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17444 else
17445 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17447 SDValue Ops[] = {MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17448 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
17449 return SDValue(Res, 0);
17452 // getReadPerformanceCounter - Handles the lowering of builtin intrinsics that
17453 // read performance monitor counters (x86_rdpmc).
17454 static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
17455 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17456 SmallVectorImpl<SDValue> &Results) {
17457 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17458 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
17461 // The ECX register is used to select the index of the performance counter
17462 // to read.
17463 SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
17464 N->getOperand(2));
17465 SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);
17466 SDValue LO, HI;
17467 // Reads the content of a 64-bit performance counter and returns it in the
17468 // registers EDX:EAX.
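// A hedged scalar sketch of the EDX:EAX (or RDX:RAX) merge performed below:
//   uint64_t merge(uint32_t lo, uint32_t hi) {
//     return ((uint64_t)hi << 32) | lo;
//   }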
17469 if (Subtarget->is64Bit()) {
17470 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17471 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17472 LO.getValue(2));
17473 } else {
17474 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17475 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17476 LO.getValue(2));
17477 }
17478 Chain = HI.getValue(1);
17480 if (Subtarget->is64Bit()) {
17481 // The EAX register is loaded with the low-order 32 bits. The EDX register
17482 // is loaded with the supported high-order bits of the counter.
17483 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17484 DAG.getConstant(32, MVT::i8));
17485 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17486 Results.push_back(Chain);
17490 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17491 SDValue Ops[] = { LO, HI };
17492 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17493 Results.push_back(Pair);
17494 Results.push_back(Chain);
17497 // getReadTimeStampCounter - Handles the lowering of builtin intrinsics that
17498 // read the time stamp counter (x86_rdtsc and x86_rdtscp). This function is
17499 // also used to custom lower READCYCLECOUNTER nodes.
17500 static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
17501 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17502 SmallVectorImpl<SDValue> &Results) {
17503 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
17504 SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
17506 SDValue LO, HI;
17507 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
17508 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
17509 // and the EAX register is loaded with the low-order 32 bits.
17510 if (Subtarget->is64Bit()) {
17511 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17512 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17513 LO.getValue(2));
17514 } else {
17515 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17516 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17517 LO.getValue(2));
17518 }
17519 SDValue Chain = HI.getValue(1);
17521 if (Opcode == X86ISD::RDTSCP_DAG) {
17522 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17524 // Instruction RDTSCP loads the IA32:TSC_AUX_MSR (address C000_0103H) into
17525 // the ECX register. Add 'ecx' explicitly to the chain.
17526 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
17528 // Explicitly store the content of ECX at the location passed as input
17529 // to the 'rdtscp' intrinsic.
17530 Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
17531 MachinePointerInfo(), false, false, 0);
17534 if (Subtarget->is64Bit()) {
17535 // The EDX register is loaded with the high-order 32 bits of the MSR, and
17536 // the EAX register is loaded with the low-order 32 bits.
17537 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17538 DAG.getConstant(32, MVT::i8));
17539 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17540 Results.push_back(Chain);
17544 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17545 SDValue Ops[] = { LO, HI };
17546 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17547 Results.push_back(Pair);
17548 Results.push_back(Chain);
17551 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
17552 SelectionDAG &DAG) {
17553 SmallVector<SDValue, 2> Results;
17555 getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
17557 return DAG.getMergeValues(Results, DL);
17561 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17562 SelectionDAG &DAG) {
17563 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
17565 const IntrinsicData* IntrData = getIntrinsicWithChain(IntNo);
17566 if (!IntrData)
17567 return SDValue();
17569 SDLoc dl(Op);
17570 switch(IntrData->Type) {
17571 default:
17572 llvm_unreachable("Unknown Intrinsic Type");
17573 case RDSEED:
17574 case RDRAND: {
17576 // Emit the node with the right value type.
17577 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
17578 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
17580 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
17581 // Otherwise return the value from Rand, which is always 0, cast to i32.
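// A rough user-level analogue of this contract (hedged, for intuition only)
// is the _rdrand64_step builtin: it returns 1 on success (CF=1) with the
// random value written out, or 0 on failure, in which case the value is 0.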
17582 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
17583 DAG.getConstant(1, Op->getValueType(1)),
17584 DAG.getConstant(X86::COND_B, MVT::i32),
17585 SDValue(Result.getNode(), 1) };
17586 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
17587 DAG.getVTList(Op->getValueType(1), MVT::Glue),
17588 Ops);
17590 // Return { result, isValid, chain }.
17591 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
17592 SDValue(Result.getNode(), 2));
17593 }
17594 case GATHER: {
17595 // gather(v1, mask, index, base, scale);
17596 SDValue Chain = Op.getOperand(0);
17597 SDValue Src = Op.getOperand(2);
17598 SDValue Base = Op.getOperand(3);
17599 SDValue Index = Op.getOperand(4);
17600 SDValue Mask = Op.getOperand(5);
17601 SDValue Scale = Op.getOperand(6);
17602 return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain,
17603 Subtarget);
17604 }
17605 case SCATTER: {
17606 // scatter(base, mask, index, v1, scale);
17607 SDValue Chain = Op.getOperand(0);
17608 SDValue Base = Op.getOperand(2);
17609 SDValue Mask = Op.getOperand(3);
17610 SDValue Index = Op.getOperand(4);
17611 SDValue Src = Op.getOperand(5);
17612 SDValue Scale = Op.getOperand(6);
17613 return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain);
17614 }
17615 case PREFETCH: {
17616 SDValue Hint = Op.getOperand(6);
17617 unsigned HintVal;
17618 if (dyn_cast<ConstantSDNode>(Hint) == nullptr ||
17619 (HintVal = dyn_cast<ConstantSDNode>(Hint)->getZExtValue()) > 1)
17620 llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
17621 unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
17622 SDValue Chain = Op.getOperand(0);
17623 SDValue Mask = Op.getOperand(2);
17624 SDValue Index = Op.getOperand(3);
17625 SDValue Base = Op.getOperand(4);
17626 SDValue Scale = Op.getOperand(5);
17627 return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain);
17628 }
17629 // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
17630 case RDTSC: {
17631 SmallVector<SDValue, 2> Results;
17632 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget, Results);
17633 return DAG.getMergeValues(Results, dl);
17634 }
17635 // Read Performance Monitoring Counters.
17636 case RDPMC: {
17637 SmallVector<SDValue, 2> Results;
17638 getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
17639 return DAG.getMergeValues(Results, dl);
17640 }
17641 // XTEST intrinsics.
17642 case XTEST: {
17643 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
17644 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
17645 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17646 DAG.getConstant(X86::COND_NE, MVT::i8),
17647 InTrans);
17648 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
17649 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
17650 Ret, SDValue(InTrans.getNode(), 1));
17651 }
17652 // ADC/ADCX/SBB
17653 case ADX: {
17654 SmallVector<SDValue, 2> Results;
17655 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
17656 SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
17657 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
17658 DAG.getConstant(-1, MVT::i8));
17659 SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
17660 Op.getOperand(4), GenCF.getValue(1));
17661 SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
17662 Op.getOperand(5), MachinePointerInfo(),
17663 false, false, 0);
17664 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17665 DAG.getConstant(X86::COND_B, MVT::i8),
17666 Res.getValue(1));
17667 Results.push_back(SetCC);
17668 Results.push_back(Store);
17669 return DAG.getMergeValues(Results, dl);
17671 case COMPRESS_TO_MEM: {
17673 SDValue Mask = Op.getOperand(4);
17674 SDValue DataToCompress = Op.getOperand(3);
17675 SDValue Addr = Op.getOperand(2);
17676 SDValue Chain = Op.getOperand(0);
17678 if (isAllOnes(Mask)) // return just a store
17679 return DAG.getStore(Chain, dl, DataToCompress, Addr,
17680 MachinePointerInfo(), false, false, 0);
17682 EVT VT = DataToCompress.getValueType();
17683 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17684 VT.getVectorNumElements());
17685 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17686 Mask.getValueType().getSizeInBits());
17687 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17688 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17689 DAG.getIntPtrConstant(0));
17691 SDValue Compressed = DAG.getNode(IntrData->Opc0, dl, VT, VMask,
17692 DataToCompress, DAG.getUNDEF(VT));
17693 return DAG.getStore(Chain, dl, Compressed, Addr,
17694 MachinePointerInfo(), false, false, 0);
17696 case EXPAND_FROM_MEM: {
17698 SDValue Mask = Op.getOperand(4);
17699 SDValue PassThru = Op.getOperand(3);
17700 SDValue Addr = Op.getOperand(2);
17701 SDValue Chain = Op.getOperand(0);
17702 EVT VT = Op.getValueType();
17704 if (isAllOnes(Mask)) // return just a load
17705 return DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(), false, false,
17706 false, 0);
17707 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17708 VT.getVectorNumElements());
17709 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17710 Mask.getValueType().getSizeInBits());
17711 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17712 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17713 DAG.getIntPtrConstant(0));
17715 SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(),
17716 false, false, false, 0);
17718 SmallVector<SDValue, 2> Results;
17719 Results.push_back(DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToExpand,
17720 PassThru));
17721 Results.push_back(Chain);
17722 return DAG.getMergeValues(Results, dl);
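// A hedged scalar model of the expand-load semantics assembled above:
//   // j walks the memory side; masked-off lanes keep the pass-through lane.
//   for (int i = 0, j = 0; i < N; ++i)
//     dst[i] = mask[i] ? mem[j++] : passthru[i];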
17727 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
17728 SelectionDAG &DAG) const {
17729 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
17730 MFI->setReturnAddressIsTaken(true);
17732 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
17733 return SDValue();
17735 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17736 SDLoc dl(Op);
17737 EVT PtrVT = getPointerTy();
17739 if (Depth > 0) {
17740 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
17741 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
17742 DAG.getSubtarget().getRegisterInfo());
17743 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
17744 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
17745 DAG.getNode(ISD::ADD, dl, PtrVT,
17746 FrameAddr, Offset),
17747 MachinePointerInfo(), false, false, false, 0);
17748 }
17750 // Just load the return address.
17751 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
17752 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
17753 RetAddrFI, MachinePointerInfo(), false, false, false, 0);
17756 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
17757 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
17758 MFI->setFrameAddressIsTaken(true);
17760 EVT VT = Op.getValueType();
17761 SDLoc dl(Op); // FIXME probably not meaningful
17762 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17763 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
17764 DAG.getSubtarget().getRegisterInfo());
17765 unsigned FrameReg = RegInfo->getPtrSizedFrameRegister(
17766 DAG.getMachineFunction());
17767 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
17768 (FrameReg == X86::EBP && VT == MVT::i32)) &&
17769 "Invalid Frame Register!");
17770 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
17771 while (Depth--)
17772 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
17773 MachinePointerInfo(),
17774 false, false, false, 0);
17775 return FrameAddr;
17776 }
17778 // FIXME? Maybe this could be a TableGen attribute on some registers and
17779 // this table could be generated automatically from RegInfo.
17780 unsigned X86TargetLowering::getRegisterByName(const char* RegName,
17781 EVT VT) const {
17782 unsigned Reg = StringSwitch<unsigned>(RegName)
17783 .Case("esp", X86::ESP)
17784 .Case("rsp", X86::RSP)
17788 report_fatal_error("Invalid register name global variable");
17791 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
17792 SelectionDAG &DAG) const {
17793 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
17794 DAG.getSubtarget().getRegisterInfo());
17795 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
17798 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
17799 SDValue Chain = Op.getOperand(0);
17800 SDValue Offset = Op.getOperand(1);
17801 SDValue Handler = Op.getOperand(2);
17803 SDLoc dl(Op);
17804 EVT PtrVT = getPointerTy();
17805 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
17806 DAG.getSubtarget().getRegisterInfo());
17807 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
17808 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
17809 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
17810 "Invalid Frame Register!");
17811 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
17812 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
17814 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
17815 DAG.getIntPtrConstant(RegInfo->getSlotSize()));
17816 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
17817 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
17818 false, false, 0);
17819 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
17821 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
17822 DAG.getRegister(StoreAddrReg, PtrVT));
17825 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
17826 SelectionDAG &DAG) const {
17827 SDLoc DL(Op);
17828 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
17829 DAG.getVTList(MVT::i32, MVT::Other),
17830 Op.getOperand(0), Op.getOperand(1));
17833 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
17834 SelectionDAG &DAG) const {
17835 SDLoc DL(Op);
17836 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
17837 Op.getOperand(0), Op.getOperand(1));
17840 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
17841 return Op.getOperand(0);
17844 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
17845 SelectionDAG &DAG) const {
17846 SDValue Root = Op.getOperand(0);
17847 SDValue Trmp = Op.getOperand(1); // trampoline
17848 SDValue FPtr = Op.getOperand(2); // nested function
17849 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
17852 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
17853 const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
17855 if (Subtarget->is64Bit()) {
17856 SDValue OutChains[6];
17858 // Large code-model.
17859 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
17860 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
17862 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
17863 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
17865 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
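// For reference, the trampoline bytes emitted below lay out roughly as
// follows (a sketch; the exact opcode bytes come from the values above):
//   0:  49 BB <imm64 fptr>   movabsq $fptr, %r11
//  10:  49 BA <imm64 nest>   movabsq $nest, %r10
//  20:  49 FF E3             jmpq   *%r11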
17867 // Load the pointer to the nested function into R11.
17868 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
17869 SDValue Addr = Trmp;
17870 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
17871 Addr, MachinePointerInfo(TrmpAddr),
17874 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
17875 DAG.getConstant(2, MVT::i64));
17876 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
17877 MachinePointerInfo(TrmpAddr, 2),
17880 // Load the 'nest' parameter value into R10.
17881 // R10 is specified in X86CallingConv.td
17882 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
17883 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
17884 DAG.getConstant(10, MVT::i64));
17885 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
17886 Addr, MachinePointerInfo(TrmpAddr, 10),
17889 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
17890 DAG.getConstant(12, MVT::i64));
17891 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
17892 MachinePointerInfo(TrmpAddr, 12),
17895 // Jump to the nested function.
17896 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
17897 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
17898 DAG.getConstant(20, MVT::i64));
17899 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
17900 Addr, MachinePointerInfo(TrmpAddr, 20),
17903 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
17904 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
17905 DAG.getConstant(22, MVT::i64));
17906 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
17907 MachinePointerInfo(TrmpAddr, 22),
17910 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
17912 const Function *Func =
17913 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
17914 CallingConv::ID CC = Func->getCallingConv();
17919 llvm_unreachable("Unsupported calling convention");
17920 case CallingConv::C:
17921 case CallingConv::X86_StdCall: {
17922 // Pass 'nest' parameter in ECX.
17923 // Must be kept in sync with X86CallingConv.td
17924 NestReg = X86::ECX;
17926 // Check that ECX wasn't needed by an 'inreg' parameter.
17927 FunctionType *FTy = Func->getFunctionType();
17928 const AttributeSet &Attrs = Func->getAttributes();
17930 if (!Attrs.isEmpty() && !Func->isVarArg()) {
17931 unsigned InRegCount = 0;
17932 unsigned Idx = 1;
17934 for (FunctionType::param_iterator I = FTy->param_begin(),
17935 E = FTy->param_end(); I != E; ++I, ++Idx)
17936 if (Attrs.hasAttribute(Idx, Attribute::InReg))
17937 // FIXME: should only count parameters that are lowered to integers.
17938 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
17940 if (InRegCount > 2) {
17941 report_fatal_error("Nest register in use - reduce number of inreg"
17947 case CallingConv::X86_FastCall:
17948 case CallingConv::X86_ThisCall:
17949 case CallingConv::Fast:
17950 // Pass 'nest' parameter in EAX.
17951 // Must be kept in sync with X86CallingConv.td
17952 NestReg = X86::EAX;
17953 break;
17954 }
17956 SDValue OutChains[4];
17957 SDValue Addr, Disp;
17959 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
17960 DAG.getConstant(10, MVT::i32));
17961 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
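// Sketch of the 10-byte sequence being assembled below: a mov of 'nest'
// into the chosen register, then a relative jump to the nested function:
//   0:  B8+r <imm32 nest>   movl $nest, %ecx (or %eax)
//   5:  E9   <rel32>        jmp  fptr        // rel32 = fptr - (trmp + 10)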
17963 // This is storing the opcode for MOV32ri.
17964 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
17965 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
17966 OutChains[0] = DAG.getStore(Root, dl,
17967 DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
17968 Trmp, MachinePointerInfo(TrmpAddr),
17971 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
17972 DAG.getConstant(1, MVT::i32));
17973 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
17974 MachinePointerInfo(TrmpAddr, 1),
17977 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
17978 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
17979 DAG.getConstant(5, MVT::i32));
17980 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
17981 MachinePointerInfo(TrmpAddr, 5),
17984 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
17985 DAG.getConstant(6, MVT::i32));
17986 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
17987 MachinePointerInfo(TrmpAddr, 6),
17990 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
17994 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
17995 SelectionDAG &DAG) const {
17996 /*
17997 The rounding mode is in bits 11:10 of FPSR, and has the following
17998 settings:
17999 00 Round to nearest
18000 01 Round to -inf
18001 10 Round to +inf
18002 11 Round to 0
18004 FLT_ROUNDS, on the other hand, expects the following:
18005 -1 Undefined
18006 0 Round to 0
18007 1 Round to nearest
18008 2 Round to +inf
18009 3 Round to -inf
18011 To perform the conversion, we do:
18012 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
18013 */
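// Sanity check of the mapping for the four FPSR settings (bit 11, bit 10):
//   00 -> ((0|0)+1)&3 = 1 (nearest)    01 -> ((0|2)+1)&3 = 3 (-inf)
//   10 -> ((1|0)+1)&3 = 2 (+inf)       11 -> ((1|2)+1)&3 = 0 (to zero)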
18015 MachineFunction &MF = DAG.getMachineFunction();
18016 const TargetMachine &TM = MF.getTarget();
18017 const TargetFrameLowering &TFI = *TM.getSubtargetImpl()->getFrameLowering();
18018 unsigned StackAlignment = TFI.getStackAlignment();
18019 MVT VT = Op.getSimpleValueType();
18022 // Save FP Control Word to stack slot
18023 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
18024 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
18026 MachineMemOperand *MMO =
18027 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
18028 MachineMemOperand::MOStore, 2, 2);
18030 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
18031 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
18032 DAG.getVTList(MVT::Other),
18033 Ops, MVT::i16, MMO);
18035 // Load FP Control Word from stack slot
18036 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
18037 MachinePointerInfo(), false, false, false, 0);
18039 // Transform as necessary
18040 SDValue CWD1 =
18041 DAG.getNode(ISD::SRL, DL, MVT::i16,
18042 DAG.getNode(ISD::AND, DL, MVT::i16,
18043 CWD, DAG.getConstant(0x800, MVT::i16)),
18044 DAG.getConstant(11, MVT::i8));
18045 SDValue CWD2 =
18046 DAG.getNode(ISD::SRL, DL, MVT::i16,
18047 DAG.getNode(ISD::AND, DL, MVT::i16,
18048 CWD, DAG.getConstant(0x400, MVT::i16)),
18049 DAG.getConstant(9, MVT::i8));
18051 SDValue RetVal =
18052 DAG.getNode(ISD::AND, DL, MVT::i16,
18053 DAG.getNode(ISD::ADD, DL, MVT::i16,
18054 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
18055 DAG.getConstant(1, MVT::i16)),
18056 DAG.getConstant(3, MVT::i16));
18058 return DAG.getNode((VT.getSizeInBits() < 16 ?
18059 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
18062 static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
18063 MVT VT = Op.getSimpleValueType();
18064 MVT OpVT = VT;
18065 unsigned NumBits = VT.getSizeInBits();
18066 SDLoc dl(Op);
18068 Op = Op.getOperand(0);
18069 if (VT == MVT::i8) {
18070 // Zero extend to i32 since there is not an i8 bsr.
18071 OpVT = MVT::i32;
18072 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18073 }
18075 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
18076 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18077 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18079 // If src is zero (i.e. bsr sets ZF), returns NumBits.
18080 SDValue Ops[] = {
18081 Op,
18082 DAG.getConstant(NumBits+NumBits-1, OpVT),
18083 DAG.getConstant(X86::COND_E, MVT::i8),
18084 Op.getValue(1)
18085 };
18086 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
18088 // Finally xor with NumBits-1.
18089 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
18091 if (VT == MVT::i8)
18092 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18093 return Op;
18094 }
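// Worked example for i32: ctlz(x) = bsr(x) ^ 31 for x != 0 (e.g. x = 1:
// bsr = 0 and 0 ^ 31 = 31). For x == 0 the CMOV substitutes 2*32-1 = 63,
// and 63 ^ 31 = 32, the defined ctlz(0) result.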
18096 static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
18097 MVT VT = Op.getSimpleValueType();
18098 MVT OpVT = VT;
18099 unsigned NumBits = VT.getSizeInBits();
18101 SDLoc dl(Op);
18102 Op = Op.getOperand(0);
18103 if (VT == MVT::i8) {
18104 // Zero extend to i32 since there is not an i8 bsr.
18105 OpVT = MVT::i32;
18106 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18107 }
18109 // Issue a bsr (scan bits in reverse).
18110 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18111 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18113 // And xor with NumBits-1.
18114 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
18116 if (VT == MVT::i8)
18117 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18118 return Op;
18119 }
18121 static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
18122 MVT VT = Op.getSimpleValueType();
18123 unsigned NumBits = VT.getSizeInBits();
18125 Op = Op.getOperand(0);
18127 // Issue a bsf (scan bits forward) which also sets EFLAGS.
18128 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
18129 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);
18131 // If src is zero (i.e. bsf sets ZF), returns NumBits.
18132 SDValue Ops[] = {
18133 Op,
18134 DAG.getConstant(NumBits, VT),
18135 DAG.getConstant(X86::COND_E, MVT::i8),
18136 Op.getValue(1)
18137 };
18138 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
18141 // Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
18142 // ones, and then concatenate the result back.
18143 static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
18144 MVT VT = Op.getSimpleValueType();
18146 assert(VT.is256BitVector() && VT.isInteger() &&
18147 "Unsupported value type for operation");
18149 unsigned NumElems = VT.getVectorNumElements();
18152 // Extract the LHS vectors
18153 SDValue LHS = Op.getOperand(0);
18154 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
18155 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
18157 // Extract the RHS vectors
18158 SDValue RHS = Op.getOperand(1);
18159 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
18160 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
18162 MVT EltVT = VT.getVectorElementType();
18163 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
18165 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
18166 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
18167 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
18170 static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) {
18171 assert(Op.getSimpleValueType().is256BitVector() &&
18172 Op.getSimpleValueType().isInteger() &&
18173 "Only handle AVX 256-bit vector integer operation");
18174 return Lower256IntArith(Op, DAG);
18177 static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
18178 assert(Op.getSimpleValueType().is256BitVector() &&
18179 Op.getSimpleValueType().isInteger() &&
18180 "Only handle AVX 256-bit vector integer operation");
18181 return Lower256IntArith(Op, DAG);
18184 static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
18185 SelectionDAG &DAG) {
18187 MVT VT = Op.getSimpleValueType();
18189 // Decompose 256-bit ops into smaller 128-bit ops.
18190 if (VT.is256BitVector() && !Subtarget->hasInt256())
18191 return Lower256IntArith(Op, DAG);
18193 SDValue A = Op.getOperand(0);
18194 SDValue B = Op.getOperand(1);
18196 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
18197 if (VT == MVT::v4i32) {
18198 assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() &&
18199 "Should not custom lower when pmuldq is available!");
18201 // Extract the odd parts.
18202 static const int UnpackMask[] = { 1, -1, 3, -1 };
18203 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
18204 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
18206 // Multiply the even parts.
18207 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B);
18208 // Now multiply odd parts.
18209 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds);
18211 Evens = DAG.getNode(ISD::BITCAST, dl, VT, Evens);
18212 Odds = DAG.getNode(ISD::BITCAST, dl, VT, Odds);
18214 // Merge the two vectors back together with a shuffle. This expands into 2
18216 static const int ShufMask[] = { 0, 4, 2, 6 };
18217 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
18220 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
18221 "Only know how to lower V2I64/V4I64/V8I64 multiply");
18223 // Ahi = psrlqi(a, 32);
18224 // Bhi = psrlqi(b, 32);
18226 // AloBlo = pmuludq(a, b);
18227 // AloBhi = pmuludq(a, Bhi);
18228 // AhiBlo = pmuludq(Ahi, b);
18230 // AloBhi = psllqi(AloBhi, 32);
18231 // AhiBlo = psllqi(AhiBlo, 32);
18232 // return AloBlo + AloBhi + AhiBlo;
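// This follows from writing a = 2^32*Ahi + Alo and b = 2^32*Bhi + Blo:
//   a*b mod 2^64 = Alo*Blo + 2^32*(Alo*Bhi + Ahi*Blo)
// since the Ahi*Bhi term is shifted entirely out of the low 64 bits.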
18234 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
18235 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
18237 // Bit cast to 32-bit vectors for MULUDQ
18238 EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 :
18239 (VT == MVT::v4i64) ? MVT::v8i32 : MVT::v16i32;
18240 A = DAG.getNode(ISD::BITCAST, dl, MulVT, A);
18241 B = DAG.getNode(ISD::BITCAST, dl, MulVT, B);
18242 Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi);
18243 Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi);
18245 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
18246 SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
18247 SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
18249 AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG);
18250 AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG);
18252 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
18253 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
18256 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
18257 assert(Subtarget->isTargetWin64() && "Unexpected target");
18258 EVT VT = Op.getValueType();
18259 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
18260 "Unexpected return type for lowering");
18261 SDLoc dl(Op);
18262 RTLIB::Libcall LC;
18263 bool isSigned;
18264 switch (Op->getOpcode()) {
18265 default: llvm_unreachable("Unexpected request for libcall!");
18266 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
18267 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
18268 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
18269 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
18270 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
18271 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
18275 SDValue InChain = DAG.getEntryNode();
18277 TargetLowering::ArgListTy Args;
18278 TargetLowering::ArgListEntry Entry;
18279 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
18280 EVT ArgVT = Op->getOperand(i).getValueType();
18281 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
18282 "Unexpected argument type for lowering");
18283 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
18284 Entry.Node = StackPtr;
18285 InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MachinePointerInfo(),
18287 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
18288 Entry.Ty = PointerType::get(ArgTy,0);
18289 Entry.isSExt = false;
18290 Entry.isZExt = false;
18291 Args.push_back(Entry);
18294 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
18297 TargetLowering::CallLoweringInfo CLI(DAG);
18298 CLI.setDebugLoc(dl).setChain(InChain)
18299 .setCallee(getLibcallCallingConv(LC),
18300 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
18301 Callee, std::move(Args), 0)
18302 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
18304 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
18305 return DAG.getNode(ISD::BITCAST, dl, VT, CallInfo.first);
18308 static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
18309 SelectionDAG &DAG) {
18310 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
18311 EVT VT = Op0.getValueType();
18314 assert((VT == MVT::v4i32 && Subtarget->hasSSE2()) ||
18315 (VT == MVT::v8i32 && Subtarget->hasInt256()));
18317 // PMULxD operations multiply each even value (starting at 0) of LHS with
18318 // the related value of RHS and produce a widened result.
18319 // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18320 // => <2 x i64> <ae|cg>
18322 // In other words, to have all the results, we need to perform two PMULxD:
18323 // 1. one with the even values.
18324 // 2. one with the odd values.
18325 // To achieve #2, we need to place the odd values at an even position.
18327 // Place the odd value at an even position (basically, shift all values 1
18328 // step to the left):
18329 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
18330 // <a|b|c|d> => <b|undef|d|undef>
18331 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0, Mask);
18332 // <e|f|g|h> => <f|undef|h|undef>
18333 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1, Mask);
18335 // Emit two multiplies, one for the lower 2 ints and one for the higher 2
18337 MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
18338 bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
18340 (!IsSigned || !Subtarget->hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
18341 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18342 // => <2 x i64> <ae|cg>
18343 SDValue Mul1 = DAG.getNode(ISD::BITCAST, dl, VT,
18344 DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
18345 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
18346 // => <2 x i64> <bf|dh>
18347 SDValue Mul2 = DAG.getNode(ISD::BITCAST, dl, VT,
18348 DAG.getNode(Opcode, dl, MulVT, Odd0, Odd1));
18350 // Shuffle it back into the right order.
18351 SDValue Highs, Lows;
18352 if (VT == MVT::v8i32) {
18353 const int HighMask[] = {1, 9, 3, 11, 5, 13, 7, 15};
18354 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18355 const int LowMask[] = {0, 8, 2, 10, 4, 12, 6, 14};
18356 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18358 const int HighMask[] = {1, 5, 3, 7};
18359 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18360 const int LowMask[] = {0, 4, 2, 6};
18361 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18364 // If we have a signed multiply but no PMULDQ, fix up the high parts of an
18365 // unsigned multiply.
18366 if (IsSigned && !Subtarget->hasSSE41()) {
18367 SDValue ShAmt =
18368 DAG.getConstant(31, DAG.getTargetLoweringInfo().getShiftAmountTy(VT));
18369 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
18370 DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
18371 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
18372 DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0);
18374 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
18375 Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup);
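// This implements the standard identity (sketch of the reasoning):
//   mulhs(a, b) = mulhu(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0)
// where (a >> 31) & b and (b >> 31) & a materialize the two corrections.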
18376 }
18378 // The first result of MUL_LOHI is actually the low value, followed by the
18379 // high one.
18380 SDValue Ops[] = {Lows, Highs};
18381 return DAG.getMergeValues(Ops, dl);
18384 static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
18385 const X86Subtarget *Subtarget) {
18386 MVT VT = Op.getSimpleValueType();
18388 SDValue R = Op.getOperand(0);
18389 SDValue Amt = Op.getOperand(1);
18391 // Optimize shl/srl/sra with constant shift amount.
18392 if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
18393 if (auto *ShiftConst = BVAmt->getConstantSplatNode()) {
18394 uint64_t ShiftAmt = ShiftConst->getZExtValue();
18396 if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
18397 (Subtarget->hasInt256() &&
18398 (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18399 (Subtarget->hasAVX512() &&
18400 (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18401 if (Op.getOpcode() == ISD::SHL)
18402 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
18404 if (Op.getOpcode() == ISD::SRL)
18405 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
18407 if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64)
18408 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
18412 if (VT == MVT::v16i8) {
18413 if (Op.getOpcode() == ISD::SHL) {
18414 // Make a large shift.
18415 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18416 MVT::v8i16, R, ShiftAmt,
18418 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18419 // Zero out the rightmost bits.
18420 SmallVector<SDValue, 16> V(16,
18421 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18422 MVT::i8));
18423 return DAG.getNode(ISD::AND, dl, VT, SHL,
18424 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18426 if (Op.getOpcode() == ISD::SRL) {
18427 // Make a large shift.
18428 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18429 MVT::v8i16, R, ShiftAmt,
18431 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18432 // Zero out the leftmost bits.
18433 SmallVector<SDValue, 16> V(16,
18434 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18435 MVT::i8));
18436 return DAG.getNode(ISD::AND, dl, VT, SRL,
18437 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18439 if (Op.getOpcode() == ISD::SRA) {
18440 if (ShiftAmt == 7) {
18441 // R s>> 7 === R s< 0
18442 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18443 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18444 }
18446 // R s>> a === ((R u>> a) ^ m) - m
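// Worked check with a = 2, so m = 128 >> 2 = 0x20: for the byte 0xFF (-1),
// (0xFF u>> 2) = 0x3F; 0x3F ^ 0x20 = 0x1F; 0x1F - 0x20 = 0xFF, i.e. -1,
// exactly the arithmetic-shift result.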
18447 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18448 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt,
18449 MVT::i8));
18450 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18451 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18452 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18453 return Res;
18454 }
18455 llvm_unreachable("Unknown shift opcode.");
18458 if (Subtarget->hasInt256() && VT == MVT::v32i8) {
18459 if (Op.getOpcode() == ISD::SHL) {
18460 // Make a large shift.
18461 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18462 MVT::v16i16, R, ShiftAmt,
18464 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18465 // Zero out the rightmost bits.
18466 SmallVector<SDValue, 32> V(32,
18467 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18468 MVT::i8));
18469 return DAG.getNode(ISD::AND, dl, VT, SHL,
18470 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18472 if (Op.getOpcode() == ISD::SRL) {
18473 // Make a large shift.
18474 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18475 MVT::v16i16, R, ShiftAmt,
18477 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18478 // Zero out the leftmost bits.
18479 SmallVector<SDValue, 32> V(32,
18480 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18481 MVT::i8));
18482 return DAG.getNode(ISD::AND, dl, VT, SRL,
18483 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18485 if (Op.getOpcode() == ISD::SRA) {
18486 if (ShiftAmt == 7) {
18487 // R s>> 7 === R s< 0
18488 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18489 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18490 }
18492 // R s>> a === ((R u>> a) ^ m) - m
18493 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18494 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt,
18495 MVT::i8));
18496 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18497 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18498 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18499 return Res;
18500 }
18501 llvm_unreachable("Unknown shift opcode.");
18506 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
18507 if (!Subtarget->is64Bit() &&
18508 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) &&
18509 Amt.getOpcode() == ISD::BITCAST &&
18510 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
18511 Amt = Amt.getOperand(0);
18512 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
18513 VT.getVectorNumElements();
18514 unsigned RatioInLog2 = Log2_32_Ceil(Ratio);
18515 uint64_t ShiftAmt = 0;
18516 for (unsigned i = 0; i != Ratio; ++i) {
18517 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i));
18521 ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2)));
18523 // Check remaining shift amounts.
18524 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
18525 uint64_t ShAmt = 0;
18526 for (unsigned j = 0; j != Ratio; ++j) {
18527 ConstantSDNode *C =
18528 dyn_cast<ConstantSDNode>(Amt.getOperand(i + j));
18532 ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2)));
18534 if (ShAmt != ShiftAmt)
18537 switch (Op.getOpcode()) {
18539 llvm_unreachable("Unknown shift opcode!");
18541 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
18544 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
18547 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
18555 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
18556 const X86Subtarget* Subtarget) {
18557 MVT VT = Op.getSimpleValueType();
18559 SDValue R = Op.getOperand(0);
18560 SDValue Amt = Op.getOperand(1);
18562 if ((VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) ||
18563 VT == MVT::v4i32 || VT == MVT::v8i16 ||
18564 (Subtarget->hasInt256() &&
18565 ((VT == MVT::v4i64 && Op.getOpcode() != ISD::SRA) ||
18566 VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18567 (Subtarget->hasAVX512() && (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18569 EVT EltVT = VT.getVectorElementType();
18571 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Amt)) {
18572 // Check if this build_vector node is doing a splat.
18573 // If so, then set BaseShAmt equal to the splat value.
18574 BaseShAmt = BV->getSplatValue();
18575 if (BaseShAmt && BaseShAmt.getOpcode() == ISD::UNDEF)
18576 BaseShAmt = SDValue();
18578 if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
18579 Amt = Amt.getOperand(0);
18581 ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt);
18582 if (SVN && SVN->isSplat()) {
18583 unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
18584 SDValue InVec = Amt.getOperand(0);
18585 if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
18586 assert((SplatIdx < InVec.getValueType().getVectorNumElements()) &&
18587 "Unexpected shuffle index found!");
18588 BaseShAmt = InVec.getOperand(SplatIdx);
18589 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
18590 if (ConstantSDNode *C =
18591 dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
18592 if (C->getZExtValue() == SplatIdx)
18593 BaseShAmt = InVec.getOperand(1);
18598 // Avoid introducing an extract element from a shuffle.
18599 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec,
18600 DAG.getIntPtrConstant(SplatIdx));
18604 if (BaseShAmt.getNode()) {
18605 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
18606 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
18607 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
18608 else if (EltVT.bitsLT(MVT::i32))
18609 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
18611 switch (Op.getOpcode()) {
18613 llvm_unreachable("Unknown shift opcode!");
18615 switch (VT.SimpleTy) {
18616 default: return SDValue();
18625 return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG);
18628 switch (VT.SimpleTy) {
18629 default: return SDValue();
18636 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG);
18639 switch (VT.SimpleTy) {
18640 default: return SDValue();
18649 return getTargetVShiftNode(X86ISD::VSRLI, dl, VT, R, BaseShAmt, DAG);
18655 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
18656 if (!Subtarget->is64Bit() &&
18657 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64) ||
18658 (Subtarget->hasAVX512() && VT == MVT::v8i64)) &&
18659 Amt.getOpcode() == ISD::BITCAST &&
18660 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
18661 Amt = Amt.getOperand(0);
18662 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
18663 VT.getVectorNumElements();
18664 std::vector<SDValue> Vals(Ratio);
18665 for (unsigned i = 0; i != Ratio; ++i)
18666 Vals[i] = Amt.getOperand(i);
18667 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
18668 for (unsigned j = 0; j != Ratio; ++j)
18669 if (Vals[j] != Amt.getOperand(i + j))
18672 switch (Op.getOpcode()) {
18674 llvm_unreachable("Unknown shift opcode!");
18676 return DAG.getNode(X86ISD::VSHL, dl, VT, R, Op.getOperand(1));
18678 return DAG.getNode(X86ISD::VSRL, dl, VT, R, Op.getOperand(1));
18680 return DAG.getNode(X86ISD::VSRA, dl, VT, R, Op.getOperand(1));
18687 static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
18688 SelectionDAG &DAG) {
18689 MVT VT = Op.getSimpleValueType();
18691 SDValue R = Op.getOperand(0);
18692 SDValue Amt = Op.getOperand(1);
18693 SDValue V;
18695 assert(VT.isVector() && "Custom lowering only for vector shifts!");
18696 assert(Subtarget->hasSSE2() && "Only custom lower when we have SSE2!");
18698 V = LowerScalarImmediateShift(Op, DAG, Subtarget);
18699 if (V.getNode())
18700 return V;
18702 V = LowerScalarVariableShift(Op, DAG, Subtarget);
18703 if (V.getNode())
18704 return V;
18706 if (Subtarget->hasAVX512() && (VT == MVT::v16i32 || VT == MVT::v8i64))
18707 return Op;
18708 // AVX2 has VPSLLV/VPSRAV/VPSRLV.
18709 if (Subtarget->hasInt256()) {
18710 if (Op.getOpcode() == ISD::SRL &&
18711 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
18712 VT == MVT::v4i64 || VT == MVT::v8i32))
18713 return Op;
18714 if (Op.getOpcode() == ISD::SHL &&
18715 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
18716 VT == MVT::v4i64 || VT == MVT::v8i32))
18717 return Op;
18718 if (Op.getOpcode() == ISD::SRA && (VT == MVT::v4i32 || VT == MVT::v8i32))
18719 return Op;
18720 }
18722 // If possible, lower this packed shift into a vector multiply instead of
18723 // expanding it into a sequence of scalar shifts.
18724 // Do this only if the vector shift count is a constant build_vector.
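// For instance (illustrative only): (shl <4 x i32> %R, <1, 2, 3, 4>) is
// rebuilt below as (mul %R, <2, 4, 8, 16>), folding each constant shift
// amount C into a per-lane multiplier of 1 << C.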
  if (Op.getOpcode() == ISD::SHL &&
      (VT == MVT::v8i16 || VT == MVT::v4i32 ||
       (Subtarget->hasInt256() && VT == MVT::v16i16)) &&
      ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
    SmallVector<SDValue, 8> Elts;
    EVT SVT = VT.getScalarType();
    unsigned SVTBits = SVT.getSizeInBits();
    const APInt &One = APInt(SVTBits, 1);
    unsigned NumElems = VT.getVectorNumElements();

    for (unsigned i = 0; i != NumElems; ++i) {
      SDValue Op = Amt->getOperand(i);
      if (Op->getOpcode() == ISD::UNDEF) {
        Elts.push_back(Op);
        continue;
      }

      ConstantSDNode *ND = cast<ConstantSDNode>(Op);
      const APInt &C = APInt(SVTBits, ND->getAPIntValue().getZExtValue());
      uint64_t ShAmt = C.getZExtValue();
      if (ShAmt >= SVTBits) {
        Elts.push_back(DAG.getUNDEF(SVT));
        continue;
      }
      Elts.push_back(DAG.getConstant(One.shl(ShAmt), SVT));
    }
    SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
    return DAG.getNode(ISD::MUL, dl, VT, R, BV);
  }
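  // The lowering below builds, per lane, the float 2^amt by placing the shift
  // amount directly into the exponent field (bit 23 upward) of 0x3f800000
  // (1.0f), then converts back to integer and multiplies. A rough sketch of
  // the idea on one lane:
  //   float f = bit_cast<float>((amt << 23) + 0x3f800000);  // f == 2^amt
  //   result = x * (int)f;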
  // Lower SHL with variable shift amount.
  if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
    Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT));

    Op = DAG.getNode(ISD::ADD, dl, VT, Op, DAG.getConstant(0x3f800000U, VT));
    Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
    Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
    return DAG.getNode(ISD::MUL, dl, VT, Op, R);
  }
  // If possible, lower this shift as a sequence of two shifts by
  // constant plus a MOVSS/MOVSD instead of scalarizing it.
  // Example:
  //   (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
  //
  // Could be rewritten as:
  //   (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
  //
  // The advantage is that the two shifts from the example would be
  // lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
  // the vector shift into four scalar shifts plus four pairs of vector
  // insert/extract.
  if ((VT == MVT::v8i16 || VT == MVT::v4i32) &&
      ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
    unsigned TargetOpcode = X86ISD::MOVSS;
    bool CanBeSimplified;
    // The splat value for the first packed shift (the 'X' from the example).
    SDValue Amt1 = Amt->getOperand(0);
    // The splat value for the second packed shift (the 'Y' from the example).
    SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) :
                                        Amt->getOperand(2);

    // See if it is possible to replace this node with a sequence of
    // two shifts followed by a MOVSS/MOVSD.
    if (VT == MVT::v4i32) {
      // Check if it is legal to use a MOVSS.
      CanBeSimplified = Amt2 == Amt->getOperand(2) &&
                        Amt2 == Amt->getOperand(3);
      if (!CanBeSimplified) {
        // Otherwise, check if we can still simplify this node using a MOVSD.
        CanBeSimplified = Amt1 == Amt->getOperand(1) &&
                          Amt->getOperand(2) == Amt->getOperand(3);
        TargetOpcode = X86ISD::MOVSD;
        Amt2 = Amt->getOperand(2);
      }
    } else {
      // Do similar checks for the case where the machine value type
      // is MVT::v8i16.
      CanBeSimplified = Amt1 == Amt->getOperand(1);
      for (unsigned i = 3; i != 8 && CanBeSimplified; ++i)
        CanBeSimplified = Amt2 == Amt->getOperand(i);

      if (!CanBeSimplified) {
        TargetOpcode = X86ISD::MOVSD;
        CanBeSimplified = true;
        Amt2 = Amt->getOperand(4);
        for (unsigned i = 0; i != 4 && CanBeSimplified; ++i)
          CanBeSimplified = Amt1 == Amt->getOperand(i);
        for (unsigned j = 4; j != 8 && CanBeSimplified; ++j)
          CanBeSimplified = Amt2 == Amt->getOperand(j);
      }
    }

    if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
        isa<ConstantSDNode>(Amt2)) {
      // Replace this node with two shifts followed by a MOVSS/MOVSD.
      EVT CastVT = MVT::v4i32;
      SDValue Splat1 =
          DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), VT);
      SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
      SDValue Splat2 =
          DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), VT);
      SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
      if (TargetOpcode == X86ISD::MOVSD)
        CastVT = MVT::v2i64;
      SDValue BitCast1 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift1);
      SDValue BitCast2 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift2);
      SDValue Result = getTargetShuffleNode(TargetOpcode, dl, CastVT, BitCast2,
                                            BitCast1, DAG);
      return DAG.getNode(ISD::BITCAST, dl, VT, Result);
    }
  }
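  // There is no byte-granularity shift instruction, so v16i8 SHL is built
  // from the bits of the amount: after (amt << 5) the current amount bit sits
  // in bit 7 of each byte, PCMPEQ against 0x80 turns it into a lane mask, and
  // VSELECT conditionally applies a partial shift (by 4, then 2, then 1).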
  if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
    assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq.");

    // a = a << 5;
    Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, VT));
    Op = DAG.getNode(ISD::BITCAST, dl, VT, Op);

    // Turn 'a' into a mask suitable for VSELECT
    SDValue VSelM = DAG.getConstant(0x80, VT);
    SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
    OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);

    SDValue CM1 = DAG.getConstant(0x0f, VT);
    SDValue CM2 = DAG.getConstant(0x3f, VT);

    // r = VSELECT(r, psllw(r & (char16)15, 4), a);
    SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1);
    M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 4, DAG);
    M = DAG.getNode(ISD::BITCAST, dl, VT, M);
    R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);

    // a += a
    Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
    OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
    OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);

    // r = VSELECT(r, psllw(r & (char16)63, 2), a);
    M = DAG.getNode(ISD::AND, dl, VT, R, CM2);
    M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 2, DAG);
    M = DAG.getNode(ISD::BITCAST, dl, VT, M);
    R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);

    // a += a
    Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
    OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
    OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);

    // return VSELECT(r, r+r, a);
    R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel,
                    DAG.getNode(ISD::ADD, dl, VT, R, R), R);
    return R;
  }
  // It's worth extending once and using the v8i32 shifts for 16-bit types, but
  // the extra overheads to get from v16i8 to v8i32 make the existing SSE
  // solution better.
  if (Subtarget->hasInt256() && VT == MVT::v8i16) {
    MVT NewVT = VT == MVT::v8i16 ? MVT::v8i32 : MVT::v16i16;
    unsigned ExtOpc =
        Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    R = DAG.getNode(ExtOpc, dl, NewVT, R);
    Amt = DAG.getNode(ISD::ANY_EXTEND, dl, NewVT, Amt);
    return DAG.getNode(ISD::TRUNCATE, dl, VT,
                       DAG.getNode(Op.getOpcode(), dl, NewVT, R, Amt));
  }
  // Decompose 256-bit shifts into smaller 128-bit shifts.
  if (VT.is256BitVector()) {
    unsigned NumElems = VT.getVectorNumElements();
    MVT EltVT = VT.getVectorElementType();
    EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);

    // Extract the two vectors
    SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
    SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);

    // Recreate the shift amount vectors
    SDValue Amt1, Amt2;
    if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
      // Constant shift amount
      SmallVector<SDValue, 4> Amt1Csts;
      SmallVector<SDValue, 4> Amt2Csts;
      for (unsigned i = 0; i != NumElems/2; ++i)
        Amt1Csts.push_back(Amt->getOperand(i));
      for (unsigned i = NumElems/2; i != NumElems; ++i)
        Amt2Csts.push_back(Amt->getOperand(i));

      Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
      Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt2Csts);
    } else {
      // Variable shift amount
      Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
      Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
    }

    // Issue new vector shifts for the smaller types
    V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
    V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);

    // Concatenate the result back
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
  }

  return SDValue();
}
static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
  // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
  // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
  // looks for this combo and may remove the "setcc" instruction if the "setcc"
  // has only one use.
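  // For example, (i32, i1) = ISD::UADDO(x, y) becomes
  //   Sum = X86ISD::ADD x, y        (the second result is EFLAGS)
  //   Ovf = X86ISD::SETCC COND_B, Sum.getValue(1)
  // reusing the carry flag of the plain ADD as the overflow bit.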
  SDNode *N = Op.getNode();
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  unsigned BaseOp = 0;
  unsigned Cond = 0;
  SDLoc DL(Op);
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown ovf instruction!");
  case ISD::SADDO:
    // An add of one will be selected as an INC. Note that INC doesn't
    // set CF, so we can't do this for UADDO.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
      if (C->isOne()) {
        BaseOp = X86ISD::INC;
        Cond = X86::COND_O;
        break;
      }
    BaseOp = X86ISD::ADD;
    Cond = X86::COND_O;
    break;
  case ISD::UADDO:
    BaseOp = X86ISD::ADD;
    Cond = X86::COND_B;
    break;
  case ISD::SSUBO:
    // A subtract of one will be selected as a DEC. Note that DEC doesn't
    // set CF, so we can't do this for USUBO.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
      if (C->isOne()) {
        BaseOp = X86ISD::DEC;
        Cond = X86::COND_O;
        break;
      }
    BaseOp = X86ISD::SUB;
    Cond = X86::COND_O;
    break;
  case ISD::USUBO:
    BaseOp = X86ISD::SUB;
    Cond = X86::COND_B;
    break;
  case ISD::SMULO:
    BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL;
    Cond = X86::COND_O;
    break;
  case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
    if (N->getValueType(0) == MVT::i8) {
      BaseOp = X86ISD::UMUL8;
      Cond = X86::COND_O;
      break;
    }
    SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
                                 MVT::i32);
    SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);

    SDValue SetCC =
        DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
                    DAG.getConstant(X86::COND_O, MVT::i32),
                    SDValue(Sum.getNode(), 2));

    return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
  }
  }

  // Also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
  SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);

  SDValue SetCC =
      DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
                  DAG.getConstant(Cond, MVT::i32),
                  SDValue(Sum.getNode(), 1));

  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
}
// Sign extension of the low part of vector elements. This may be used either
// when sign extend instructions are not available or if the vector element
// sizes already match the sign-extended size. If the vector elements are in
// their pre-extended size and sign extend instructions are available, that
// will be handled by LowerSIGN_EXTEND.
SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc dl(Op);
  EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
  MVT VT = Op.getSimpleValueType();

  if (!Subtarget->hasSSE2() || !VT.isVector())
    return SDValue();

  unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
                      ExtraVT.getScalarType().getSizeInBits();

  switch (VT.SimpleTy) {
  default: return SDValue();
  case MVT::v8i32:
  case MVT::v16i16:
    if (!Subtarget->hasFp256())
      return SDValue();
    if (!Subtarget->hasInt256()) {
      // needs to be split
      unsigned NumElems = VT.getVectorNumElements();

      // Extract the LHS vectors
      SDValue LHS = Op.getOperand(0);
      SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
      SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);

      MVT EltVT = VT.getVectorElementType();
      EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);

      EVT ExtraEltVT = ExtraVT.getVectorElementType();
      unsigned ExtraNumElems = ExtraVT.getVectorNumElements();
      ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT,
                                 ExtraNumElems/2);
      SDValue Extra = DAG.getValueType(ExtraVT);

      LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra);
      LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra);

      return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);
    }
    // fall through
  case MVT::v4i32:
  case MVT::v8i16: {
    SDValue Op0 = Op.getOperand(0);

    // This is a sign extension of some low part of vector elements without
    // changing the size of the vector elements themselves:
    // Shift-Left + Shift-Right-Algebraic.
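    // For instance, sign-extending the low i8 of each i16 lane uses a
    // BitsDiff of 8: the left shift moves the byte to the top of its lane and
    // the arithmetic right shift smears its sign bit back across the lane.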
    SDValue Shl = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Op0,
                                             BitsDiff, DAG);
    return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Shl, BitsDiff,
                                      DAG);
  }
  }
}
/// Returns true if the operand type is exactly twice the native width, and
/// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
/// Used to know whether to use cmpxchg8/16b when expanding atomic operations
/// (otherwise we leave them alone to become __sync_fetch_and_... calls).
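/// For example, an atomic i64 operation on 32-bit x86 (OpWidth == 64 with a
/// native width of 32) can only be expanded inline when cmpxchg8b exists.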
bool X86TargetLowering::needsCmpXchgNb(const Type *MemType) const {
  const X86Subtarget &Subtarget =
      getTargetMachine().getSubtarget<X86Subtarget>();
  unsigned OpWidth = MemType->getPrimitiveSizeInBits();

  if (OpWidth == 64)
    return !Subtarget.is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
  else if (OpWidth == 128)
    return Subtarget.hasCmpxchg16b();

  return false;
}

bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
  return needsCmpXchgNb(SI->getValueOperand()->getType());
}

// Note: this turns large loads into lock cmpxchg8b/16b.
// FIXME: On 32 bits x86, fild/movq might be faster than lock cmpxchg8b.
bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
  auto PTy = cast<PointerType>(LI->getPointerOperand()->getType());
  return needsCmpXchgNb(PTy->getElementType());
}
bool X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  const X86Subtarget &Subtarget =
      getTargetMachine().getSubtarget<X86Subtarget>();
  unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
  const Type *MemType = AI->getType();

  // If the operand is too big, we must see if cmpxchg8/16b is available
  // and default to library calls otherwise.
  if (MemType->getPrimitiveSizeInBits() > NativeWidth)
    return needsCmpXchgNb(MemType);

  AtomicRMWInst::BinOp Op = AI->getOperation();
  switch (Op) {
  default:
    llvm_unreachable("Unknown atomic operation");
  case AtomicRMWInst::Xchg:
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
    // It's better to use xadd, xsub or xchg for these in all cases.
    return false;
  case AtomicRMWInst::Or:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Xor:
    // If the atomicrmw's result isn't actually used, we can just add a "lock"
    // prefix to a normal instruction for these operations.
    return !AI->use_empty();
  case AtomicRMWInst::Nand:
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
    // These always require a non-trivial set of data operations on x86. We
    // must use a cmpxchg loop.
    return true;
  }
}

static bool hasMFENCE(const X86Subtarget& Subtarget) {
  // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
  // no-sse2). There isn't any reason to disable it if the target processor
  // supports it.
  return Subtarget.hasSSE2() || Subtarget.is64Bit();
}
LoadInst *
X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
  const X86Subtarget &Subtarget =
      getTargetMachine().getSubtarget<X86Subtarget>();
  unsigned NativeWidth = Subtarget.is64Bit() ? 64 : 32;
  const Type *MemType = AI->getType();
  // Accesses larger than the native width are turned into cmpxchg/libcalls, so
  // there is no benefit in turning such RMWs into loads, and it is actually
  // harmful as it introduces a mfence.
  if (MemType->getPrimitiveSizeInBits() > NativeWidth)
    return nullptr;

  auto Builder = IRBuilder<>(AI);
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  auto SynchScope = AI->getSynchScope();
  // We must restrict the ordering to avoid generating loads with Release or
  // ReleaseAcquire orderings.
  auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
  auto Ptr = AI->getPointerOperand();

  // Before the load we need a fence. Here is an example lifted from
  // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
  // is required:
  // Thread 0:
  //  x.store(1, relaxed);
  //  r1 = y.fetch_add(0, release);
  // Thread 1:
  //  y.fetch_add(42, acquire);
  //  r2 = x.load(relaxed);
  // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
  // lowered to just a load without a fence. A mfence flushes the store buffer,
  // making the optimization clearly correct.
  // FIXME: the fence is required if isAtLeastRelease(Order), but it is not
  // clear it is needed otherwise; we might be able to be more aggressive on
  // relaxed idempotent rmw. In practice, they do not look useful, so we don't
  // try to be especially clever.
  if (SynchScope == SingleThread) {
    // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
    // the IR level, so we must wrap it in an intrinsic.
    return nullptr;
  } else if (hasMFENCE(Subtarget)) {
    Function *MFence = llvm::Intrinsic::getDeclaration(M,
        Intrinsic::x86_sse2_mfence);
    Builder.CreateCall(MFence);
  } else {
    // FIXME: it might make sense to use a locked operation here but on a
    // different cache-line to prevent cache-line bouncing. In practice it
    // is probably a small win, and x86 processors without mfence are rare
    // enough that we do not bother.
    return nullptr;
  }

  // Finally we can emit the atomic load.
  LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
      AI->getType()->getPrimitiveSizeInBits());
  Loaded->setAtomic(Order, SynchScope);
  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();
  return Loaded;
}
static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
                                 SelectionDAG &DAG) {
  SDLoc dl(Op);
  AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
      cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
  SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
      cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());

  // The only fence that needs an instruction is a sequentially-consistent
  // cross-thread fence.
  if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
    if (hasMFENCE(*Subtarget))
      return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
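    // Without MFENCE, fall back to a locked no-op RMW: "lock orl $0, (%esp)"
    // serializes memory operations while leaving memory unchanged, so it acts
    // as a full barrier on SSE2-less processors.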
    SDValue Chain = Op.getOperand(0);
    SDValue Zero = DAG.getConstant(0, MVT::i32);
    SDValue Ops[] = {
      DAG.getRegister(X86::ESP, MVT::i32), // Base
      DAG.getTargetConstant(1, MVT::i8),   // Scale
      DAG.getRegister(0, MVT::i32),        // Index
      DAG.getTargetConstant(0, MVT::i32),  // Disp
      DAG.getRegister(0, MVT::i32),        // Segment.
      Zero,
      Chain
    };
    SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
    return SDValue(Res, 0);
  }

  // MEMBARRIER is a compiler barrier; it codegens to a no-op.
  return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
}
static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
                             SelectionDAG &DAG) {
  MVT T = Op.getSimpleValueType();
  SDLoc DL(Op);
  unsigned Reg = 0;
  unsigned size = 0;
  switch(T.SimpleTy) {
  default: llvm_unreachable("Invalid value type!");
  case MVT::i8:  Reg = X86::AL;  size = 1; break;
  case MVT::i16: Reg = X86::AX;  size = 2; break;
  case MVT::i32: Reg = X86::EAX; size = 4; break;
  case MVT::i64:
    assert(Subtarget->is64Bit() && "Node not type legal!");
    Reg = X86::RAX; size = 8;
    break;
  }
  SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
                                  Op.getOperand(2), SDValue());
  SDValue Ops[] = { cpIn.getValue(0),
                    Op.getOperand(1),
                    Op.getOperand(3),
                    DAG.getTargetConstant(size, MVT::i8),
                    cpIn.getValue(1) };
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
  SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
                                           Ops, T, MMO);
  SDValue cpOut =
      DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
  SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
                                      MVT::i32, cpOut.getValue(2));
  SDValue Success = DAG.getNode(X86ISD::SETCC, DL, Op->getValueType(1),
                                DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);

  DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
  return SDValue();
}
static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
                            SelectionDAG &DAG) {
  MVT SrcVT = Op.getOperand(0).getSimpleValueType();
  MVT DstVT = Op.getSimpleValueType();
  SDLoc dl(Op);

  if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) {
    assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
    if (DstVT != MVT::f64)
      // This conversion needs to be expanded.
      return SDValue();

    SDValue InVec = Op->getOperand(0);
    unsigned NumElts = SrcVT.getVectorNumElements();
    EVT SVT = SrcVT.getVectorElementType();

    // Widen the input vector in the case of MVT::v2i32.
    // Example: from MVT::v2i32 to MVT::v4i32.
    SmallVector<SDValue, 16> Elts;
    for (unsigned i = 0, e = NumElts; i != e; ++i)
      Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
                                 DAG.getIntPtrConstant(i)));

    // Explicitly mark the extra elements as Undef.
    SDValue Undef = DAG.getUNDEF(SVT);
    for (unsigned i = NumElts, e = NumElts * 2; i != e; ++i)
      Elts.push_back(Undef);

    EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
    SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
    SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
                       DAG.getIntPtrConstant(0));
  }

  assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
         Subtarget->hasMMX() && "Unexpected custom BITCAST");
  assert((DstVT == MVT::i64 ||
          (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
         "Unexpected custom BITCAST");
  // i64 <=> MMX conversions are Legal.
  if (SrcVT==MVT::i64 && DstVT.isVector())
    return Op;
  if (DstVT==MVT::i64 && SrcVT.isVector())
    return Op;
  // MMX <=> MMX conversions are Legal.
  if (SrcVT.isVector() && DstVT.isVector())
    return Op;
  // All other conversions need to be expanded.
  return SDValue();
}
static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
                          SelectionDAG &DAG) {
  SDNode *Node = Op.getNode();
  SDLoc dl(Node);

  Op = Op.getOperand(0);
  EVT VT = Op.getValueType();
  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "CTPOP lowering only implemented for 128/256-bit wide vector types");

  unsigned NumElts = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  unsigned Len = EltVT.getSizeInBits();

  // This is the vectorized version of the "best" algorithm from
  // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
  // with a minor tweak to use a series of adds + shifts instead of vector
  // multiplications. Implemented for the v2i64, v4i64, v4i32, v8i32 types:
  //
  //  v2i64, v4i64, v4i32 => Only profitable w/ popcnt disabled
  //  v8i32 => Always profitable
  //
  // FIXME: There are a couple of possible improvements:
  //
  // 1) Support for i8 and i16 vectors (needs measurements if popcnt enabled).
  // 2) Use strategies from http://wm.ite.pl/articles/sse-popcount.html
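  //
  // Scalar sketch of the i32 bithack computed below, one lane at a time:
  //   v = v - ((v >> 1) & 0x55555555);                // 2-bit partial sums
  //   v = (v & 0x33333333) + ((v >> 2) & 0x33333333); // 4-bit partial sums
  //   v = (v + (v >> 4)) & 0x0F0F0F0F;                // 8-bit partial sums
  //   v = v + (v >> 8); v = v + (v >> 16);            // accumulate bytes
  //   v &= 0x3F;                                      // 6 significant bits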
  assert(EltVT.isInteger() && (Len == 32 || Len == 64) && Len % 8 == 0 &&
         "CTPOP not implemented for this vector element type.");

  // X86 canonicalizes ANDs to vXi64; generate the appropriate bitcasts to
  // avoid extra legalization.
  bool NeedsBitcast = EltVT == MVT::i32;
  MVT BitcastVT = VT.is256BitVector() ? MVT::v4i64 : MVT::v2i64;

  SDValue Cst55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), EltVT);
  SDValue Cst33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), EltVT);
  SDValue Cst0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), EltVT);

  // v = v - ((v >> 1) & 0x55555555...)
  SmallVector<SDValue, 8> Ones(NumElts, DAG.getConstant(1, EltVT));
  SDValue OnesV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ones);
  SDValue Srl = DAG.getNode(ISD::SRL, dl, VT, Op, OnesV);
  if (NeedsBitcast)
    Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);

  SmallVector<SDValue, 8> Mask55(NumElts, Cst55);
  SDValue M55 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask55);
  if (NeedsBitcast)
    M55 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M55);

  SDValue And = DAG.getNode(ISD::AND, dl, Srl.getValueType(), Srl, M55);
  if (VT != And.getValueType())
    And = DAG.getNode(ISD::BITCAST, dl, VT, And);
  SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Op, And);

  // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
  SmallVector<SDValue, 8> Mask33(NumElts, Cst33);
  SDValue M33 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask33);
  SmallVector<SDValue, 8> Twos(NumElts, DAG.getConstant(2, EltVT));
  SDValue TwosV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Twos);

  Srl = DAG.getNode(ISD::SRL, dl, VT, Sub, TwosV);
  if (NeedsBitcast) {
    Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
    M33 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M33);
    Sub = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Sub);
  }

  SDValue AndRHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Srl, M33);
  SDValue AndLHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Sub, M33);
  if (VT != AndRHS.getValueType()) {
    AndRHS = DAG.getNode(ISD::BITCAST, dl, VT, AndRHS);
    AndLHS = DAG.getNode(ISD::BITCAST, dl, VT, AndLHS);
  }
  SDValue Add = DAG.getNode(ISD::ADD, dl, VT, AndLHS, AndRHS);

  // v = (v + (v >> 4)) & 0x0F0F0F0F...
  SmallVector<SDValue, 8> Fours(NumElts, DAG.getConstant(4, EltVT));
  SDValue FoursV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Fours);
  Srl = DAG.getNode(ISD::SRL, dl, VT, Add, FoursV);
  Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);

  SmallVector<SDValue, 8> Mask0F(NumElts, Cst0F);
  SDValue M0F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask0F);
  if (NeedsBitcast) {
    Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
    M0F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M0F);
  }
  And = DAG.getNode(ISD::AND, dl, M0F.getValueType(), Add, M0F);
  if (VT != And.getValueType())
    And = DAG.getNode(ISD::BITCAST, dl, VT, And);

  // The algorithm mentioned above uses:
  //    v = (v * 0x01010101...) >> (Len - 8)
  //
  // Change it to use vector adds + vector shifts which yield faster results on
  // Haswell than using vector integer multiplication.
  //
  // For i32 elements:
  //    v = v + (v >> 8)
  //    v = v + (v >> 16)
  //
  // For i64 elements:
  //    v = v + (v >> 8)
  //    v = v + (v >> 16)
  //    v = v + (v >> 32)
  Add = And;
  SmallVector<SDValue, 8> Csts;
  for (unsigned i = 8; i <= Len/2; i *= 2) {
    Csts.assign(NumElts, DAG.getConstant(i, EltVT));
    SDValue CstsV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Csts);
    Srl = DAG.getNode(ISD::SRL, dl, VT, Add, CstsV);
    Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
  }

  // The result is in the least significant 6 bits for i32 and 7 bits for i64.
  SDValue Cst3F = DAG.getConstant(APInt(Len, Len == 32 ? 0x3F : 0x7F), EltVT);
  SmallVector<SDValue, 8> Cst3FV(NumElts, Cst3F);
  SDValue M3F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Cst3FV);
  if (NeedsBitcast) {
    Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
    M3F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M3F);
  }
  And = DAG.getNode(ISD::AND, dl, M3F.getValueType(), Add, M3F);
  if (VT != And.getValueType())
    And = DAG.getNode(ISD::BITCAST, dl, VT, And);

  return And;
}
static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
  SDNode *Node = Op.getNode();
  SDLoc dl(Node);
  EVT T = Node->getValueType(0);
  SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
                              DAG.getConstant(0, T), Node->getOperand(2));
  return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
                       cast<AtomicSDNode>(Node)->getMemoryVT(),
                       Node->getOperand(0),
                       Node->getOperand(1), negOp,
                       cast<AtomicSDNode>(Node)->getMemOperand(),
                       cast<AtomicSDNode>(Node)->getOrdering(),
                       cast<AtomicSDNode>(Node)->getSynchScope());
}
static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
  SDNode *Node = Op.getNode();
  SDLoc dl(Node);
  EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();

  // Convert seq_cst store -> xchg
  // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
  // FIXME: On 32-bit, store -> fist or movq would be more efficient
  //        (The only way to get a 16-byte store is cmpxchg16b)
  // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
  if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
      !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
    SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
                                 cast<AtomicSDNode>(Node)->getMemoryVT(),
                                 Node->getOperand(0),
                                 Node->getOperand(1), Node->getOperand(2),
                                 cast<AtomicSDNode>(Node)->getMemOperand(),
                                 cast<AtomicSDNode>(Node)->getOrdering(),
                                 cast<AtomicSDNode>(Node)->getSynchScope());
    return Swap.getValue(1);
  }
  // Other atomic stores have a simple pattern.
  return Op;
}
static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getNode()->getSimpleValueType(0);

  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  SDVTList VTs = DAG.getVTList(VT, MVT::i32);

  unsigned Opc;
  bool ExtraOp = false;
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Invalid code");
  case ISD::ADDC: Opc = X86ISD::ADD; break;
  case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
  case ISD::SUBC: Opc = X86ISD::SUB; break;
  case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
  }

  if (!ExtraOp)
    return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
                       Op.getOperand(1));
  return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
                     Op.getOperand(1), Op.getOperand(2));
}
static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
                            SelectionDAG &DAG) {
  assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit());

  // For MacOSX, we want to call an alternative entry point: __sincos_stret,
  // which returns the values as { float, float } (in XMM0) or
  // { double, double } (which is returned in XMM0, XMM1).
  SDLoc dl(Op);
  SDValue Arg = Op.getOperand(0);
  EVT ArgVT = Arg.getValueType();
  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Node = Arg;
  Entry.Ty = ArgTy;
  Entry.isSExt = false;
  Entry.isZExt = false;
  Args.push_back(Entry);

  bool isF64 = ArgVT == MVT::f64;
  // Only optimize x86_64 for now. i386 is a bit messy. For f32,
  // the small struct {f32, f32} is returned in (eax, edx). For f64,
  // the results are returned via SRet in memory.
  const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret";
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Callee = DAG.getExternalSymbol(LibcallName, TLI.getPointerTy());

  Type *RetTy = isF64
      ? (Type*)StructType::get(ArgTy, ArgTy, nullptr)
      : (Type*)VectorType::get(ArgTy, 4);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
    .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);

  std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);

  if (isF64)
    // Returned in xmm0 and xmm1.
    return CallResult.first;

  // Returned in bits 0:31 and 32:63 of xmm0.
  SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
                               CallResult.first, DAG.getIntPtrConstant(0));
  SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
                               CallResult.first, DAG.getIntPtrConstant(1));
  SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
  return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
}
/// LowerOperation - Provide custom lowering hooks for some operations.
SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Should not custom lower this!");
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG);
  case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
    return LowerCMP_SWAP(Op, Subtarget, DAG);
  case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
  case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
  case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG);
  case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::VSELECT: return LowerVSELECT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
  case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
  case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
  case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
  case ISD::SHL_PARTS:
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
  case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
  case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
  case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
  case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
  case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
  case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
  case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG);
  case ISD::FABS:
  case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
  case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
  case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
  case ISD::SETCC: return LowerSETCC(Op, DAG);
  case ISD::SELECT: return LowerSELECT(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::JumpTable: return LowerJumpTable(Op, DAG);
  case ISD::VASTART: return LowerVASTART(Op, DAG);
  case ISD::VAARG: return LowerVAARG(Op, DAG);
  case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, Subtarget, DAG);
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
  case ISD::FRAME_TO_ARGS_OFFSET:
    return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
  case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
  case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
  case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
  case ISD::CTLZ: return LowerCTLZ(Op, DAG);
  case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG);
  case ISD::CTTZ: return LowerCTTZ(Op, DAG);
  case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
  case ISD::UMUL_LOHI:
  case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
  case ISD::SRA:
  case ISD::SRL:
  case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO: return LowerXALUO(Op, DAG);
  case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
  case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
  case ISD::ADD: return LowerADD(Op, DAG);
  case ISD::SUB: return LowerSUB(Op, DAG);
  case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
  }
}
/// ReplaceNodeResults - Replace a node with an illegal result type
/// with a new node built out of custom code.
void X86TargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue>&Results,
                                           SelectionDAG &DAG) const {
  SDLoc dl(N);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");
  // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
  case X86ISD::FMINC:
  case X86ISD::FMIN:
  case X86ISD::FMAXC:
  case X86ISD::FMAX: {
    EVT VT = N->getValueType(0);
    if (VT != MVT::v2f32)
      llvm_unreachable("Unexpected type (!= v2f32) on FMIN/FMAX.");
    SDValue UNDEF = DAG.getUNDEF(VT);
    SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
                              N->getOperand(0), UNDEF);
    SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
                              N->getOperand(1), UNDEF);
    Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
    return;
  }
  case ISD::SIGN_EXTEND_INREG:
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE:
    // We don't want to expand or promote these.
    return;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
    Results.push_back(V);
    return;
  }
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT: {
    bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;

    if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType()))
      return;

    std::pair<SDValue,SDValue> Vals =
        FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
    SDValue FIST = Vals.first, StackSlot = Vals.second;
    if (FIST.getNode()) {
      EVT VT = N->getValueType(0);
      // Return a load from the stack slot.
      if (StackSlot.getNode())
        Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
                                      MachinePointerInfo(),
                                      false, false, false, 0));
      else
        Results.push_back(FIST);
    }
    return;
  }
  case ISD::UINT_TO_FP: {
    assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
    if (N->getOperand(0).getValueType() != MVT::v2i32 ||
        N->getValueType(0) != MVT::v2f32)
      return;
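    // Bit trick used below: 0x4330000000000000 is the double 2^52. OR'ing a
    // zero-extended 32-bit integer into the low mantissa bits of 2^52 yields
    // the exact double 2^52 + x, so subtracting the bias recovers x.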
    SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
                                 N->getOperand(0));
    SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
                                     MVT::f64);
    SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias);
    SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
                             DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias));
    Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or);
    SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
    Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
    return;
  }
  case ISD::FP_ROUND: {
    if (!TLI.isTypeLegal(N->getOperand(0).getValueType()))
      return;
    SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
    Results.push_back(V);
    return;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
    switch (IntNo) {
    default : llvm_unreachable("Do not know how to custom type "
                               "legalize this intrinsic operation!");
    case Intrinsic::x86_rdtsc:
      return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
                                     Results);
    case Intrinsic::x86_rdtscp:
      return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
                                     Results);
    case Intrinsic::x86_rdpmc:
      return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
    }
  }
  case ISD::READCYCLECOUNTER: {
    return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
                                   Results);
  }
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
    EVT T = N->getValueType(0);
    assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
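    // cmpxchg8b/cmpxchg16b compare EDX:EAX (resp. RDX:RAX) with the memory
    // operand and, on success, store ECX:EBX (resp. RCX:RBX), so both the
    // expected and new values are split into halves pinned to those registers.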
    bool Regs64bit = T == MVT::i128;
    EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
    SDValue cpInL, cpInH;
    cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
                        DAG.getConstant(0, HalfT));
    cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
                        DAG.getConstant(1, HalfT));
    cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
                             Regs64bit ? X86::RAX : X86::EAX,
                             cpInL, SDValue());
    cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
                             Regs64bit ? X86::RDX : X86::EDX,
                             cpInH, cpInL.getValue(1));
    SDValue swapInL, swapInH;
    swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
                          DAG.getConstant(0, HalfT));
    swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
                          DAG.getConstant(1, HalfT));
    swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl,
                               Regs64bit ? X86::RBX : X86::EBX,
                               swapInL, cpInH.getValue(1));
    swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl,
                               Regs64bit ? X86::RCX : X86::ECX,
                               swapInH, swapInL.getValue(1));
    SDValue Ops[] = { swapInH.getValue(0),
                      N->getOperand(1),
                      swapInH.getValue(1) };
    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
    MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
    unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG :
                                  X86ISD::LCMPXCHG8_DAG;
    SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
    SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
                                        Regs64bit ? X86::RAX : X86::EAX,
                                        HalfT, Result.getValue(1));
    SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
                                        Regs64bit ? X86::RDX : X86::EDX,
                                        HalfT, cpOutL.getValue(2));
    SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};

    SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
                                        MVT::i32, cpOutH.getValue(2));
    SDValue Success =
        DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                    DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
    Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));

    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
    Results.push_back(Success);
    Results.push_back(EFLAGS.getValue(1));
    return;
  }
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD: {
    // Delegate to generic TypeLegalization. Situations we can really handle
    // should have already been dealt with by AtomicExpandPass.cpp.
    break;
  }
  case ISD::BITCAST: {
    assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
    EVT DstVT = N->getValueType(0);
    EVT SrcVT = N->getOperand(0)->getValueType(0);

    if (SrcVT != MVT::f64 ||
        (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
      return;

    unsigned NumElts = DstVT.getVectorNumElements();
    EVT SVT = DstVT.getVectorElementType();
    EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
    SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                   MVT::v2f64, N->getOperand(0));
    SDValue ToVecInt = DAG.getNode(ISD::BITCAST, dl, WiderVT, Expanded);

    if (ExperimentalVectorWideningLegalization) {
      // If we are legalizing vectors by widening, we already have the desired
      // legal vector type, just return it.
      Results.push_back(ToVecInt);
      return;
    }

    SmallVector<SDValue, 8> Elts;
    for (unsigned i = 0, e = NumElts; i != e; ++i)
      Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
                                 ToVecInt, DAG.getIntPtrConstant(i)));

    Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
    return;
  }
  }
}
const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return nullptr;
  case X86ISD::BSF: return "X86ISD::BSF";
  case X86ISD::BSR: return "X86ISD::BSR";
  case X86ISD::SHLD: return "X86ISD::SHLD";
  case X86ISD::SHRD: return "X86ISD::SHRD";
  case X86ISD::FAND: return "X86ISD::FAND";
  case X86ISD::FANDN: return "X86ISD::FANDN";
  case X86ISD::FOR: return "X86ISD::FOR";
  case X86ISD::FXOR: return "X86ISD::FXOR";
  case X86ISD::FSRL: return "X86ISD::FSRL";
  case X86ISD::FILD: return "X86ISD::FILD";
  case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
  case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
  case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
  case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
  case X86ISD::FLD: return "X86ISD::FLD";
  case X86ISD::FST: return "X86ISD::FST";
  case X86ISD::CALL: return "X86ISD::CALL";
  case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
  case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
  case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
  case X86ISD::BT: return "X86ISD::BT";
  case X86ISD::CMP: return "X86ISD::CMP";
  case X86ISD::COMI: return "X86ISD::COMI";
  case X86ISD::UCOMI: return "X86ISD::UCOMI";
  case X86ISD::CMPM: return "X86ISD::CMPM";
  case X86ISD::CMPMU: return "X86ISD::CMPMU";
  case X86ISD::SETCC: return "X86ISD::SETCC";
  case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
  case X86ISD::FSETCC: return "X86ISD::FSETCC";
  case X86ISD::CMOV: return "X86ISD::CMOV";
  case X86ISD::BRCOND: return "X86ISD::BRCOND";
  case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
  case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
  case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
  case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
  case X86ISD::Wrapper: return "X86ISD::Wrapper";
  case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
  case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
  case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
  case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
  case X86ISD::PINSRB: return "X86ISD::PINSRB";
  case X86ISD::PINSRW: return "X86ISD::PINSRW";
  case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
  case X86ISD::ANDNP: return "X86ISD::ANDNP";
  case X86ISD::PSIGN: return "X86ISD::PSIGN";
  case X86ISD::BLENDI: return "X86ISD::BLENDI";
  case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
  case X86ISD::SUBUS: return "X86ISD::SUBUS";
  case X86ISD::HADD: return "X86ISD::HADD";
  case X86ISD::HSUB: return "X86ISD::HSUB";
  case X86ISD::FHADD: return "X86ISD::FHADD";
  case X86ISD::FHSUB: return "X86ISD::FHSUB";
  case X86ISD::UMAX: return "X86ISD::UMAX";
  case X86ISD::UMIN: return "X86ISD::UMIN";
  case X86ISD::SMAX: return "X86ISD::SMAX";
  case X86ISD::SMIN: return "X86ISD::SMIN";
  case X86ISD::FMAX: return "X86ISD::FMAX";
  case X86ISD::FMIN: return "X86ISD::FMIN";
  case X86ISD::FMAXC: return "X86ISD::FMAXC";
  case X86ISD::FMINC: return "X86ISD::FMINC";
  case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
  case X86ISD::FRCP: return "X86ISD::FRCP";
  case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
  case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
  case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
  case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
  case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
  case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
  case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
  case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
  case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
  case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
  case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
  case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
  case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
  case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
  case X86ISD::VZEXT: return "X86ISD::VZEXT";
  case X86ISD::VSEXT: return "X86ISD::VSEXT";
  case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
  case X86ISD::VTRUNCM: return "X86ISD::VTRUNCM";
  case X86ISD::VINSERT: return "X86ISD::VINSERT";
  case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
  case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
  case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
  case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
  case X86ISD::VSHL: return "X86ISD::VSHL";
  case X86ISD::VSRL: return "X86ISD::VSRL";
  case X86ISD::VSRA: return "X86ISD::VSRA";
  case X86ISD::VSHLI: return "X86ISD::VSHLI";
  case X86ISD::VSRLI: return "X86ISD::VSRLI";
  case X86ISD::VSRAI: return "X86ISD::VSRAI";
  case X86ISD::CMPP: return "X86ISD::CMPP";
  case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
  case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
  case X86ISD::PCMPEQM: return "X86ISD::PCMPEQM";
  case X86ISD::PCMPGTM: return "X86ISD::PCMPGTM";
  case X86ISD::ADD: return "X86ISD::ADD";
  case X86ISD::SUB: return "X86ISD::SUB";
  case X86ISD::ADC: return "X86ISD::ADC";
  case X86ISD::SBB: return "X86ISD::SBB";
  case X86ISD::SMUL: return "X86ISD::SMUL";
  case X86ISD::UMUL: return "X86ISD::UMUL";
  case X86ISD::SMUL8: return "X86ISD::SMUL8";
  case X86ISD::UMUL8: return "X86ISD::UMUL8";
  case X86ISD::SDIVREM8_SEXT_HREG: return "X86ISD::SDIVREM8_SEXT_HREG";
  case X86ISD::UDIVREM8_ZEXT_HREG: return "X86ISD::UDIVREM8_ZEXT_HREG";
  case X86ISD::INC: return "X86ISD::INC";
  case X86ISD::DEC: return "X86ISD::DEC";
  case X86ISD::OR: return "X86ISD::OR";
  case X86ISD::XOR: return "X86ISD::XOR";
  case X86ISD::AND: return "X86ISD::AND";
  case X86ISD::BEXTR: return "X86ISD::BEXTR";
  case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
  case X86ISD::PTEST: return "X86ISD::PTEST";
  case X86ISD::TESTP: return "X86ISD::TESTP";
  case X86ISD::TESTM: return "X86ISD::TESTM";
  case X86ISD::TESTNM: return "X86ISD::TESTNM";
  case X86ISD::KORTEST: return "X86ISD::KORTEST";
  case X86ISD::PACKSS: return "X86ISD::PACKSS";
  case X86ISD::PACKUS: return "X86ISD::PACKUS";
  case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
  case X86ISD::VALIGN: return "X86ISD::VALIGN";
  case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
  case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
  case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
  case X86ISD::SHUFP: return "X86ISD::SHUFP";
  case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
  case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
  case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
  case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
  case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
  case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
  case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
  case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
  case X86ISD::MOVSD: return "X86ISD::MOVSD";
  case X86ISD::MOVSS: return "X86ISD::MOVSS";
  case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
  case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
  case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
  case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
  case X86ISD::VEXTRACT: return "X86ISD::VEXTRACT";
  case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
  case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
  case X86ISD::VPERMV: return "X86ISD::VPERMV";
  case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
  case X86ISD::VPERMIV3: return "X86ISD::VPERMIV3";
  case X86ISD::VPERMI: return "X86ISD::VPERMI";
  case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
  case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
  case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
  case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
  case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
  case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
  case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
  case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL";
  case X86ISD::SAHF: return "X86ISD::SAHF";
  case X86ISD::RDRAND: return "X86ISD::RDRAND";
  case X86ISD::RDSEED: return "X86ISD::RDSEED";
  case X86ISD::FMADD: return "X86ISD::FMADD";
  case X86ISD::FMSUB: return "X86ISD::FMSUB";
  case X86ISD::FNMADD: return "X86ISD::FNMADD";
  case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
  case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
  case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
  case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI";
  case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI";
  case X86ISD::XTEST: return "X86ISD::XTEST";
  case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
  case X86ISD::EXPAND: return "X86ISD::EXPAND";
  case X86ISD::SELECT: return "X86ISD::SELECT";
  }
}
// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              Type *Ty) const {
  // X86 supports extremely general addressing modes.
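  // The general form is Base + Scale*Index + Disp (optionally relative to a
  // global), e.g. "movl 16(%rdi,%rcx,4), %eax"; the checks below reject the
  // few combinations the encoding cannot express.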
  CodeModel::Model M = getTargetMachine().getCodeModel();
  Reloc::Model R = getTargetMachine().getRelocationModel();

  // X86 allows a sign-extended 32-bit immediate field as a displacement.
  if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
    return false;

  if (AM.BaseGV) {
    unsigned GVFlags =
        Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());

    // If a reference to this global requires an extra load, we can't fold it.
    if (isGlobalStubReference(GVFlags))
      return false;

    // If BaseGV requires a register for the PIC base, we cannot also have a
    // BaseReg specified.
    if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
      return false;

    // If lower 4G is not available, then we must use rip-relative addressing.
    if ((M != CodeModel::Small || R != Reloc::Static) &&
        Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
      return false;
  }

  switch (AM.Scale) {
  case 0:
  case 1:
  case 2:
  case 4:
  case 8:
    // These scales always work.
    break;
  case 3:
  case 5:
  case 9:
    // These scales are formed with basereg+scalereg. Only accept if there is
    // no basereg yet.
    if (AM.HasBaseReg)
      return false;
    break;
  default: // Other stuff never works.
    return false;
  }

  return true;
}
bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
  unsigned Bits = Ty->getScalarSizeInBits();

  // 8-bit shifts are always expensive, but versions with a scalar amount
  // aren't particularly cheaper than those without.
  if (Bits == 8)
    return false;

  // On AVX2 there are new vpsllv[dq] instructions (and other shifts), that
  // make variable shifts just as cheap as scalar ones.
  if (Subtarget->hasInt256() && (Bits == 32 || Bits == 64))
    return false;

  // Otherwise, it's significantly cheaper to shift by a scalar amount than by
  // a fully general vector.
  return true;
}
bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 > NumBits2;
}

bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;

  if (!isTypeLegal(EVT::getEVT(Ty1)))
    return false;

  assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");

  // Assuming the caller doesn't have a zeroext or signext return parameter,
  // truncation all the way down to i1 is valid.
  return true;
}

bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<32>(Imm);
}

bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // Can also use sub to handle negated immediates.
  return isInt<32>(Imm);
}
bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 > NumBits2;
}

bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
  return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
}

bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
  return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
}

bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  EVT VT1 = Val.getValueType();
  if (isZExtFree(VT1, VT2))
    return true;

  if (Val.getOpcode() != ISD::LOAD)
    return false;

  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    // X86 has 8, 16, and 32-bit zero-extending loads.
    return true;
  }

  return false;
}

bool
bool
X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  if (!(Subtarget->hasFMA() || Subtarget->hasFMA4()))
    return false;
  VT = VT.getScalarType();
  if (!VT.isSimple())
    return false;
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32: case MVT::f64: return true;
  default: return false;
  }
}
bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
  // i16 instructions are longer (0x66 prefix) and potentially slower.
  return !(VT1 == MVT::i32 && VT2 == MVT::i16);
}
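// For example, "addw $4, %ax" carries a 0x66 operand-size prefix (and can hit
// length-changing-prefix stalls) where "addl $4, %eax" does not.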
/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool
X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
                                      EVT VT) const {
  if (!VT.isSimple())
    return false;

  MVT SVT = VT.getSimpleVT();
20248 // Very little shuffling can be done for 64-bit vectors right now.
  if (VT.getSizeInBits() == 64)
    return false;
20252 // This is an experimental legality test that is tailored to match the
20253 // legality test of the experimental lowering more closely. They are gated
20254 // separately to ease testing of performance differences.
20255 if (ExperimentalVectorShuffleLegality)
20256 // We only care that the types being shuffled are legal. The lowering can
20257 // handle any possible shuffle mask that results.
20258 return isTypeLegal(SVT);
20260 // If this is a single-input shuffle with no 128 bit lane crossings we can
20261 // lower it into pshufb.
20262 if ((SVT.is128BitVector() && Subtarget->hasSSSE3()) ||
20263 (SVT.is256BitVector() && Subtarget->hasInt256())) {
20264 bool isLegal = true;
    for (unsigned I = 0, E = M.size(); I != E; ++I) {
      if (M[I] >= (int)SVT.getVectorNumElements() ||
          ShuffleCrosses128bitLane(SVT, I, M[I])) {
        isLegal = false;
        break;
      }
    }
    if (isLegal)
      return true;
  }

  // FIXME: blends, shifts.
20277 return (SVT.getVectorNumElements() == 2 ||
20278 ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
20279 isMOVLMask(M, SVT) ||
20280 isCommutedMOVLMask(M, SVT) ||
20281 isMOVHLPSMask(M, SVT) ||
20282 isSHUFPMask(M, SVT) ||
20283 isSHUFPMask(M, SVT, /* Commuted */ true) ||
20284 isPSHUFDMask(M, SVT) ||
20285 isPSHUFDMask(M, SVT, /* SecondOperand */ true) ||
20286 isPSHUFHWMask(M, SVT, Subtarget->hasInt256()) ||
20287 isPSHUFLWMask(M, SVT, Subtarget->hasInt256()) ||
20288 isPALIGNRMask(M, SVT, Subtarget) ||
20289 isUNPCKLMask(M, SVT, Subtarget->hasInt256()) ||
20290 isUNPCKHMask(M, SVT, Subtarget->hasInt256()) ||
20291 isUNPCKL_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20292 isUNPCKH_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20293 isBlendMask(M, SVT, Subtarget->hasSSE41(), Subtarget->hasInt256()) ||
          (Subtarget->hasSSE41() && isINSERTPSMask(M, SVT)));
}

bool
X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
                                          EVT VT) const {
  if (!VT.isSimple())
    return false;
20303 MVT SVT = VT.getSimpleVT();
20305 // This is an experimental legality test that is tailored to match the
20306 // legality test of the experimental lowering more closely. They are gated
20307 // separately to ease testing of performance differences.
20308 if (ExperimentalVectorShuffleLegality)
20309 // The new vector shuffle lowering is very good at managing zero-inputs.
20310 return isShuffleMaskLegal(Mask, VT);
20312 unsigned NumElts = SVT.getVectorNumElements();
  // FIXME: This collection of masks seems suspect.
  if (NumElts == 2)
    return true;
  if (NumElts == 4 && SVT.is128BitVector()) {
20317 return (isMOVLMask(Mask, SVT) ||
20318 isCommutedMOVLMask(Mask, SVT, true) ||
20319 isSHUFPMask(Mask, SVT) ||
20320 isSHUFPMask(Mask, SVT, /* Commuted */ true) ||
20321 isBlendMask(Mask, SVT, Subtarget->hasSSE41(),
                        Subtarget->hasInt256()));
  }
  return false;
}

//===----------------------------------------------------------------------===//
20328 // X86 Scheduler Hooks
20329 //===----------------------------------------------------------------------===//
20331 /// Utility function to emit xbegin specifying the start of an RTM region.
20332 static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
20333 const TargetInstrInfo *TII) {
20334 DebugLoc DL = MI->getDebugLoc();
20336 const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = MBB;
  ++I;

  // For the v = xbegin(), we generate
  // thisMBB:  xbegin sinkMBB
  // mainMBB:  eax = -1
  // sinkMBB:  v = eax
20351 MachineBasicBlock *thisMBB = MBB;
20352 MachineFunction *MF = MBB->getParent();
20353 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
20354 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
20355 MF->insert(I, mainMBB);
20356 MF->insert(I, sinkMBB);
20358 // Transfer the remainder of BB and its successor edges to sinkMBB.
20359 sinkMBB->splice(sinkMBB->begin(), MBB,
20360 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
20361 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
20365 // # fallthrough to mainMBB
20366 // # abortion to sinkMBB
20367 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB);
20368 thisMBB->addSuccessor(mainMBB);
20369 thisMBB->addSuccessor(sinkMBB);
20373 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1);
20374 mainMBB->addSuccessor(sinkMBB);
20377 // EAX is live into the sinkMBB
20378 sinkMBB->addLiveIn(X86::EAX);
20379 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
    .addReg(X86::EAX);

  MI->eraseFromParent();
  return sinkMBB;
}
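// A sketch of the emitted blocks for "%v = xbegin()":
//   thisMBB:  xbegin sinkMBB       # on abort, HW jumps here, status in EAX
//   mainMBB:  movl $-1, %eax       # success path: -1 == XBEGIN_STARTED
//   sinkMBB:  %v = COPY %eax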
20387 // FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8
20388 // or XMM0_V32I8 in AVX all of this code can be replaced with that
20389 // in the .td file.
20390 static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
                                        const TargetInstrInfo *TII) {
  unsigned Opc;
  switch (MI->getOpcode()) {
20394 default: llvm_unreachable("illegal opcode!");
20395 case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break;
20396 case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
20397 case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break;
20398 case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break;
20399 case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break;
20400 case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break;
20401 case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break;
  case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
  }

  DebugLoc dl = MI->getDebugLoc();
20406 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20408 unsigned NumArgs = MI->getNumOperands();
20409 for (unsigned i = 1; i < NumArgs; ++i) {
20410 MachineOperand &Op = MI->getOperand(i);
20411 if (!(Op.isReg() && Op.isImplicit()))
      MIB.addOperand(Op);
  }

  if (MI->hasOneMemOperand())
20415 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20417 BuildMI(*BB, MI, dl,
20418 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20419 .addReg(X86::XMM0);
  MI->eraseFromParent();
  return BB;
}
20425 // FIXME: Custom handling because TableGen doesn't support multiple implicit
20426 // defs in an instruction pattern
20427 static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
                                        const TargetInstrInfo *TII) {
  unsigned Opc;
  switch (MI->getOpcode()) {
20431 default: llvm_unreachable("illegal opcode!");
20432 case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break;
20433 case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
20434 case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break;
20435 case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break;
20436 case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break;
20437 case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break;
20438 case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break;
  case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
  }

  DebugLoc dl = MI->getDebugLoc();
20443 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20445 unsigned NumArgs = MI->getNumOperands(); // remove the results
20446 for (unsigned i = 1; i < NumArgs; ++i) {
20447 MachineOperand &Op = MI->getOperand(i);
20448 if (!(Op.isReg() && Op.isImplicit()))
      MIB.addOperand(Op);
  }

  if (MI->hasOneMemOperand())
20452 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
  BuildMI(*BB, MI, dl,
          TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
    .addReg(X86::ECX);

  MI->eraseFromParent();
  return BB;
}
20462 static MachineBasicBlock * EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
20463 const TargetInstrInfo *TII,
20464 const X86Subtarget* Subtarget) {
20465 DebugLoc dl = MI->getDebugLoc();
20467 // Address into RAX/EAX, other two args into ECX, EDX.
20468 unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
20469 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
20470 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
20471 for (int i = 0; i < X86::AddrNumOperands; ++i)
20472 MIB.addOperand(MI->getOperand(i));
20474 unsigned ValOps = X86::AddrNumOperands;
20475 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
20476 .addReg(MI->getOperand(ValOps).getReg());
20477 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
20478 .addReg(MI->getOperand(ValOps+1).getReg());
20480 // The instruction doesn't actually take any operands though.
20481 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));
  MI->eraseFromParent(); // The pseudo is gone now.
  return BB;
}
20487 MachineBasicBlock *
X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
                                                 MachineBasicBlock *MBB) const {
20491 // Emit va_arg instruction on X86-64.
20493 // Operands to this pseudo-instruction:
20494 // 0 ) Output : destination address (reg)
20495 // 1-5) Input : va_list address (addr, i64mem)
20496 // 6 ) ArgSize : Size (in bytes) of vararg type
20497 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
20498 // 8 ) Align : Alignment of type
20499 // 9 ) EFLAGS (implicit-def)
20501 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
20502 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands");
20504 unsigned DestReg = MI->getOperand(0).getReg();
20505 MachineOperand &Base = MI->getOperand(1);
20506 MachineOperand &Scale = MI->getOperand(2);
20507 MachineOperand &Index = MI->getOperand(3);
20508 MachineOperand &Disp = MI->getOperand(4);
20509 MachineOperand &Segment = MI->getOperand(5);
20510 unsigned ArgSize = MI->getOperand(6).getImm();
20511 unsigned ArgMode = MI->getOperand(7).getImm();
20512 unsigned Align = MI->getOperand(8).getImm();
20514 // Memory Reference
20515 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
20516 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
20517 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
20519 // Machine Information
20520 const TargetInstrInfo *TII = MBB->getParent()->getSubtarget().getInstrInfo();
20521 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
20522 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
20523 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
20524 DebugLoc DL = MI->getDebugLoc();
  // struct va_list {
  //   i32   gp_offset
  //   i32   fp_offset
  //   i64   overflow_area (address)
  //   i64   reg_save_area (address)
  // }
  // sizeof(va_list) = 24
  // alignment(va_list) = 8
20535 unsigned TotalNumIntRegs = 6;
20536 unsigned TotalNumXMMRegs = 8;
20537 bool UseGPOffset = (ArgMode == 1);
20538 bool UseFPOffset = (ArgMode == 2);
20539 unsigned MaxOffset = TotalNumIntRegs * 8 +
20540 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
20542 /* Align ArgSize to a multiple of 8 */
20543 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
20544 bool NeedsAlign = (Align > 8);
20546 MachineBasicBlock *thisMBB = MBB;
20547 MachineBasicBlock *overflowMBB;
20548 MachineBasicBlock *offsetMBB;
20549 MachineBasicBlock *endMBB;
20551 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
20552 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
20553 unsigned OffsetReg = 0;
20555 if (!UseGPOffset && !UseFPOffset) {
20556 // If we only pull from the overflow region, we don't create a branch.
20557 // We don't need to alter control flow.
20558 OffsetDestReg = 0; // unused
20559 OverflowDestReg = DestReg;
20561 offsetMBB = nullptr;
20562 overflowMBB = thisMBB;
20565 // First emit code to check if gp_offset (or fp_offset) is below the bound.
20566 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
20567 // If not, pull from overflow_area. (branch to overflowMBB)
    //
    //        thisMBB
    //         /    \
    //  offsetMBB   overflowMBB
    //         \    /
    //         endMBB
20577 // Registers for the PHI in endMBB
20578 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
20579 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
20581 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
20582 MachineFunction *MF = MBB->getParent();
20583 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20584 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20585 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
    MachineFunction::iterator MBBIter = MBB;
    ++MBBIter;

    // Insert the new basic blocks
20591 MF->insert(MBBIter, offsetMBB);
20592 MF->insert(MBBIter, overflowMBB);
20593 MF->insert(MBBIter, endMBB);
20595 // Transfer the remainder of MBB and its successor edges to endMBB.
20596 endMBB->splice(endMBB->begin(), thisMBB,
20597 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
20598 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
20600 // Make offsetMBB and overflowMBB successors of thisMBB
20601 thisMBB->addSuccessor(offsetMBB);
20602 thisMBB->addSuccessor(overflowMBB);
20604 // endMBB is a successor of both offsetMBB and overflowMBB
20605 offsetMBB->addSuccessor(endMBB);
20606 overflowMBB->addSuccessor(endMBB);
20608 // Load the offset value into a register
20609 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
    BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
      .addOperand(Base)
      .addOperand(Scale)
      .addOperand(Index)
      .addDisp(Disp, UseFPOffset ? 4 : 0)
20615 .addOperand(Segment)
20616 .setMemRefs(MMOBegin, MMOEnd);
20618 // Check if there is enough room left to pull this argument.
    BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
      .addReg(OffsetReg)
      .addImm(MaxOffset + 8 - ArgSizeA8);
20623 // Branch to "overflowMBB" if offset >= max
20624 // Fall through to "offsetMBB" otherwise
20625 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
20626 .addMBB(overflowMBB);
20629 // In offsetMBB, emit code to use the reg_save_area.
20631 assert(OffsetReg != 0);
20633 // Read the reg_save_area address.
20634 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
      .addOperand(Base)
      .addOperand(Scale)
      .addOperand(Index)
      .addDisp(Disp, 16)
      .addOperand(Segment)
20641 .setMemRefs(MMOBegin, MMOEnd);
20643 // Zero-extend the offset
20644 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
      .addImm(0)
      .addReg(OffsetReg)
      .addImm(X86::sub_32bit);
20650 // Add the offset to the reg_save_area to get the final address.
20651 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
20652 .addReg(OffsetReg64)
20653 .addReg(RegSaveReg);
20655 // Compute the offset for the next argument
20656 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
      .addReg(OffsetReg)
      .addImm(UseFPOffset ? 16 : 8);
20661 // Store it back into the va_list.
    BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
      .addOperand(Base)
      .addOperand(Scale)
      .addOperand(Index)
      .addDisp(Disp, UseFPOffset ? 4 : 0)
20667 .addOperand(Segment)
20668 .addReg(NextOffsetReg)
20669 .setMemRefs(MMOBegin, MMOEnd);
    // Jump to endMBB
    BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
      .addMBB(endMBB);
  }

  // Emit code to use overflow area
20680 // Load the overflow_area address into a register.
20681 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
  BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
    .addOperand(Base)
    .addOperand(Scale)
    .addOperand(Index)
    .addDisp(Disp, 8)
    .addOperand(Segment)
20688 .setMemRefs(MMOBegin, MMOEnd);
20690 // If we need to align it, do so. Otherwise, just copy the address
  // to OverflowDestReg.
  if (NeedsAlign) {
    // Align the overflow address
20694 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
20695 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
20697 // aligned_addr = (addr + (align-1)) & ~(align-1)
20698 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
      .addReg(OverflowAddrReg)
      .addImm(Align-1);

    BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
      .addReg(TmpReg)
      .addImm(~(uint64_t)(Align-1));
  } else {
    BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
      .addReg(OverflowAddrReg);
  }
20710 // Compute the next overflow address after this argument.
20711 // (the overflow address should be kept 8-byte aligned)
20712 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
20713 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
20714 .addReg(OverflowDestReg)
20715 .addImm(ArgSizeA8);
20717 // Store the new overflow address.
  BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
    .addOperand(Base)
    .addOperand(Scale)
    .addOperand(Index)
    .addDisp(Disp, 8)
    .addOperand(Segment)
20724 .addReg(NextAddrReg)
20725 .setMemRefs(MMOBegin, MMOEnd);
  // If we branched, emit the PHI to the front of endMBB.
  if (offsetMBB) {
    BuildMI(*endMBB, endMBB->begin(), DL,
20730 TII->get(X86::PHI), DestReg)
20731 .addReg(OffsetDestReg).addMBB(offsetMBB)
      .addReg(OverflowDestReg).addMBB(overflowMBB);
  }

  // Erase the pseudo instruction
  MI->eraseFromParent();

  return endMBB;
}
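// Roughly the equivalent C for the gp_offset case (a sketch; the fp_offset
// path is analogous with 16-byte XMM slots):
//   if (ap->gp_offset < 6 * 8) {
//     p = ap->reg_save_area + ap->gp_offset;
//     ap->gp_offset += 8;
//   } else {
//     p = ap->overflow_arg_area;                // aligned up if Align > 8
//     ap->overflow_arg_area += (size + 7) & ~7;
//   }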
20741 MachineBasicBlock *
X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
    MachineInstr *MI, MachineBasicBlock *MBB) const {
20745 // Emit code to save XMM registers to the stack. The ABI says that the
20746 // number of registers to save is given in %al, so it's theoretically
20747 // possible to do an indirect jump trick to avoid saving all of them,
20748 // however this code takes a simpler approach and just executes all
20749 // of the stores if %al is non-zero. It's less code, and it's probably
20750 // easier on the hardware branch predictor, and stores aren't all that
20751 // expensive anyway.
20753 // Create the new basic blocks. One block contains all the XMM stores,
20754 // and one block is the final destination regardless of whether any
20755 // stores were performed.
20756 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
20757 MachineFunction *F = MBB->getParent();
  MachineFunction::iterator MBBIter = MBB;
  ++MBBIter;
20760 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
20761 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
20762 F->insert(MBBIter, XMMSaveMBB);
20763 F->insert(MBBIter, EndMBB);
20765 // Transfer the remainder of MBB and its successor edges to EndMBB.
20766 EndMBB->splice(EndMBB->begin(), MBB,
20767 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
20768 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
20770 // The original block will now fall through to the XMM save block.
20771 MBB->addSuccessor(XMMSaveMBB);
20772 // The XMMSaveMBB will fall through to the end block.
20773 XMMSaveMBB->addSuccessor(EndMBB);
20775 // Now add the instructions.
20776 const TargetInstrInfo *TII = MBB->getParent()->getSubtarget().getInstrInfo();
20777 DebugLoc DL = MI->getDebugLoc();
20779 unsigned CountReg = MI->getOperand(0).getReg();
20780 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
20781 int64_t VarArgsFPOffset = MI->getOperand(2).getImm();
20783 if (!Subtarget->isTargetWin64()) {
20784 // If %al is 0, branch around the XMM save block.
20785 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
20786 BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
20787 MBB->addSuccessor(EndMBB);
20790 // Make sure the last operand is EFLAGS, which gets clobbered by the branch
20791 // that was just emitted, but clearly shouldn't be "saved".
20792 assert((MI->getNumOperands() <= 3 ||
20793 !MI->getOperand(MI->getNumOperands() - 1).isReg() ||
20794 MI->getOperand(MI->getNumOperands() - 1).getReg() == X86::EFLAGS)
20795 && "Expected last argument to be EFLAGS");
20796 unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
20797 // In the XMM save block, save all the XMM argument registers.
20798 for (int i = 3, e = MI->getNumOperands() - 1; i != e; ++i) {
20799 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
20800 MachineMemOperand *MMO =
20801 F->getMachineMemOperand(
20802 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset),
20803 MachineMemOperand::MOStore,
20804 /*Size=*/16, /*Align=*/16);
20805 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
20806 .addFrameIndex(RegSaveFrameIndex)
20807 .addImm(/*Scale=*/1)
20808 .addReg(/*IndexReg=*/0)
20809 .addImm(/*Disp=*/Offset)
20810 .addReg(/*Segment=*/0)
20811 .addReg(MI->getOperand(i).getReg())
20812 .addMemOperand(MMO);
  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return EndMBB;
}
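// A sketch of the caller-side contract this relies on: for a variadic call
// the ABI passes the number of vector registers used in %al, e.g.
//   movb $2, %al        # two XMM arguments follow
//   callq vararg_fn
// so a zero %al lets the guarded block skip all eight 16-byte spills.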
20820 // The EFLAGS operand of SelectItr might be missing a kill marker
20821 // because there were multiple uses of EFLAGS, and ISel didn't know
20822 // which to mark. Figure out whether SelectItr should have had a
// kill marker, and set it if it should. Returns the correct kill
// marker value.
20825 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
20826 MachineBasicBlock* BB,
20827 const TargetRegisterInfo* TRI) {
20828 // Scan forward through BB for a use/def of EFLAGS.
20829 MachineBasicBlock::iterator miI(std::next(SelectItr));
20830 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
20831 const MachineInstr& mi = *miI;
    if (mi.readsRegister(X86::EFLAGS))
      return false;
    if (mi.definesRegister(X86::EFLAGS))
      break; // Should have kill-flag - update below.
  }
  // If we hit the end of the block, check whether EFLAGS is live into a
  // successor.
20840 if (miI == BB->end()) {
20841 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
20842 sEnd = BB->succ_end();
20843 sItr != sEnd; ++sItr) {
20844 MachineBasicBlock* succ = *sItr;
      if (succ->isLiveIn(X86::EFLAGS))
        return false;
    }
  }

  // We found a def, or hit the end of the basic block and EFLAGS wasn't live
20851 // out. SelectMI should have a kill flag on EFLAGS.
  SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
  return true;
}
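// Example of the case this handles (a sketch): two CMOV pseudos expanded from
// selects that share a single comparison:
//   cmpl %esi, %edi
//   %a = CMOV_GR32 ...    // must NOT mark EFLAGS killed here
//   %b = CMOV_GR32 ...    // still reads EFLAGS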
20856 MachineBasicBlock *
20857 X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
20858 MachineBasicBlock *BB) const {
20859 const TargetInstrInfo *TII = BB->getParent()->getSubtarget().getInstrInfo();
20860 DebugLoc DL = MI->getDebugLoc();
20862 // To "insert" a SELECT_CC instruction, we actually have to insert the
20863 // diamond control-flow pattern. The incoming instruction knows the
20864 // destination vreg to set, the condition code register to branch on, the
20865 // true/false values to select between, and a branch opcode to use.
20866 const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;
  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC copy1MBB
  //   fallthrough --> copy0MBB
20876 MachineBasicBlock *thisMBB = BB;
20877 MachineFunction *F = BB->getParent();
20878 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
20879 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
20880 F->insert(It, copy0MBB);
20881 F->insert(It, sinkMBB);
20883 // If the EFLAGS register isn't dead in the terminator, then claim that it's
20884 // live into the sink and copy blocks.
20885 const TargetRegisterInfo *TRI =
20886 BB->getParent()->getSubtarget().getRegisterInfo();
20887 if (!MI->killsRegister(X86::EFLAGS) &&
20888 !checkAndUpdateEFLAGSKill(MI, BB, TRI)) {
20889 copy0MBB->addLiveIn(X86::EFLAGS);
    sinkMBB->addLiveIn(X86::EFLAGS);
  }

  // Transfer the remainder of BB and its successor edges to sinkMBB.
20894 sinkMBB->splice(sinkMBB->begin(), BB,
20895 std::next(MachineBasicBlock::iterator(MI)), BB->end());
20896 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
20898 // Add the true and fallthrough blocks as its successors.
20899 BB->addSuccessor(copy0MBB);
20900 BB->addSuccessor(sinkMBB);
  // Create the conditional branch instruction.
  unsigned Opc =
      X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
  BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
20908 // %FalseValue = ...
20909 // # fallthrough to sinkMBB
20910 copy0MBB->addSuccessor(sinkMBB);
20913 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
20915 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
20916 TII->get(X86::PHI), MI->getOperand(0).getReg())
20917 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
20918 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return sinkMBB;
}
20924 MachineBasicBlock *
20925 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
20926 MachineBasicBlock *BB) const {
20927 MachineFunction *MF = BB->getParent();
20928 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
20929 DebugLoc DL = MI->getDebugLoc();
20930 const BasicBlock *LLVM_BB = BB->getBasicBlock();
20932 assert(MF->shouldSplitStack());
20934 const bool Is64Bit = Subtarget->is64Bit();
20935 const bool IsLP64 = Subtarget->isTarget64BitLP64();
20937 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
20938 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
  // BB:
  //  ... [Till the alloca]
  // If stacklet is not large enough, jump to mallocMBB
  //
  // bumpMBB:
  //  Allocate by subtracting from RSP
  //  Jump to continueMBB
  //
  // mallocMBB:
  //  Allocate by call to runtime
  //
  // continueMBB:
  //  [rest of original BB]
20956 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20957 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20958 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20960 MachineRegisterInfo &MRI = MF->getRegInfo();
20961 const TargetRegisterClass *AddrRegClass =
20962 getRegClassFor(getPointerTy());
20964 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
20965 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
20966 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
20967 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
20968 sizeVReg = MI->getOperand(1).getReg(),
20969 physSPReg = IsLP64 || Subtarget->isTargetNaCl64() ? X86::RSP : X86::ESP;
  MachineFunction::iterator MBBIter = BB;
  ++MBBIter;
20974 MF->insert(MBBIter, bumpMBB);
20975 MF->insert(MBBIter, mallocMBB);
20976 MF->insert(MBBIter, continueMBB);
20978 continueMBB->splice(continueMBB->begin(), BB,
20979 std::next(MachineBasicBlock::iterator(MI)), BB->end());
20980 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
20982 // Add code to the main basic block to check if the stack limit has been hit,
20983 // and if so, jump to mallocMBB otherwise to bumpMBB.
20984 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
20985 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
20986 .addReg(tmpSPVReg).addReg(sizeVReg);
20987 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
20988 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
20989 .addReg(SPLimitVReg);
20990 BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);
20992 // bumpMBB simply decreases the stack pointer, since we know the current
20993 // stacklet has enough space.
20994 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
20995 .addReg(SPLimitVReg);
20996 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
20997 .addReg(SPLimitVReg);
20998 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21000 // Calls into a routine in libgcc to allocate more space from the heap.
21001 const uint32_t *RegMask = MF->getTarget()
21002 .getSubtargetImpl()
21003 ->getRegisterInfo()
                                ->getCallPreservedMask(CallingConv::C);

  if (IsLP64) {
    BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
      .addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21009 .addExternalSymbol("__morestack_allocate_stack_space")
21010 .addRegMask(RegMask)
21011 .addReg(X86::RDI, RegState::Implicit)
21012 .addReg(X86::RAX, RegState::ImplicitDefine);
21013 } else if (Is64Bit) {
    BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
      .addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21017 .addExternalSymbol("__morestack_allocate_stack_space")
21018 .addRegMask(RegMask)
21019 .addReg(X86::EDI, RegState::Implicit)
      .addReg(X86::EAX, RegState::ImplicitDefine);
  } else {
    BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
      .addImm(16);
    BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
21025 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
21026 .addExternalSymbol("__morestack_allocate_stack_space")
21027 .addRegMask(RegMask)
      .addReg(X86::EAX, RegState::ImplicitDefine);
  }

  if (!Is64Bit)
    BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
      .addImm(16);
21035 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
21036 .addReg(IsLP64 ? X86::RAX : X86::EAX);
21037 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21039 // Set up the CFG correctly.
21040 BB->addSuccessor(bumpMBB);
21041 BB->addSuccessor(mallocMBB);
21042 mallocMBB->addSuccessor(continueMBB);
21043 bumpMBB->addSuccessor(continueMBB);
21045 // Take care of the PHI nodes.
21046 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
21047 MI->getOperand(0).getReg())
21048 .addReg(mallocPtrVReg).addMBB(mallocMBB)
21049 .addReg(bumpSPPtrVReg).addMBB(bumpMBB);
21051 // Delete the original pseudo instruction.
21052 MI->eraseFromParent();
  return continueMBB;
}
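// A sketch of the emitted check on x86-64 (the stack limit lives at a fixed
// TLS offset, 0x70 for LP64):
//   movq %rsp, %r11
//   subq %rdi, %r11          # candidate new SP
//   cmpq %r11, %fs:0x70
//   jg   mallocMBB           # limit above new SP: allocate via runtime
//   movq %r11, %rsp          # bumpMBB: fits in the current stacklet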
21058 MachineBasicBlock *
21059 X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
21060 MachineBasicBlock *BB) const {
21061 DebugLoc DL = MI->getDebugLoc();
21063 assert(!Subtarget->isTargetMachO());
21065 X86FrameLowering::emitStackProbeCall(*BB->getParent(), *BB, MI, DL);
  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}
21071 MachineBasicBlock *
21072 X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
21073 MachineBasicBlock *BB) const {
21074 // This is pretty easy. We're taking the value that we received from
21075 // our load from the relocation, sticking it in either RDI (x86-64)
21076 // or EAX and doing an indirect call. The return value will then
21077 // be in the normal return register.
21078 MachineFunction *F = BB->getParent();
21079 const X86InstrInfo *TII =
21080 static_cast<const X86InstrInfo *>(F->getSubtarget().getInstrInfo());
21081 DebugLoc DL = MI->getDebugLoc();
21083 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
21084 assert(MI->getOperand(3).isGlobal() && "This should be a global");
21086 // Get a register mask for the lowered call.
21087 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
21088 // proper register mask.
21089 const uint32_t *RegMask = F->getTarget()
21090 .getSubtargetImpl()
21091 ->getRegisterInfo()
21092 ->getCallPreservedMask(CallingConv::C);
21093 if (Subtarget->is64Bit()) {
21094 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
                                      TII->get(X86::MOV64rm), X86::RDI)
    .addReg(X86::RIP)
    .addImm(0).addReg(0)
    .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                      MI->getOperand(3).getTargetFlags())
    .addReg(0);
21101 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
21102 addDirectMem(MIB, X86::RDI);
21103 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
21104 } else if (F->getTarget().getRelocationModel() != Reloc::PIC_) {
21105 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
                                      TII->get(X86::MOV32rm), X86::EAX)
    .addReg(0)
    .addImm(0).addReg(0)
    .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                      MI->getOperand(3).getTargetFlags())
    .addReg(0);
21112 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21113 addDirectMem(MIB, X86::EAX);
    MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
  } else {
    MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21117 TII->get(X86::MOV32rm), X86::EAX)
21118 .addReg(TII->getGlobalBaseReg(F))
21119 .addImm(0).addReg(0)
21120 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                      MI->getOperand(3).getTargetFlags())
    .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21124 addDirectMem(MIB, X86::EAX);
    MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
  }

  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}
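// On x86-64 Darwin the expansion is roughly (a sketch):
//   movq _var@TLVP(%rip), %rdi   # load the TLV descriptor address
//   callq *(%rdi)                # first word of the descriptor is the getter
// leaving the address of the variable in the usual return register.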
21132 MachineBasicBlock *
21133 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
21134 MachineBasicBlock *MBB) const {
21135 DebugLoc DL = MI->getDebugLoc();
21136 MachineFunction *MF = MBB->getParent();
21137 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
21138 MachineRegisterInfo &MRI = MF->getRegInfo();
21140 const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = MBB;
  ++I;
21144 // Memory Reference
21145 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21146 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
  unsigned DstReg;
  unsigned MemOpndSlot = 0;
21151 unsigned CurOp = 0;
21153 DstReg = MI->getOperand(CurOp++).getReg();
21154 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
21155 assert(RC->hasType(MVT::i32) && "Invalid destination!");
21156 unsigned mainDstReg = MRI.createVirtualRegister(RC);
21157 unsigned restoreDstReg = MRI.createVirtualRegister(RC);
21159 MemOpndSlot = CurOp;
21161 MVT PVT = getPointerTy();
21162 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21163 "Invalid Pointer Size!");
  // For v = setjmp(buf), we generate
  // thisMBB:
  //  buf[LabelOffset] = restoreMBB
  //  SjLjSetup restoreMBB
  // mainMBB:
  //  v_main = 0
  // sinkMBB:
  //  v = phi(main, restore)
  // restoreMBB:
  //  if base pointer being used, load it from frame
  //  v_restore = 1
21181 MachineBasicBlock *thisMBB = MBB;
21182 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
21183 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
21184 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
21185 MF->insert(I, mainMBB);
21186 MF->insert(I, sinkMBB);
21187 MF->push_back(restoreMBB);
21189 MachineInstrBuilder MIB;
21191 // Transfer the remainder of BB and its successor edges to sinkMBB.
21192 sinkMBB->splice(sinkMBB->begin(), MBB,
21193 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21194 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
21197 unsigned PtrStoreOpc = 0;
21198 unsigned LabelReg = 0;
21199 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21200 Reloc::Model RM = MF->getTarget().getRelocationModel();
21201 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
21202 (RM == Reloc::Static || RM == Reloc::DynamicNoPIC);
21204 // Prepare IP either in reg or imm.
21205 if (!UseImmLabel) {
21206 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
21207 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
21208 LabelReg = MRI.createVirtualRegister(PtrRC);
21209 if (Subtarget->is64Bit()) {
      MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
              .addReg(X86::RIP)
              .addImm(0)
              .addReg(0)
              .addMBB(restoreMBB)
              .addReg(0);
    } else {
      const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
21218 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
              .addReg(XII->getGlobalBaseReg(MF))
              .addImm(0)
              .addReg(0)
              .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference())
              .addReg(0);
    }
  } else
    PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
21228 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
21229 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21230 if (i == X86::AddrDisp)
21231 MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset);
    else
      MIB.addOperand(MI->getOperand(MemOpndSlot + i));
  }
  if (!UseImmLabel)
    MIB.addReg(LabelReg);
  else
    MIB.addMBB(restoreMBB);
21239 MIB.setMemRefs(MMOBegin, MMOEnd);
21241 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
21242 .addMBB(restoreMBB);
21244 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
21245 MF->getSubtarget().getRegisterInfo());
21246 MIB.addRegMask(RegInfo->getNoPreservedMask());
21247 thisMBB->addSuccessor(mainMBB);
21248 thisMBB->addSuccessor(restoreMBB);
21252 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
21253 mainMBB->addSuccessor(sinkMBB);
21256 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21257 TII->get(X86::PHI), DstReg)
21258 .addReg(mainDstReg).addMBB(mainMBB)
21259 .addReg(restoreDstReg).addMBB(restoreMBB);
21262 if (RegInfo->hasBasePointer(*MF)) {
21263 const X86Subtarget &STI = MF->getTarget().getSubtarget<X86Subtarget>();
21264 const bool Uses64BitFramePtr = STI.isTarget64BitLP64() || STI.isTargetNaCl64();
21265 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
21266 X86FI->setRestoreBasePointer(MF);
21267 unsigned FramePtr = RegInfo->getFrameRegister(*MF);
21268 unsigned BasePtr = RegInfo->getBaseRegister();
21269 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
21270 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
21271 FramePtr, true, X86FI->getRestoreBasePointerOffset())
      .setMIFlag(MachineInstr::FrameSetup);
  }
  BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
21275 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
21276 restoreMBB->addSuccessor(sinkMBB);
  MI->eraseFromParent();
  return sinkMBB;
}
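// Jump-buffer slots assumed by this pairing (a sketch, in pointer-sized
// units): buf[1] holds the resume IP stored above and buf[2] the SP; both are
// reloaded by emitEHSjLjLongJmp below.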
21282 MachineBasicBlock *
21283 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
21284 MachineBasicBlock *MBB) const {
21285 DebugLoc DL = MI->getDebugLoc();
21286 MachineFunction *MF = MBB->getParent();
21287 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
21288 MachineRegisterInfo &MRI = MF->getRegInfo();
21290 // Memory Reference
21291 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21292 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21294 MVT PVT = getPointerTy();
21295 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21296 "Invalid Pointer Size!");
21298 const TargetRegisterClass *RC =
21299 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
21300 unsigned Tmp = MRI.createVirtualRegister(RC);
21301 // Since FP is only updated here but NOT referenced, it's treated as GPR.
21302 const X86RegisterInfo *RegInfo = static_cast<const X86RegisterInfo *>(
21303 MF->getSubtarget().getRegisterInfo());
21304 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
21305 unsigned SP = RegInfo->getStackRegister();
21307 MachineInstrBuilder MIB;
21309 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21310 const int64_t SPOffset = 2 * PVT.getStoreSize();
21312 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
21313 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
21316 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
21317 for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
21318 MIB.addOperand(MI->getOperand(i));
21319 MIB.setMemRefs(MMOBegin, MMOEnd);
21321 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
21322 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21323 if (i == X86::AddrDisp)
21324 MIB.addDisp(MI->getOperand(i), LabelOffset);
    else
      MIB.addOperand(MI->getOperand(i));
  }
21328 MIB.setMemRefs(MMOBegin, MMOEnd);
21330 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
21331 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21332 if (i == X86::AddrDisp)
21333 MIB.addDisp(MI->getOperand(i), SPOffset);
    else
      MIB.addOperand(MI->getOperand(i));
  }
21337 MIB.setMemRefs(MMOBegin, MMOEnd);
21339 BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
  MI->eraseFromParent();
  return MBB;
}
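// A sketch of the expansion for the 64-bit case (%rcx stands in for the
// virtual register Tmp used above):
//   movq   (buf), %rbp      # reload FP
//   movq  8(buf), %rcx      # reload IP (the setjmp's restoreMBB)
//   movq 16(buf), %rsp      # reload SP
//   jmpq  *%rcx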
21345 // Replace 213-type (isel default) FMA3 instructions with 231-type for
21346 // accumulator loops. Writing back to the accumulator allows the coalescer
21347 // to remove extra copies in the loop.
21348 MachineBasicBlock *
21349 X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
21350 MachineBasicBlock *MBB) const {
21351 MachineOperand &AddendOp = MI->getOperand(3);
21353 // Bail out early if the addend isn't a register - we can't switch these.
  if (!AddendOp.isReg())
    return MBB;
21357 MachineFunction &MF = *MBB->getParent();
21358 MachineRegisterInfo &MRI = MF.getRegInfo();
21360 // Check whether the addend is defined by a PHI:
21361 assert(MRI.hasOneDef(AddendOp.getReg()) && "Multiple defs in SSA?");
21362 MachineInstr &AddendDef = *MRI.def_instr_begin(AddendOp.getReg());
  if (!AddendDef.isPHI())
    return MBB;
  // Look for the following pattern:
  // loop:
  //   %addend = phi [%entry, 0], [%loop, %result]
  //   ...
  //   %result<tied1> = FMA213 %m2<tied0>, %m1, %addend
  //
  // Replace with:
  //   %addend = phi [%entry, 0], [%loop, %result]
  //   ...
  //   %result<tied1> = FMA231 %addend<tied0>, %m1, %m2
21378 for (unsigned i = 1, e = AddendDef.getNumOperands(); i < e; i += 2) {
21379 assert(AddendDef.getOperand(i).isReg());
21380 MachineOperand PHISrcOp = AddendDef.getOperand(i);
21381 MachineInstr &PHISrcInst = *MRI.def_instr_begin(PHISrcOp.getReg());
21382 if (&PHISrcInst == MI) {
21383 // Found a matching instruction.
21384 unsigned NewFMAOpc = 0;
21385 switch (MI->getOpcode()) {
21386 case X86::VFMADDPDr213r: NewFMAOpc = X86::VFMADDPDr231r; break;
21387 case X86::VFMADDPSr213r: NewFMAOpc = X86::VFMADDPSr231r; break;
21388 case X86::VFMADDSDr213r: NewFMAOpc = X86::VFMADDSDr231r; break;
21389 case X86::VFMADDSSr213r: NewFMAOpc = X86::VFMADDSSr231r; break;
21390 case X86::VFMSUBPDr213r: NewFMAOpc = X86::VFMSUBPDr231r; break;
21391 case X86::VFMSUBPSr213r: NewFMAOpc = X86::VFMSUBPSr231r; break;
21392 case X86::VFMSUBSDr213r: NewFMAOpc = X86::VFMSUBSDr231r; break;
21393 case X86::VFMSUBSSr213r: NewFMAOpc = X86::VFMSUBSSr231r; break;
21394 case X86::VFNMADDPDr213r: NewFMAOpc = X86::VFNMADDPDr231r; break;
21395 case X86::VFNMADDPSr213r: NewFMAOpc = X86::VFNMADDPSr231r; break;
21396 case X86::VFNMADDSDr213r: NewFMAOpc = X86::VFNMADDSDr231r; break;
21397 case X86::VFNMADDSSr213r: NewFMAOpc = X86::VFNMADDSSr231r; break;
21398 case X86::VFNMSUBPDr213r: NewFMAOpc = X86::VFNMSUBPDr231r; break;
21399 case X86::VFNMSUBPSr213r: NewFMAOpc = X86::VFNMSUBPSr231r; break;
21400 case X86::VFNMSUBSDr213r: NewFMAOpc = X86::VFNMSUBSDr231r; break;
21401 case X86::VFNMSUBSSr213r: NewFMAOpc = X86::VFNMSUBSSr231r; break;
21402 case X86::VFMADDSUBPDr213r: NewFMAOpc = X86::VFMADDSUBPDr231r; break;
21403 case X86::VFMADDSUBPSr213r: NewFMAOpc = X86::VFMADDSUBPSr231r; break;
21404 case X86::VFMSUBADDPDr213r: NewFMAOpc = X86::VFMSUBADDPDr231r; break;
21405 case X86::VFMSUBADDPSr213r: NewFMAOpc = X86::VFMSUBADDPSr231r; break;
21407 case X86::VFMADDPDr213rY: NewFMAOpc = X86::VFMADDPDr231rY; break;
21408 case X86::VFMADDPSr213rY: NewFMAOpc = X86::VFMADDPSr231rY; break;
21409 case X86::VFMSUBPDr213rY: NewFMAOpc = X86::VFMSUBPDr231rY; break;
21410 case X86::VFMSUBPSr213rY: NewFMAOpc = X86::VFMSUBPSr231rY; break;
21411 case X86::VFNMADDPDr213rY: NewFMAOpc = X86::VFNMADDPDr231rY; break;
21412 case X86::VFNMADDPSr213rY: NewFMAOpc = X86::VFNMADDPSr231rY; break;
21413 case X86::VFNMSUBPDr213rY: NewFMAOpc = X86::VFNMSUBPDr231rY; break;
21414 case X86::VFNMSUBPSr213rY: NewFMAOpc = X86::VFNMSUBPSr231rY; break;
21415 case X86::VFMADDSUBPDr213rY: NewFMAOpc = X86::VFMADDSUBPDr231rY; break;
21416 case X86::VFMADDSUBPSr213rY: NewFMAOpc = X86::VFMADDSUBPSr231rY; break;
21417 case X86::VFMSUBADDPDr213rY: NewFMAOpc = X86::VFMSUBADDPDr231rY; break;
21418 case X86::VFMSUBADDPSr213rY: NewFMAOpc = X86::VFMSUBADDPSr231rY; break;
      default: llvm_unreachable("Unrecognized FMA variant.");
      }

      const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
21423 MachineInstrBuilder MIB =
21424 BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
21425 .addOperand(MI->getOperand(0))
21426 .addOperand(MI->getOperand(3))
21427 .addOperand(MI->getOperand(2))
21428 .addOperand(MI->getOperand(1));
21429 MBB->insert(MachineBasicBlock::iterator(MI), MIB);
      MI->eraseFromParent();
      break;
    }
  }

  return MBB;
}
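// Example of the rewrite (a sketch): in an accumulator loop,
//   %acc.next = VFMADDPSr213r %m2<tied>, %m1, %acc    ; m1*m2 + acc
// becomes
//   %acc.next = VFMADDPSr231r %acc<tied>, %m1, %m2    ; same value
// so the destination is tied to the accumulator and the coalescer can keep it
// in one register across iterations.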
21437 MachineBasicBlock *
21438 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
21439 MachineBasicBlock *BB) const {
21440 switch (MI->getOpcode()) {
21441 default: llvm_unreachable("Unexpected instr type to insert");
21442 case X86::TAILJMPd64:
21443 case X86::TAILJMPr64:
21444 case X86::TAILJMPm64:
21445 llvm_unreachable("TAILJMP64 would not be touched here.");
21446 case X86::TCRETURNdi64:
21447 case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
    return BB;
  case X86::WIN_ALLOCA:
21451 return EmitLoweredWinAlloca(MI, BB);
21452 case X86::SEG_ALLOCA_32:
21453 case X86::SEG_ALLOCA_64:
21454 return EmitLoweredSegAlloca(MI, BB);
21455 case X86::TLSCall_32:
21456 case X86::TLSCall_64:
21457 return EmitLoweredTLSCall(MI, BB);
21458 case X86::CMOV_GR8:
21459 case X86::CMOV_FR32:
21460 case X86::CMOV_FR64:
21461 case X86::CMOV_V4F32:
21462 case X86::CMOV_V2F64:
21463 case X86::CMOV_V2I64:
21464 case X86::CMOV_V8F32:
21465 case X86::CMOV_V4F64:
21466 case X86::CMOV_V4I64:
21467 case X86::CMOV_V16F32:
21468 case X86::CMOV_V8F64:
21469 case X86::CMOV_V8I64:
21470 case X86::CMOV_GR16:
21471 case X86::CMOV_GR32:
21472 case X86::CMOV_RFP32:
21473 case X86::CMOV_RFP64:
21474 case X86::CMOV_RFP80:
21475 return EmitLoweredSelect(MI, BB);
21477 case X86::FP32_TO_INT16_IN_MEM:
21478 case X86::FP32_TO_INT32_IN_MEM:
21479 case X86::FP32_TO_INT64_IN_MEM:
21480 case X86::FP64_TO_INT16_IN_MEM:
21481 case X86::FP64_TO_INT32_IN_MEM:
21482 case X86::FP64_TO_INT64_IN_MEM:
21483 case X86::FP80_TO_INT16_IN_MEM:
21484 case X86::FP80_TO_INT32_IN_MEM:
21485 case X86::FP80_TO_INT64_IN_MEM: {
21486 MachineFunction *F = BB->getParent();
21487 const TargetInstrInfo *TII = F->getSubtarget().getInstrInfo();
21488 DebugLoc DL = MI->getDebugLoc();
21490 // Change the floating point control register to use "round towards zero"
21491 // mode when truncating to an integer value.
21492 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
21493 addFrameReference(BuildMI(*BB, MI, DL,
21494 TII->get(X86::FNSTCW16m)), CWFrameIdx);
    // Load the old value of the high byte of the control word...
    unsigned OldCW =
      F->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
                      CWFrameIdx);

    // Set the high part to be round to zero...
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
      .addImm(0xC7F);

    // Reload the modified control word now...
21507 addFrameReference(BuildMI(*BB, MI, DL,
21508 TII->get(X86::FLDCW16m)), CWFrameIdx);
21510 // Restore the memory image of control word to original value
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
      .addReg(OldCW);

    // Get the X86 opcode to use.
    unsigned Opc;
21516 switch (MI->getOpcode()) {
21517 default: llvm_unreachable("illegal opcode!");
21518 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
21519 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
21520 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
21521 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
21522 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
21523 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
21524 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
21525 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
    case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
    }

    X86AddressMode AM;
    MachineOperand &Op = MI->getOperand(0);
    if (Op.isReg()) {
      AM.BaseType = X86AddressMode::RegBase;
      AM.Base.Reg = Op.getReg();
    } else {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = Op.getIndex();
    }
    Op = MI->getOperand(1);
    if (Op.isImm())
      AM.Scale = Op.getImm();
    Op = MI->getOperand(2);
    if (Op.isImm())
      AM.IndexReg = Op.getImm();
    Op = MI->getOperand(3);
    if (Op.isGlobal()) {
      AM.GV = Op.getGlobal();
    } else {
      AM.Disp = Op.getImm();
    }
21550 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
21551 .addReg(MI->getOperand(X86::AddrNumOperands).getReg());
21553 // Reload the original control word now.
21554 addFrameReference(BuildMI(*BB, MI, DL,
21555 TII->get(X86::FLDCW16m)), CWFrameIdx);
    MI->eraseFromParent(); // The pseudo instruction is gone now.
    return BB;
  }
21560 // String/text processing lowering.
21561 case X86::PCMPISTRM128REG:
21562 case X86::VPCMPISTRM128REG:
21563 case X86::PCMPISTRM128MEM:
21564 case X86::VPCMPISTRM128MEM:
21565 case X86::PCMPESTRM128REG:
21566 case X86::VPCMPESTRM128REG:
21567 case X86::PCMPESTRM128MEM:
21568 case X86::VPCMPESTRM128MEM:
21569 assert(Subtarget->hasSSE42() &&
21570 "Target must have SSE4.2 or AVX features enabled");
21571 return EmitPCMPSTRM(MI, BB, BB->getParent()->getSubtarget().getInstrInfo());
21573 // String/text processing lowering.
21574 case X86::PCMPISTRIREG:
21575 case X86::VPCMPISTRIREG:
21576 case X86::PCMPISTRIMEM:
21577 case X86::VPCMPISTRIMEM:
21578 case X86::PCMPESTRIREG:
21579 case X86::VPCMPESTRIREG:
21580 case X86::PCMPESTRIMEM:
21581 case X86::VPCMPESTRIMEM:
21582 assert(Subtarget->hasSSE42() &&
21583 "Target must have SSE4.2 or AVX features enabled");
21584 return EmitPCMPSTRI(MI, BB, BB->getParent()->getSubtarget().getInstrInfo());
  // Thread synchronization.
  case X86::MONITOR:
    return EmitMonitor(MI, BB, BB->getParent()->getSubtarget().getInstrInfo(),
                       Subtarget);

  // xbegin
  case X86::XBEGIN:
    return EmitXBegin(MI, BB, BB->getParent()->getSubtarget().getInstrInfo());
21595 case X86::VASTART_SAVE_XMM_REGS:
21596 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
21598 case X86::VAARG_64:
21599 return EmitVAARG64WithCustomInserter(MI, BB);
21601 case X86::EH_SjLj_SetJmp32:
21602 case X86::EH_SjLj_SetJmp64:
21603 return emitEHSjLjSetJmp(MI, BB);
21605 case X86::EH_SjLj_LongJmp32:
21606 case X86::EH_SjLj_LongJmp64:
21607 return emitEHSjLjLongJmp(MI, BB);
21609 case TargetOpcode::STATEPOINT:
21610 // As an implementation detail, STATEPOINT shares the STACKMAP format at
21611 // this point in the process. We diverge later.
21612 return emitPatchPoint(MI, BB);
21614 case TargetOpcode::STACKMAP:
21615 case TargetOpcode::PATCHPOINT:
21616 return emitPatchPoint(MI, BB);
21618 case X86::VFMADDPDr213r:
21619 case X86::VFMADDPSr213r:
21620 case X86::VFMADDSDr213r:
21621 case X86::VFMADDSSr213r:
21622 case X86::VFMSUBPDr213r:
21623 case X86::VFMSUBPSr213r:
21624 case X86::VFMSUBSDr213r:
21625 case X86::VFMSUBSSr213r:
21626 case X86::VFNMADDPDr213r:
21627 case X86::VFNMADDPSr213r:
21628 case X86::VFNMADDSDr213r:
21629 case X86::VFNMADDSSr213r:
21630 case X86::VFNMSUBPDr213r:
21631 case X86::VFNMSUBPSr213r:
21632 case X86::VFNMSUBSDr213r:
21633 case X86::VFNMSUBSSr213r:
21634 case X86::VFMADDSUBPDr213r:
21635 case X86::VFMADDSUBPSr213r:
21636 case X86::VFMSUBADDPDr213r:
21637 case X86::VFMSUBADDPSr213r:
21638 case X86::VFMADDPDr213rY:
21639 case X86::VFMADDPSr213rY:
21640 case X86::VFMSUBPDr213rY:
21641 case X86::VFMSUBPSr213rY:
21642 case X86::VFNMADDPDr213rY:
21643 case X86::VFNMADDPSr213rY:
21644 case X86::VFNMSUBPDr213rY:
21645 case X86::VFNMSUBPSr213rY:
21646 case X86::VFMADDSUBPDr213rY:
21647 case X86::VFMADDSUBPSr213rY:
21648 case X86::VFMSUBADDPDr213rY:
21649 case X86::VFMSUBADDPSr213rY:
    return emitFMA3Instr(MI, BB);
  }
}
21654 //===----------------------------------------------------------------------===//
21655 // X86 Optimization Hooks
21656 //===----------------------------------------------------------------------===//
void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      APInt &KnownZero,
                                                      APInt &KnownOne,
                                                      const SelectionDAG &DAG,
21662 unsigned Depth) const {
21663 unsigned BitWidth = KnownZero.getBitWidth();
21664 unsigned Opc = Op.getOpcode();
21665 assert((Opc >= ISD::BUILTIN_OP_END ||
21666 Opc == ISD::INTRINSIC_WO_CHAIN ||
21667 Opc == ISD::INTRINSIC_W_CHAIN ||
21668 Opc == ISD::INTRINSIC_VOID) &&
21669 "Should use MaskedValueIsZero if you don't know whether Op"
21670 " is a target node!");
  KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
  switch (Opc) {
  default: break;
  case X86ISD::ADD:
  case X86ISD::SUB:
  case X86ISD::ADC:
  case X86ISD::SBB:
  case X86ISD::SMUL:
  case X86ISD::UMUL:
  case X86ISD::INC:
  case X86ISD::DEC:
  case X86ISD::OR:
  case X86ISD::XOR:
  case X86ISD::AND:
    // These nodes' second result is a boolean.
    if (Op.getResNo() == 0)
      break;
    // Fallthrough
  case X86ISD::SETCC:
    KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    break;
  case ISD::INTRINSIC_WO_CHAIN: {
21694 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    unsigned NumLoBits = 0;
    switch (IntId) {
    default: break;
    case Intrinsic::x86_sse_movmsk_ps:
21699 case Intrinsic::x86_avx_movmsk_ps_256:
21700 case Intrinsic::x86_sse2_movmsk_pd:
21701 case Intrinsic::x86_avx_movmsk_pd_256:
21702 case Intrinsic::x86_mmx_pmovmskb:
21703 case Intrinsic::x86_sse2_pmovmskb_128:
21704 case Intrinsic::x86_avx2_pmovmskb: {
      // High bits of movmskp{s|d}, pmovmskb are known zero.
      switch (IntId) {
      default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
21708 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break;
21709 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break;
21710 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break;
21711 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break;
21712 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break;
21713 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
      case Intrinsic::x86_avx2_pmovmskb:     NumLoBits = 32; break;
      }
      KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
      break;
    }
    }
    break;
  }
  }
}
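// For example (a sketch): MOVMSKPS defines only 4 mask bits, so for
//   %m = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %v)
// bits 31:4 of %m are reported as known zero, which lets later combines drop
// a redundant "andl $15, %eax"-style mask.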
unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
  SDValue Op,
  const SelectionDAG &,
21728 unsigned Depth) const {
21729 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
21730 if (Op.getOpcode() == X86ISD::SETCC_CARRY)
    return Op.getValueType().getScalarType().getSizeInBits();

  // Fallback case.
  return 1;
}
21737 /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
21738 /// node is a GlobalAddress + offset.
21739 bool X86TargetLowering::isGAPlusOffset(SDNode *N,
21740 const GlobalValue* &GA,
21741 int64_t &Offset) const {
21742 if (N->getOpcode() == X86ISD::Wrapper) {
21743 if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
21744 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
      Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
      return true;
    }
  }
  return TargetLowering::isGAPlusOffset(N, GA, Offset);
}
21752 /// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the
21753 /// same as extracting the high 128-bit part of 256-bit vector and then
21754 /// inserting the result into the low part of a new 256-bit vector
21755 static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) {
21756 EVT VT = SVOp->getValueType(0);
21757 unsigned NumElems = VT.getVectorNumElements();
21759 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
21760 for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j)
21761 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
        SVOp->getMaskElt(j) >= 0)
      return false;

  return true;
}
21768 /// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the
21769 /// same as extracting the low 128-bit part of 256-bit vector and then
21770 /// inserting the result into the high part of a new 256-bit vector
21771 static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) {
21772 EVT VT = SVOp->getValueType(0);
21773 unsigned NumElems = VT.getVectorNumElements();
21775 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
21776 for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j)
21777 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
        SVOp->getMaskElt(j) >= 0)
      return false;

  return true;
}
21784 /// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
21785 static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
21786 TargetLowering::DAGCombinerInfo &DCI,
21787 const X86Subtarget* Subtarget) {
  SDLoc dl(N);
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
21790 SDValue V1 = SVOp->getOperand(0);
21791 SDValue V2 = SVOp->getOperand(1);
21792 EVT VT = SVOp->getValueType(0);
21793 unsigned NumElems = VT.getVectorNumElements();
21795 if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
21796 V2.getOpcode() == ISD::CONCAT_VECTORS) {
    //
    //                   0,0,0,...
    //                      |
    //    V      UNDEF    BUILD_VECTOR  UNDEF
    //     \      /           \           /
    //  CONCAT_VECTOR         CONCAT_VECTOR
    //         \                  /
    //          \                /
    //          RESULT: V + zero extended
    //
21807 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
21808 V2.getOperand(1).getOpcode() != ISD::UNDEF ||
        V1.getOperand(1).getOpcode() != ISD::UNDEF)
      return SDValue();

    if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
      return SDValue();
21815 // To match the shuffle mask, the first half of the mask should
21816 // be exactly the first vector, and all the rest a splat with the
21817 // first element of the second one.
21818 for (unsigned i = 0; i != NumElems/2; ++i)
21819 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) ||
          !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
        return SDValue();
21823 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
21824 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) {
21825 if (Ld->hasNUsesOfValue(1, 0)) {
21826 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
21827 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
        SDValue ResNode =
          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
                                  Ld->getMemoryVT(),
                                  Ld->getPointerInfo(),
21832 Ld->getAlignment(),
21833 false/*isVolatile*/, true/*ReadMem*/,
21834 false/*WriteMem*/);
21836 // Make sure the newly-created LOAD is in the same position as Ld in
21837 // terms of dependency. We create a TokenFactor for Ld and ResNode,
21838 // and update uses of Ld's output chain to use the TokenFactor.
21839 if (Ld->hasAnyUseOfValue(1)) {
21840 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
21841 SDValue(Ld, 1), SDValue(ResNode.getNode(), 1));
21842 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
21843 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1),
21844 SDValue(ResNode.getNode(), 1));
21845 }
21847 return DAG.getNode(ISD::BITCAST, dl, VT, ResNode);
21848 }
21849 }
21851 // Emit a zeroed vector and insert the desired subvector on its
21852 // first half.
21853 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
21854 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
21855 return DCI.CombineTo(N, InsV);
21856 }
21858 //===--------------------------------------------------------------------===//
21859 // Combine some shuffles into subvector extracts and inserts:
21862 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
21863 if (isShuffleHigh128VectorInsertLow(SVOp)) {
21864 SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl);
21865 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl);
21866 return DCI.CombineTo(N, InsV);
21867 }
21869 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
21870 if (isShuffleLow128VectorInsertHigh(SVOp)) {
21871 SDValue V = Extract128BitVector(V1, 0, DAG, dl);
21872 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl);
21873 return DCI.CombineTo(N, InsV);
21874 }
21876 return SDValue();
21877 }
21879 /// \brief Combine an arbitrary chain of shuffles into a single instruction if
21880 /// possible.
21881 ///
21882 /// This is the leaf of the recursive combine below. When we have found some
21883 /// chain of single-use x86 shuffle instructions and accumulated the combined
21884 /// shuffle mask represented by them, this will try to pattern match that mask
21885 /// into either a single instruction if there is a special purpose instruction
21886 /// for this operation, or into a PSHUFB instruction which is a fully general
21887 /// instruction but should only be used to replace chains over a certain depth.
21888 static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
21889 int Depth, bool HasPSHUFB, SelectionDAG &DAG,
21890 TargetLowering::DAGCombinerInfo &DCI,
21891 const X86Subtarget *Subtarget) {
21892 assert(!Mask.empty() && "Cannot combine an empty shuffle mask!");
21894 // Find the operand that enters the chain. Note that multiple uses are OK
21895 // here, we're not going to remove the operand we find.
21896 SDValue Input = Op.getOperand(0);
21897 while (Input.getOpcode() == ISD::BITCAST)
21898 Input = Input.getOperand(0);
21900 MVT VT = Input.getSimpleValueType();
21901 MVT RootVT = Root.getSimpleValueType();
21902 SDLoc DL(Op);
21904 // Just remove no-op shuffle masks.
21905 if (Mask.size() == 1) {
21906 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Input),
21907 /*AddTo*/ true);
21908 return true;
21909 }
21911 // Use the float domain if the operand type is a floating point type.
21912 bool FloatDomain = VT.isFloatingPoint();
21914 // For floating point shuffles, we don't have free copies in the shuffle
21915 // instructions or the ability to load as part of the instruction, so
21916 // canonicalize their shuffles to UNPCK or MOV variants.
21918 // Note that even with AVX we prefer the PSHUFD form of shuffle for integer
21919 // vectors because it can have a load folded into it that UNPCK cannot. This
21920 // doesn't preclude something switching to the shorter encoding post-RA.
21921 if (FloatDomain) {
21922 if (Mask.equals(0, 0) || Mask.equals(1, 1)) {
21923 bool Lo = Mask.equals(0, 0);
21924 unsigned Shuffle;
21925 MVT ShuffleVT;
21926 // Check if we have SSE3 which will let us use MOVDDUP. That instruction
21927 // is no slower than UNPCKLPD but has the option to fold the input operand
21928 // into even an unaligned memory load.
21929 if (Lo && Subtarget->hasSSE3()) {
21930 Shuffle = X86ISD::MOVDDUP;
21931 ShuffleVT = MVT::v2f64;
21932 } else {
21933 // We have MOVLHPS and MOVHLPS throughout SSE and they encode smaller
21934 // than the UNPCK variants.
21935 Shuffle = Lo ? X86ISD::MOVLHPS : X86ISD::MOVHLPS;
21936 ShuffleVT = MVT::v4f32;
21937 }
21938 if (Depth == 1 && Root->getOpcode() == Shuffle)
21939 return false; // Nothing to do!
21940 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
21941 DCI.AddToWorklist(Op.getNode());
21942 if (Shuffle == X86ISD::MOVDDUP)
21943 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
21944 else
21945 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
21946 DCI.AddToWorklist(Op.getNode());
21947 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
21948 /*AddTo*/ true);
21949 return true;
21950 }
21951 if (Subtarget->hasSSE3() &&
21952 (Mask.equals(0, 0, 2, 2) || Mask.equals(1, 1, 3, 3))) {
21953 bool Lo = Mask.equals(0, 0, 2, 2);
21954 unsigned Shuffle = Lo ? X86ISD::MOVSLDUP : X86ISD::MOVSHDUP;
21955 MVT ShuffleVT = MVT::v4f32;
21956 if (Depth == 1 && Root->getOpcode() == Shuffle)
21957 return false; // Nothing to do!
21958 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
21959 DCI.AddToWorklist(Op.getNode());
21960 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
21961 DCI.AddToWorklist(Op.getNode());
21962 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
21963 /*AddTo*/ true);
21964 return true;
21965 }
21966 if (Mask.equals(0, 0, 1, 1) || Mask.equals(2, 2, 3, 3)) {
21967 bool Lo = Mask.equals(0, 0, 1, 1);
21968 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
21969 MVT ShuffleVT = MVT::v4f32;
21970 if (Depth == 1 && Root->getOpcode() == Shuffle)
21971 return false; // Nothing to do!
21972 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
21973 DCI.AddToWorklist(Op.getNode());
21974 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
21975 DCI.AddToWorklist(Op.getNode());
21976 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
21977 /*AddTo*/ true);
21978 return true;
21979 }
21980 }
21982 // We always canonicalize the 8 x i16 and 16 x i8 shuffles into their UNPCK
21983 // variants as none of these have single-instruction variants that are
21984 // superior to the UNPCK formulation.
21985 if (!FloatDomain &&
21986 (Mask.equals(0, 0, 1, 1, 2, 2, 3, 3) ||
21987 Mask.equals(4, 4, 5, 5, 6, 6, 7, 7) ||
21988 Mask.equals(0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7) ||
21989 Mask.equals(8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
21990 15))) {
21991 bool Lo = Mask[0] == 0;
21992 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
21993 if (Depth == 1 && Root->getOpcode() == Shuffle)
21994 return false; // Nothing to do!
21995 MVT ShuffleVT;
21996 switch (Mask.size()) {
21997 case 8:
21998 ShuffleVT = MVT::v8i16;
21999 break;
22000 case 16:
22001 ShuffleVT = MVT::v16i8;
22002 break;
22003 default:
22004 llvm_unreachable("Impossible mask size!");
22005 }
22006 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22007 DCI.AddToWorklist(Op.getNode());
22008 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22009 DCI.AddToWorklist(Op.getNode());
22010 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22011 /*AddTo*/ true);
22012 return true;
22013 }
22015 // Don't try to re-form single instruction chains under any circumstances now
22016 // that we've done encoding canonicalization for them.
22017 if (Depth < 2)
22018 return false;
22020 // If we have 3 or more shuffle instructions or a chain involving PSHUFB, we
22021 // can replace them with a single PSHUFB instruction profitably. Intel's
22022 // manuals suggest only using PSHUFB if doing so replaces 5 instructions, but
22023 // in practice PSHUFB tends to be *very* fast so we're more aggressive.
22024 if ((Depth >= 3 || HasPSHUFB) && Subtarget->hasSSSE3()) {
22025 SmallVector<SDValue, 16> PSHUFBMask;
22026 assert(Mask.size() <= 16 && "Can't shuffle elements smaller than bytes!");
22027 int Ratio = 16 / Mask.size();
22028 for (unsigned i = 0; i < 16; ++i) {
22029 if (Mask[i / Ratio] == SM_SentinelUndef) {
22030 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
22031 continue;
22032 }
22033 int M = Mask[i / Ratio] != SM_SentinelZero
22034 ? Ratio * Mask[i / Ratio] + i % Ratio
22035 : 255;
22036 PSHUFBMask.push_back(DAG.getConstant(M, MVT::i8));
22037 }
22038 Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Input);
22039 DCI.AddToWorklist(Op.getNode());
22040 SDValue PSHUFBMaskOp =
22041 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, PSHUFBMask);
22042 DCI.AddToWorklist(PSHUFBMaskOp.getNode());
22043 Op = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, Op, PSHUFBMaskOp);
22044 DCI.AddToWorklist(Op.getNode());
22045 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22046 /*AddTo*/ true);
22047 return true;
22048 }
22050 // Failed to find any combines.
22051 return false;
22052 }
22054 /// \brief Fully generic combining of x86 shuffle instructions.
22056 /// This should be the last combine run over the x86 shuffle instructions. Once
22057 /// they have been fully optimized, this will recursively consider all chains
22058 /// of single-use shuffle instructions, build a generic model of the cumulative
22059 /// shuffle operation, and check for simpler instructions which implement this
22060 /// operation. We use this primarily for two purposes:
22062 /// 1) Collapse generic shuffles to specialized single instructions when
22063 /// equivalent. In most cases, this is just an encoding size win, but
22064 /// sometimes we will collapse multiple generic shuffles into a single
22065 /// special-purpose shuffle.
22066 /// 2) Look for sequences of shuffle instructions with 3 or more total
22067 /// instructions, and replace them with the slightly more expensive SSSE3
22068 /// PSHUFB instruction if available. We do this as the last combining step
22069 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
22070 /// a suitable short sequence of other instructions. The PSHUFB will either
22071 /// use a register or have to read from memory and so is slightly (but only
22072 /// slightly) more expensive than the other shuffle instructions.
22074 /// Because this is inherently a quadratic operation (for each shuffle in
22075 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
22076 /// This should never be an issue in practice as the shuffle lowering doesn't
22077 /// produce sequences of more than 8 instructions.
22079 /// FIXME: We will currently miss some cases where the redundant shuffling
22080 /// would simplify under the threshold for PSHUFB formation because of
22081 /// combine-ordering. To fix this, we should do the redundant instruction
22082 /// combining in this recursive walk.
22083 static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
22084 ArrayRef<int> RootMask,
22085 int Depth, bool HasPSHUFB,
22086 SelectionDAG &DAG,
22087 TargetLowering::DAGCombinerInfo &DCI,
22088 const X86Subtarget *Subtarget) {
22089 // Bound the depth of our recursive combine because this is ultimately
22090 // quadratic in nature.
22091 if (Depth > 8)
22092 return false;
22094 // Directly rip through bitcasts to find the underlying operand.
22095 while (Op.getOpcode() == ISD::BITCAST && Op.getOperand(0).hasOneUse())
22096 Op = Op.getOperand(0);
22098 MVT VT = Op.getSimpleValueType();
22099 if (!VT.isVector())
22100 return false; // Bail if we hit a non-vector.
22101 // FIXME: This routine should be taught about 256-bit shuffles, or a 256-bit
22102 // version should be added.
22103 if (VT.getSizeInBits() != 128)
22104 return false;
22106 assert(Root.getSimpleValueType().isVector() &&
22107 "Shuffles operate on vector types!");
22108 assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
22109 "Can only combine shuffles of the same vector register size.");
22111 if (!isTargetShuffle(Op.getOpcode()))
22112 return false;
22113 SmallVector<int, 16> OpMask;
22114 bool IsUnary;
22115 bool HaveMask = getTargetShuffleMask(Op.getNode(), VT, OpMask, IsUnary);
22116 // We can only combine unary shuffles for which we can decode the mask.
22117 if (!HaveMask || !IsUnary)
22118 return false;
22120 assert(VT.getVectorNumElements() == OpMask.size() &&
22121 "Different mask size from vector size!");
22122 assert(((RootMask.size() > OpMask.size() &&
22123 RootMask.size() % OpMask.size() == 0) ||
22124 (OpMask.size() > RootMask.size() &&
22125 OpMask.size() % RootMask.size() == 0) ||
22126 OpMask.size() == RootMask.size()) &&
22127 "The smaller number of elements must divide the larger.");
22128 int RootRatio = std::max<int>(1, OpMask.size() / RootMask.size());
22129 int OpRatio = std::max<int>(1, RootMask.size() / OpMask.size());
22130 assert(((RootRatio == 1 && OpRatio == 1) ||
22131 (RootRatio == 1) != (OpRatio == 1)) &&
22132 "Must not have a ratio for both incoming and op masks!");
22134 SmallVector<int, 16> Mask;
22135 Mask.reserve(std::max(OpMask.size(), RootMask.size()));
22137 // Merge this shuffle operation's mask into our accumulated mask. Note that
22138 // this shuffle's mask will be the first applied to the input, followed by the
22139 // root mask to get us all the way to the root value arrangement. The reason
22140 // for this order is that we are recursing up the operation chain.
22141 for (int i = 0, e = std::max(OpMask.size(), RootMask.size()); i < e; ++i) {
22142 int RootIdx = i / RootRatio;
22143 if (RootMask[RootIdx] < 0) {
22144 // This is a zero or undef lane, we're done.
22145 Mask.push_back(RootMask[RootIdx]);
22146 continue;
22147 }
22149 int RootMaskedIdx = RootMask[RootIdx] * RootRatio + i % RootRatio;
22150 int OpIdx = RootMaskedIdx / OpRatio;
22151 if (OpMask[OpIdx] < 0) {
22152 // The incoming lanes are zero or undef; it doesn't matter which ones we
22153 // use.
22154 Mask.push_back(OpMask[OpIdx]);
22155 continue;
22156 }
22158 // Ok, we have non-zero lanes, map them through.
22159 Mask.push_back(OpMask[OpIdx] * OpRatio +
22160 RootMaskedIdx % OpRatio);
22161 }
22163 // See if we can recurse into the operand to combine more things.
22164 switch (Op.getOpcode()) {
22165 case X86ISD::PSHUFB:
22166 HasPSHUFB = true;
22167 case X86ISD::PSHUFD:
22168 case X86ISD::PSHUFHW:
22169 case X86ISD::PSHUFLW:
22170 if (Op.getOperand(0).hasOneUse() &&
22171 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22172 HasPSHUFB, DAG, DCI, Subtarget))
22173 return true;
22174 break;
22176 case X86ISD::UNPCKL:
22177 case X86ISD::UNPCKH:
22178 assert(Op.getOperand(0) == Op.getOperand(1) && "We only combine unary shuffles!");
22179 // We can't check for single use, we have to check that this shuffle is the only user.
22180 if (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
22181 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22182 HasPSHUFB, DAG, DCI, Subtarget))
22183 return true;
22184 break;
22185 }
22187 // Minor canonicalization of the accumulated shuffle mask to make it easier
22188 // to match below. All this does is detect masks with sequential pairs of
22189 // elements, and shrink them to the half-width mask. It does this in a loop
22190 // so it will reduce the size of the mask to the minimal width mask which
22191 // performs an equivalent shuffle.
22192 SmallVector<int, 16> WidenedMask;
22193 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
22194 Mask = std::move(WidenedMask);
22195 WidenedMask.clear();
22196 }
22198 return combineX86ShuffleChain(Op, Root, Mask, Depth, HasPSHUFB, DAG, DCI,
22199 Subtarget);
22200 }
22202 /// \brief Get the PSHUF-style mask from PSHUF node.
22204 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
22205 /// PSHUF-style masks that can be reused with such instructions.
22206 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
22207 SmallVector<int, 4> Mask;
22208 bool IsUnary;
22209 bool HaveMask = getTargetShuffleMask(N.getNode(), N.getSimpleValueType(), Mask, IsUnary);
22210 (void)HaveMask;
22211 assert(HaveMask);
22213 switch (N.getOpcode()) {
22214 case X86ISD::PSHUFD:
22215 return Mask;
22216 case X86ISD::PSHUFLW:
22217 Mask.resize(4);
22218 return Mask;
22219 case X86ISD::PSHUFHW:
22220 Mask.erase(Mask.begin(), Mask.begin() + 4);
22221 for (int &M : Mask)
22222 M -= 4;
22223 return Mask;
22224 default:
22225 llvm_unreachable("No valid shuffle instruction found!");
22226 }
22227 }
22229 /// \brief Search for a combinable shuffle across a chain ending in pshufd.
22231 /// We walk up the chain and look for a combinable shuffle, skipping over
22232 /// shuffles that we could hoist this shuffle's transformation past without
22233 /// altering anything.
22234 static SDValue
22235 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
22236 SelectionDAG &DAG,
22237 TargetLowering::DAGCombinerInfo &DCI) {
22238 assert(N.getOpcode() == X86ISD::PSHUFD &&
22239 "Called with something other than an x86 128-bit dword shuffle!");
22240 SDLoc DL(N);
22242 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
22243 // of the shuffles in the chain so that we can form a fresh chain to replace
22244 // this chain.
22245 SmallVector<SDValue, 8> Chain;
22246 SDValue V = N.getOperand(0);
22247 for (; V.hasOneUse(); V = V.getOperand(0)) {
22248 switch (V.getOpcode()) {
22249 default:
22250 return SDValue(); // Nothing combined!
22252 case ISD::BITCAST:
22253 // Skip bitcasts as we always know the type for the target specific
22254 // instructions.
22255 continue;
22257 case X86ISD::PSHUFD:
22258 // Found another dword shuffle.
22259 break;
22261 case X86ISD::PSHUFLW:
22262 // Check that the low words (being shuffled) are the identity in the
22263 // dword shuffle, and the high words are self-contained.
22264 if (Mask[0] != 0 || Mask[1] != 1 ||
22265 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
22266 return SDValue();
22268 Chain.push_back(V);
22269 continue;
22271 case X86ISD::PSHUFHW:
22272 // Check that the high words (being shuffled) are the identity in the
22273 // dword shuffle, and the low words are self-contained.
22274 if (Mask[2] != 2 || Mask[3] != 3 ||
22275 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
22276 return SDValue();
22278 Chain.push_back(V);
22279 continue;
22281 case X86ISD::UNPCKL:
22282 case X86ISD::UNPCKH:
22283 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
22284 // shuffle into a preceding word shuffle.
22285 if (V.getValueType() != MVT::v16i8 && V.getValueType() != MVT::v8i16)
22286 return SDValue();
22288 // Search for a half-shuffle which we can combine with.
22289 unsigned CombineOp =
22290 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
22291 if (V.getOperand(0) != V.getOperand(1) ||
22292 !V->isOnlyUserOf(V.getOperand(0).getNode()))
22293 return SDValue();
22294 Chain.push_back(V);
22295 V = V.getOperand(0);
22296 do {
22297 switch (V.getOpcode()) {
22298 default:
22299 return SDValue(); // Nothing to combine.
22301 case X86ISD::PSHUFLW:
22302 case X86ISD::PSHUFHW:
22303 if (V.getOpcode() == CombineOp)
22304 break;
22306 Chain.push_back(V);
22308 // Fallthrough!
22309 case ISD::BITCAST:
22310 V = V.getOperand(0);
22311 continue;
22312 }
22313 break;
22314 } while (V.hasOneUse());
22315 break;
22316 }
22317 // Break out of the loop if we break out of the switch.
22318 break;
22319 }
22321 if (!V.hasOneUse())
22322 // We fell out of the loop without finding a viable combining instruction.
22323 return SDValue();
22325 // Merge this node's mask and our incoming mask.
22326 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22327 for (int &M : Mask)
22328 M = VMask[M];
22329 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
22330 getV4X86ShuffleImm8ForMask(Mask, DAG));
22332 // Rebuild the chain around this new shuffle.
22333 while (!Chain.empty()) {
22334 SDValue W = Chain.pop_back_val();
22336 if (V.getValueType() != W.getOperand(0).getValueType())
22337 V = DAG.getNode(ISD::BITCAST, DL, W.getOperand(0).getValueType(), V);
22339 switch (W.getOpcode()) {
22341 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
22343 case X86ISD::UNPCKL:
22344 case X86ISD::UNPCKH:
22345 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
22346 break;
22348 case X86ISD::PSHUFD:
22349 case X86ISD::PSHUFLW:
22350 case X86ISD::PSHUFHW:
22351 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
22352 break;
22353 }
22354 }
22355 if (V.getValueType() != N.getValueType())
22356 V = DAG.getNode(ISD::BITCAST, DL, N.getValueType(), V);
22358 // Return the new chain to replace N.
22359 return V;
22360 }
22362 /// \brief Search for a combinable shuffle across a chain ending in pshuflw or pshufhw.
22364 /// We walk up the chain, skipping shuffles of the other half and looking
22365 /// through shuffles which switch halves trying to find a shuffle of the same
22366 /// pair of dwords.
22367 static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
22368 SelectionDAG &DAG,
22369 TargetLowering::DAGCombinerInfo &DCI) {
22370 assert(
22371 (N.getOpcode() == X86ISD::PSHUFLW || N.getOpcode() == X86ISD::PSHUFHW) &&
22372 "Called with something other than an x86 128-bit half shuffle!");
22373 SDLoc DL(N);
22374 unsigned CombineOpcode = N.getOpcode();
22376 // Walk up a single-use chain looking for a combinable shuffle.
22377 SDValue V = N.getOperand(0);
22378 for (; V.hasOneUse(); V = V.getOperand(0)) {
22379 switch (V.getOpcode()) {
22380 default:
22381 return false; // Nothing combined!
22383 case ISD::BITCAST:
22384 // Skip bitcasts as we always know the type for the target specific
22385 // instructions.
22386 continue;
22388 case X86ISD::PSHUFLW:
22389 case X86ISD::PSHUFHW:
22390 if (V.getOpcode() == CombineOpcode)
22391 break;
22393 // Other-half shuffles are no-ops.
22394 continue;
22395 }
22396 // Break out of the loop if we break out of the switch.
22397 break;
22398 }
22400 if (!V.hasOneUse())
22401 // We fell out of the loop without finding a viable combining instruction.
22402 return false;
22404 // Combine away the bottom node as its shuffle will be accumulated into
22405 // a preceding shuffle.
22406 DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22408 // Record the old value.
22409 SDValue Old = V;
22411 // Merge this node's mask and our incoming mask (adjusted to account for all
22412 // the pshufd instructions encountered).
22413 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22414 for (int &M : Mask)
22415 M = VMask[M];
22416 V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
22417 getV4X86ShuffleImm8ForMask(Mask, DAG));
22419 // Check that the shuffles didn't cancel each other out. If not, we need to
22420 // combine to the new one.
22421 if (Old != V)
22422 // Replace the combinable shuffle with the combined one, updating all users
22423 // so that we re-evaluate the chain here.
22424 DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);
22426 return true;
22427 }
22429 /// \brief Try to combine x86 target specific shuffles.
22430 static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
22431 TargetLowering::DAGCombinerInfo &DCI,
22432 const X86Subtarget *Subtarget) {
22433 SDLoc DL(N);
22434 MVT VT = N.getSimpleValueType();
22435 SmallVector<int, 4> Mask;
22437 switch (N.getOpcode()) {
22438 case X86ISD::PSHUFD:
22439 case X86ISD::PSHUFLW:
22440 case X86ISD::PSHUFHW:
22441 Mask = getPSHUFShuffleMask(N);
22442 assert(Mask.size() == 4);
22443 break;
22444 default:
22445 return SDValue();
22446 }
22448 // Nuke no-op shuffles that show up after combining.
22449 if (isNoopShuffleMask(Mask))
22450 return DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22452 // Look for simplifications involving one or two shuffle instructions.
22453 SDValue V = N.getOperand(0);
22454 switch (N.getOpcode()) {
22455 default:
22456 break;
22457 case X86ISD::PSHUFLW:
22458 case X86ISD::PSHUFHW:
22459 assert(VT == MVT::v8i16);
22460 (void)VT;
22462 if (combineRedundantHalfShuffle(N, Mask, DAG, DCI))
22463 return SDValue(); // We combined away this shuffle, so we're done.
22465 // See if this reduces to a PSHUFD which is no more expensive and can
22466 // combine with more operations. Note that it has to at least flip the
22467 // dwords as otherwise it would have been removed as a no-op.
22468 if (Mask[0] == 2 && Mask[1] == 3 && Mask[2] == 0 && Mask[3] == 1) {
22469 int DMask[] = {0, 1, 2, 3};
22470 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
22471 DMask[DOffset + 0] = DOffset + 1;
22472 DMask[DOffset + 1] = DOffset + 0;
22473 V = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V);
22474 DCI.AddToWorklist(V.getNode());
22475 V = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V,
22476 getV4X86ShuffleImm8ForMask(DMask, DAG));
22477 DCI.AddToWorklist(V.getNode());
22478 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
22481 // Look for shuffle patterns which can be implemented as a single unpack.
22482 // FIXME: This doesn't handle the location of the PSHUFD generically, and
22483 // only works when we have a PSHUFD followed by two half-shuffles.
22484 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
22485 (V.getOpcode() == X86ISD::PSHUFLW ||
22486 V.getOpcode() == X86ISD::PSHUFHW) &&
22487 V.getOpcode() != N.getOpcode() &&
22488 V.hasOneUse()) {
22489 SDValue D = V.getOperand(0);
22490 while (D.getOpcode() == ISD::BITCAST && D.hasOneUse())
22491 D = D.getOperand(0);
22492 if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
22493 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22494 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
22495 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
22496 int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
22497 int WordMask[8];
22498 for (int i = 0; i < 4; ++i) {
22499 WordMask[i + NOffset] = Mask[i] + NOffset;
22500 WordMask[i + VOffset] = VMask[i] + VOffset;
22501 }
22502 // Map the word mask through the DWord mask.
22503 int MappedMask[8];
22504 for (int i = 0; i < 8; ++i)
22505 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
22506 const int UnpackLoMask[] = {0, 0, 1, 1, 2, 2, 3, 3};
22507 const int UnpackHiMask[] = {4, 4, 5, 5, 6, 6, 7, 7};
22508 if (std::equal(std::begin(MappedMask), std::end(MappedMask),
22509 std::begin(UnpackLoMask)) ||
22510 std::equal(std::begin(MappedMask), std::end(MappedMask),
22511 std::begin(UnpackHiMask))) {
22512 // We can replace all three shuffles with an unpack.
22513 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, D.getOperand(0));
22514 DCI.AddToWorklist(V.getNode());
22515 return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
22516 : X86ISD::UNPCKH,
22517 DL, MVT::v8i16, V, V);
22518 }
22519 }
22520 }
22521 break;
22524 case X86ISD::PSHUFD:
22525 if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG, DCI))
22526 return NewN;
22528 break;
22529 }
22531 return SDValue();
22532 }
22536 /// We combine this directly on the abstract vector shuffle nodes so it is
22537 /// easier to generically match. We also insert dummy vector shuffle nodes for
22538 /// the operands which explicitly discard the lanes which are unused by this
22539 /// operation to try to flow through the rest of the combiner the fact that
22540 /// they're unused.
22541 static SDValue combineShuffleToAddSub(SDNode *N, SelectionDAG &DAG) {
22542 SDLoc DL(N);
22543 EVT VT = N->getValueType(0);
22545 // We only handle target-independent shuffles.
22546 // FIXME: It would be easy and harmless to use the target shuffle mask
22547 // extraction tool to support more.
22548 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
22549 return SDValue();
22551 auto *SVN = cast<ShuffleVectorSDNode>(N);
22552 ArrayRef<int> Mask = SVN->getMask();
22553 SDValue V1 = N->getOperand(0);
22554 SDValue V2 = N->getOperand(1);
22556 // We require the first shuffle operand to be the SUB node, and the second to
22557 // be the ADD node.
22558 // FIXME: We should support the commuted patterns.
22559 if (V1->getOpcode() != ISD::FSUB || V2->getOpcode() != ISD::FADD)
22560 return SDValue();
22562 // If there are other uses of these operations we can't fold them.
22563 if (!V1->hasOneUse() || !V2->hasOneUse())
22564 return SDValue();
22566 // Ensure that both operations have the same operands. Note that we can
22567 // commute the FADD operands.
22568 SDValue LHS = V1->getOperand(0), RHS = V1->getOperand(1);
22569 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
22570 (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
22571 return SDValue();
22573 // We're looking for blends between FADD and FSUB nodes. We insist on these
22574 // nodes being lined up in a specific expected pattern.
22575 if (!(isShuffleEquivalent(Mask, 0, 3) ||
22576 isShuffleEquivalent(Mask, 0, 5, 2, 7) ||
22577 isShuffleEquivalent(Mask, 0, 9, 2, 11, 4, 13, 6, 15)))
22578 return SDValue();
22580 // Only specific types are legal at this point, assert so we notice if and
22581 // when these change.
22582 assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v8f32 ||
22583 VT == MVT::v4f64) &&
22584 "Unknown vector type encountered!");
22586 return DAG.getNode(X86ISD::ADDSUB, DL, VT, LHS, RHS);
22587 }
22589 /// PerformShuffleCombine - Performs several different shuffle combines.
22590 static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
22591 TargetLowering::DAGCombinerInfo &DCI,
22592 const X86Subtarget *Subtarget) {
22593 SDLoc dl(N);
22594 SDValue N0 = N->getOperand(0);
22595 SDValue N1 = N->getOperand(1);
22596 EVT VT = N->getValueType(0);
22598 // Don't create instructions with illegal types after legalize types has run.
22599 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22600 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
22601 return SDValue();
22603 // If we have legalized the vector types, look for blends of FADD and FSUB
22604 // nodes that we can fuse into an ADDSUB node.
22605 if (TLI.isTypeLegal(VT) && Subtarget->hasSSE3())
22606 if (SDValue AddSub = combineShuffleToAddSub(N, DAG))
22607 return AddSub;
22609 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode
22610 if (Subtarget->hasFp256() && VT.is256BitVector() &&
22611 N->getOpcode() == ISD::VECTOR_SHUFFLE)
22612 return PerformShuffleCombine256(N, DAG, DCI, Subtarget);
22614 // During Type Legalization, when promoting illegal vector types,
22615 // the backend might introduce new shuffle dag nodes and bitcasts.
22617 // This code performs the following transformation:
22618 // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
22619 // (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
22621 // We do this only if both the bitcast and the BINOP dag nodes have
22622 // one use. Also, perform this transformation only if the new binary
22623 // operation is legal. This is to avoid introducing dag nodes that
22624 // potentially need to be further expanded (or custom lowered) into a
22625 // less optimal sequence of dag nodes.
22626 if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
22627 N1.getOpcode() == ISD::UNDEF && N0.hasOneUse() &&
22628 N0.getOpcode() == ISD::BITCAST) {
22629 SDValue BC0 = N0.getOperand(0);
22630 EVT SVT = BC0.getValueType();
22631 unsigned Opcode = BC0.getOpcode();
22632 unsigned NumElts = VT.getVectorNumElements();
22634 if (BC0.hasOneUse() && SVT.isVector() &&
22635 SVT.getVectorNumElements() * 2 == NumElts &&
22636 TLI.isOperationLegal(Opcode, VT)) {
22637 bool CanFold = false;
22638 switch (Opcode) {
22639 default : break;
22640 case ISD::ADD :
22641 case ISD::FADD :
22642 case ISD::SUB :
22643 case ISD::FSUB :
22644 case ISD::MUL :
22645 case ISD::FMUL :
22646 CanFold = true;
22647 }
22649 unsigned SVTNumElts = SVT.getVectorNumElements();
22650 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
22651 for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
22652 CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
22653 for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
22654 CanFold = SVOp->getMaskElt(i) < 0;
22656 if (CanFold) {
22657 SDValue BC00 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(0));
22658 SDValue BC01 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(1));
22659 SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
22660 return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, &SVOp->getMask()[0]);
22661 }
22662 }
22663 }
22665 // Only handle 128-bit wide vectors from here on.
22666 if (!VT.is128BitVector())
22667 return SDValue();
22669 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
22670 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
22671 // consecutive, non-overlapping, and in the right order.
22672 SmallVector<SDValue, 16> Elts;
22673 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
22674 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));
22676 SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true);
22677 if (LD.getNode())
22678 return LD;
22680 if (isTargetShuffle(N->getOpcode())) {
22681 SDValue Shuffle =
22682 PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget);
22683 if (Shuffle.getNode())
22684 return Shuffle;
22686 // Try recursively combining arbitrary sequences of x86 shuffle
22687 // instructions into higher-order shuffles. We do this after combining
22688 // specific PSHUF instruction sequences into their minimal form so that we
22689 // can evaluate how many specialized shuffle instructions are involved in
22690 // a particular chain.
22691 SmallVector<int, 1> NonceMask; // Just a placeholder.
22692 NonceMask.push_back(0);
22693 if (combineX86ShufflesRecursively(SDValue(N, 0), SDValue(N, 0), NonceMask,
22694 /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
22695 DCI, Subtarget))
22696 return SDValue(); // This routine will use CombineTo to replace N.
22697 }
22699 return SDValue();
22700 }
22702 /// PerformTruncateCombine - Converts truncate operation to
22703 /// a sequence of vector shuffle operations.
22704 /// This is possible when we truncate a 256-bit vector to a 128-bit vector.
22705 static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
22706 TargetLowering::DAGCombinerInfo &DCI,
22707 const X86Subtarget *Subtarget) {
22708 return SDValue();
22709 }
22711 /// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
22712 /// specific shuffle of a load can be folded into a single element load.
22713 /// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
22714 /// shuffles have been custom lowered so we need to handle those here.
22715 static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
22716 TargetLowering::DAGCombinerInfo &DCI) {
22717 if (DCI.isBeforeLegalizeOps())
22718 return SDValue();
22720 SDValue InVec = N->getOperand(0);
22721 SDValue EltNo = N->getOperand(1);
22723 if (!isa<ConstantSDNode>(EltNo))
22724 return SDValue();
22726 EVT OriginalVT = InVec.getValueType();
22728 if (InVec.getOpcode() == ISD::BITCAST) {
22729 // Don't duplicate a load with other uses.
22730 if (!InVec.hasOneUse())
22731 return SDValue();
22732 EVT BCVT = InVec.getOperand(0).getValueType();
22733 if (BCVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
22734 return SDValue();
22735 InVec = InVec.getOperand(0);
22736 }
22738 EVT CurrentVT = InVec.getValueType();
22740 if (!isTargetShuffle(InVec.getOpcode()))
22741 return SDValue();
22743 // Don't duplicate a load with other uses.
22744 if (!InVec.hasOneUse())
22745 return SDValue();
22747 SmallVector<int, 16> ShuffleMask;
22748 bool UnaryShuffle;
22749 if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(),
22750 ShuffleMask, UnaryShuffle))
22751 return SDValue();
22753 // Select the input vector, guarding against out of range extract vector.
22754 unsigned NumElems = CurrentVT.getVectorNumElements();
22755 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
22756 int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt];
22757 SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
22758 : InVec.getOperand(1);
22760 // If inputs to shuffle are the same for both ops, then allow 2 uses
22761 unsigned AllowedUses = InVec.getNumOperands() > 1 &&
22762 InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;
22764 if (LdNode.getOpcode() == ISD::BITCAST) {
22765 // Don't duplicate a load with other uses.
22766 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
22767 return SDValue();
22769 AllowedUses = 1; // only allow 1 load use if we have a bitcast
22770 LdNode = LdNode.getOperand(0);
22771 }
22773 if (!ISD::isNormalLoad(LdNode.getNode()))
22774 return SDValue();
22776 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
22778 if (!LN0 || !LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
22779 return SDValue();
22781 EVT EltVT = N->getValueType(0);
22782 // If there's a bitcast before the shuffle, check if the load type and
22783 // alignment are valid.
22784 unsigned Align = LN0->getAlignment();
22785 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22786 unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
22787 EltVT.getTypeForEVT(*DAG.getContext()));
22789 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
22790 return SDValue();
22792 // All checks match so transform back to vector_shuffle so that DAG combiner
22793 // can finish the job.
22794 SDLoc dl(N);
22796 // Create shuffle node taking into account the case that its a unary shuffle
22797 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT)
22798 : InVec.getOperand(1);
22799 Shuffle = DAG.getVectorShuffle(CurrentVT, dl,
22800 InVec.getOperand(0), Shuffle,
22801 &ShuffleMask[0]);
22802 Shuffle = DAG.getNode(ISD::BITCAST, dl, OriginalVT, Shuffle);
22803 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
22804 EltNo);
22805 }
22807 /// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
22808 /// generation and convert it from being a bunch of shuffles and extracts
22809 /// into a somewhat faster sequence. For i686, the best sequence is apparently
22810 /// storing the value and loading scalars back, while for x64 we should
22811 /// use 64-bit extracts and shifts.
22812 static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
22813 TargetLowering::DAGCombinerInfo &DCI) {
22814 SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI);
22815 if (NewOp.getNode())
22816 return NewOp;
22818 SDValue InputVector = N->getOperand(0);
22820 // Detect whether we are trying to convert from mmx to i32 and the bitcast
22821 // from mmx to v2i32 has a single usage.
22822 if (InputVector.getNode()->getOpcode() == llvm::ISD::BITCAST &&
22823 InputVector.getNode()->getOperand(0).getValueType() == MVT::x86mmx &&
22824 InputVector.hasOneUse() && N->getValueType(0) == MVT::i32)
22825 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
22826 N->getValueType(0),
22827 InputVector.getNode()->getOperand(0));
22829 // Only operate on vectors of 4 elements, where the alternative shuffling
22830 // gets to be more expensive.
22831 if (InputVector.getValueType() != MVT::v4i32)
22832 return SDValue();
22834 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
22835 // single use which is a sign-extend or zero-extend, and all elements are
22836 // used.
22837 SmallVector<SDNode *, 4> Uses;
22838 unsigned ExtractedElements = 0;
22839 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
22840 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
22841 if (UI.getUse().getResNo() != InputVector.getResNo())
22842 continue;
22844 SDNode *Extract = *UI;
22845 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
22846 return SDValue();
22848 if (Extract->getValueType(0) != MVT::i32)
22849 return SDValue();
22850 if (!Extract->hasOneUse())
22851 return SDValue();
22852 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
22853 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
22854 return SDValue();
22855 if (!isa<ConstantSDNode>(Extract->getOperand(1)))
22856 return SDValue();
22858 // Record which element was extracted.
22859 ExtractedElements |=
22860 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();
22862 Uses.push_back(Extract);
22863 }
22865 // If not all the elements were used, this may not be worthwhile.
22866 if (ExtractedElements != 15)
22867 return SDValue();
22869 // Ok, we've now decided to do the transformation.
22870 // If 64-bit shifts are legal, use the extract-shift sequence,
22871 // otherwise bounce the vector off the cache.
22872 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22874 SDLoc dl(InputVector);
22875 SDValue Vals[4];
22876 if (TLI.isOperationLegal(ISD::SRA, MVT::i64)) {
22877 SDValue Cst = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, InputVector);
22878 EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy();
22879 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
22880 DAG.getConstant(0, VecIdxTy));
22881 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
22882 DAG.getConstant(1, VecIdxTy));
22884 SDValue ShAmt = DAG.getConstant(32,
22885 DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64));
22886 Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
22887 Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
22888 DAG.getNode(ISD::SRA, dl, MVT::i64, BottomHalf, ShAmt));
22889 Vals[2] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, TopHalf);
22890 Vals[3] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
22891 DAG.getNode(ISD::SRA, dl, MVT::i64, TopHalf, ShAmt));
22892 } else {
22893 // Store the value to a temporary stack slot.
22894 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
22895 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
22896 MachinePointerInfo(), false, false, 0);
22898 EVT ElementType = InputVector.getValueType().getVectorElementType();
22899 unsigned EltSize = ElementType.getSizeInBits() / 8;
22901 // Replace each use (extract) with a load of the appropriate element.
22902 for (unsigned i = 0; i < 4; ++i) {
22903 uint64_t Offset = EltSize * i;
22904 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
22906 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
22907 StackPtr, OffsetVal);
22909 // Load the scalar.
22910 Vals[i] = DAG.getLoad(ElementType, dl, Ch,
22911 ScalarAddr, MachinePointerInfo(),
22912 false, false, false, 0);
22913 }
22914 }
22917 // Replace the extracts
22918 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
22919 UE = Uses.end(); UI != UE; ++UI) {
22920 SDNode *Extract = *UI;
22922 SDValue Idx = Extract->getOperand(1);
22923 uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
22924 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), Vals[IdxVal]);
22925 }
22927 // The replacement was made in place; don't return anything.
22928 return SDValue();
22929 }
22931 /// \brief Matches a VSELECT onto min/max, or returns 0 if the node doesn't match.
22932 static std::pair<unsigned, bool>
22933 matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, SDValue RHS,
22934 SelectionDAG &DAG, const X86Subtarget *Subtarget) {
22935 if (!VT.isVector())
22936 return std::make_pair(0, false);
22938 bool NeedSplit = false;
22939 switch (VT.getSimpleVT().SimpleTy) {
22940 default: return std::make_pair(0, false);
22941 case MVT::v2i64:
22942 case MVT::v4i64:
22943 if (!Subtarget->hasVLX())
22944 return std::make_pair(0, false);
22945 break;
22946 case MVT::v64i8:
22947 case MVT::v32i16:
22948 if (!Subtarget->hasBWI())
22949 return std::make_pair(0, false);
22950 break;
22951 case MVT::v16i32:
22952 case MVT::v8i64:
22953 if (!Subtarget->hasAVX512())
22954 return std::make_pair(0, false);
22955 break;
22956 case MVT::v32i8:
22957 case MVT::v16i16:
22958 case MVT::v8i32:
22959 if (!Subtarget->hasAVX2())
22960 NeedSplit = true;
22961 if (!Subtarget->hasAVX())
22962 return std::make_pair(0, false);
22963 break;
22964 case MVT::v16i8:
22965 case MVT::v8i16:
22966 case MVT::v4i32:
22967 if (!Subtarget->hasSSE2())
22968 return std::make_pair(0, false);
22969 }
22971 // SSE2 has only a small subset of the operations.
22972 bool hasUnsigned = Subtarget->hasSSE41() ||
22973 (Subtarget->hasSSE2() && VT == MVT::v16i8);
22974 bool hasSigned = Subtarget->hasSSE41() ||
22975 (Subtarget->hasSSE2() && VT == MVT::v8i16);
22977 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
22979 unsigned Opc = 0;
22980 // Check for x CC y ? x : y.
22981 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
22982 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
22983 switch (CC) {
22984 default: break;
22985 case ISD::SETULT:
22986 case ISD::SETULE:
22987 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
22988 case ISD::SETUGT:
22989 case ISD::SETUGE:
22990 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
22991 case ISD::SETLT:
22992 case ISD::SETLE:
22993 Opc = hasSigned ? X86ISD::SMIN : 0; break;
22994 case ISD::SETGT:
22995 case ISD::SETGE:
22996 Opc = hasSigned ? X86ISD::SMAX : 0; break;
22997 }
22998 // Check for x CC y ? y : x -- a min/max with reversed arms.
22999 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23000 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23001 switch (CC) {
23002 default: break;
23003 case ISD::SETULT:
23004 case ISD::SETULE:
23005 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23006 case ISD::SETUGT:
23007 case ISD::SETUGE:
23008 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23009 case ISD::SETLT:
23010 case ISD::SETLE:
23011 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23012 case ISD::SETGT:
23013 case ISD::SETGE:
23014 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23015 }
23016 }
23018 return std::make_pair(Opc, NeedSplit);
23019 }
23021 static SDValue
23022 transformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
23023 const X86Subtarget *Subtarget) {
23024 SDLoc dl(N);
23025 SDValue Cond = N->getOperand(0);
23026 SDValue LHS = N->getOperand(1);
23027 SDValue RHS = N->getOperand(2);
23029 if (Cond.getOpcode() == ISD::SIGN_EXTEND) {
23030 SDValue CondSrc = Cond->getOperand(0);
23031 if (CondSrc->getOpcode() == ISD::SIGN_EXTEND_INREG)
23032 Cond = CondSrc->getOperand(0);
23033 }
23035 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
23036 return SDValue();
23038 // A vselect where all conditions and data are constants can be optimized into
23039 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
23040 if (ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
23041 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
23042 return SDValue();
23044 unsigned MaskValue = 0;
23045 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
23046 return SDValue();
23048 MVT VT = N->getSimpleValueType(0);
23049 unsigned NumElems = VT.getVectorNumElements();
23050 SmallVector<int, 8> ShuffleMask(NumElems, -1);
23051 for (unsigned i = 0; i < NumElems; ++i) {
23052 // Be sure we emit undef where we can.
23053 if (Cond.getOperand(i)->getOpcode() == ISD::UNDEF)
23054 ShuffleMask[i] = -1;
23055 else
23056 ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
23057 }
23059 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23060 if (!TLI.isShuffleMaskLegal(ShuffleMask, VT))
23061 return SDValue();
23062 return DAG.getVectorShuffle(VT, dl, LHS, RHS, &ShuffleMask[0]);
23063 }
23065 /// PerformSELECTCombine - Do target-specific DAG combines on SELECT and VSELECT
23066 /// nodes.
23067 static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
23068 TargetLowering::DAGCombinerInfo &DCI,
23069 const X86Subtarget *Subtarget) {
23070 SDLoc DL(N);
23071 SDValue Cond = N->getOperand(0);
23072 // Get the LHS/RHS of the select.
23073 SDValue LHS = N->getOperand(1);
23074 SDValue RHS = N->getOperand(2);
23075 EVT VT = LHS.getValueType();
23076 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23078 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
23079 // instructions match the semantics of the common C idiom x<y?x:y but not
23080 // x<=y?x:y, because of how they handle negative zero (which can be
23081 // ignored in unsafe-math mode).
23082 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
23083 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
23084 VT != MVT::f80 && (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
23085 (Subtarget->hasSSE2() ||
23086 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
23087 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23089 unsigned Opcode = 0;
23090 // Check for x CC y ? x : y.
23091 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23092 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23093 switch (CC) {
23094 default: break;
23095 case ISD::SETULT:
23096 // Converting this to a min would handle NaNs incorrectly, and swapping
23097 // the operands would cause it to handle comparisons between positive
23098 // and negative zero incorrectly.
23099 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23100 if (!DAG.getTarget().Options.UnsafeFPMath &&
23101 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23103 std::swap(LHS, RHS);
23105 Opcode = X86ISD::FMIN;
23108 // Converting this to a min would handle comparisons between positive
23109 // and negative zero incorrectly.
23110 if (!DAG.getTarget().Options.UnsafeFPMath &&
23111 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23112 break;
23113 Opcode = X86ISD::FMIN;
23114 break;
23115 case ISD::SETULE:
23116 // Converting this to a min would handle both negative zeros and NaNs
23117 // incorrectly, but we can swap the operands to fix both.
23118 std::swap(LHS, RHS);
23119 case ISD::SETOLT:
23120 case ISD::SETLT:
23121 case ISD::SETLE:
23122 Opcode = X86ISD::FMIN;
23123 break;
23125 case ISD::SETOGE:
23126 // Converting this to a max would handle comparisons between positive
23127 // and negative zero incorrectly.
23128 if (!DAG.getTarget().Options.UnsafeFPMath &&
23129 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23130 break;
23131 Opcode = X86ISD::FMAX;
23132 break;
23133 case ISD::SETUGT:
23134 // Converting this to a max would handle NaNs incorrectly, and swapping
23135 // the operands would cause it to handle comparisons between positive
23136 // and negative zero incorrectly.
23137 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23138 if (!DAG.getTarget().Options.UnsafeFPMath &&
23139 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23140 break;
23141 std::swap(LHS, RHS);
23142 }
23143 Opcode = X86ISD::FMAX;
23144 break;
23145 case ISD::SETUGE:
23146 // Converting this to a max would handle both negative zeros and NaNs
23147 // incorrectly, but we can swap the operands to fix both.
23148 std::swap(LHS, RHS);
23149 case ISD::SETOGT:
23150 case ISD::SETGT:
23151 case ISD::SETGE:
23152 Opcode = X86ISD::FMAX;
23153 break;
23154 }
23155 // Check for x CC y ? y : x -- a min/max with reversed arms.
23156 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23157 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23158 switch (CC) {
23159 default: break;
23160 case ISD::SETOGE:
23161 // Converting this to a min would handle comparisons between positive
23162 // and negative zero incorrectly, and swapping the operands would
23163 // cause it to handle NaNs incorrectly.
23164 if (!DAG.getTarget().Options.UnsafeFPMath &&
23165 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
23166 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23167 break;
23168 std::swap(LHS, RHS);
23169 }
23170 Opcode = X86ISD::FMIN;
23171 break;
23172 case ISD::SETUGT:
23173 // Converting this to a min would handle NaNs incorrectly.
23174 if (!DAG.getTarget().Options.UnsafeFPMath &&
23175 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
23176 break;
23177 Opcode = X86ISD::FMIN;
23178 break;
23179 case ISD::SETUGE:
23180 // Converting this to a min would handle both negative zeros and NaNs
23181 // incorrectly, but we can swap the operands to fix both.
23182 std::swap(LHS, RHS);
23183 case ISD::SETOGT:
23184 case ISD::SETGT:
23185 case ISD::SETGE:
23186 Opcode = X86ISD::FMIN;
23187 break;
23189 case ISD::SETULT:
23190 // Converting this to a max would handle NaNs incorrectly.
23191 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23192 break;
23193 Opcode = X86ISD::FMAX;
23194 break;
23195 case ISD::SETOLE:
23196 // Converting this to a max would handle comparisons between positive
23197 // and negative zero incorrectly, and swapping the operands would
23198 // cause it to handle NaNs incorrectly.
23199 if (!DAG.getTarget().Options.UnsafeFPMath &&
23200 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
23201 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23202 break;
23203 std::swap(LHS, RHS);
23204 }
23205 Opcode = X86ISD::FMAX;
23206 break;
23207 case ISD::SETULE:
23208 // Converting this to a max would handle both negative zeros and NaNs
23209 // incorrectly, but we can swap the operands to fix both.
23210 std::swap(LHS, RHS);
23211 case ISD::SETOLT:
23212 case ISD::SETLT:
23213 case ISD::SETLE:
23214 Opcode = X86ISD::FMAX;
23215 break;
23216 }
23217 }
23219 if (Opcode)
23220 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
23221 }
23223 EVT CondVT = Cond.getValueType();
23224 if (Subtarget->hasAVX512() && VT.isVector() && CondVT.isVector() &&
23225 CondVT.getVectorElementType() == MVT::i1) {
23226 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
23227 // lowering on KNL. In this case we convert it to
23228 // v16i8 (select v16i8, v16i8, v16i8) and use AVX instruction.
23229 // The same situation for all 128 and 256-bit vectors of i8 and i16.
23230 // Since SKX these selects have a proper lowering.
23231 EVT OpVT = LHS.getValueType();
23232 if ((OpVT.is128BitVector() || OpVT.is256BitVector()) &&
23233 (OpVT.getVectorElementType() == MVT::i8 ||
23234 OpVT.getVectorElementType() == MVT::i16) &&
23235 !(Subtarget->hasBWI() && Subtarget->hasVLX())) {
23236 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond);
23237 DCI.AddToWorklist(Cond.getNode());
23238 return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS);
23239 }
23240 }
23241 // If this is a select between two integer constants, try to do some
23242 // optimizations.
23243 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
23244 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
23245 // Don't do this for crazy integer types.
23246 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
23247 // If this is efficiently invertible, canonicalize the LHSC/RHSC values
23248 // so that TrueC (the true value) is larger than FalseC.
23249 bool NeedsCondInvert = false;
23251 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
23252 // Efficiently invertible.
23253 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible.
23254 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible.
23255 isa<ConstantSDNode>(Cond.getOperand(1))))) {
23256 NeedsCondInvert = true;
23257 std::swap(TrueC, FalseC);
23260 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0.
23261 if (FalseC->getAPIntValue() == 0 &&
23262 TrueC->getAPIntValue().isPowerOf2()) {
23263 if (NeedsCondInvert) // Invert the condition if needed.
23264 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23265 DAG.getConstant(1, Cond.getValueType()));
23267 // Zero extend the condition if needed.
23268 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
23270 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
23271 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
23272 DAG.getConstant(ShAmt, MVT::i8));
23273 }
23275 // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst.
23276 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
23277 if (NeedsCondInvert) // Invert the condition if needed.
23278 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23279 DAG.getConstant(1, Cond.getValueType()));
23281 // Zero extend the condition if needed.
23282 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
23283 FalseC->getValueType(0), Cond);
23284 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23285 SDValue(FalseC, 0));
23286 }
23288 // Optimize cases that will turn into an LEA instruction. This requires
23289 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
23290 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
23291 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
23292 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
23294 bool isFastMultiplier = false;
23295 if (Diff < 10) {
23296 switch ((unsigned char)Diff) {
23297 default: break;
23298 case 1: // result = add base, cond
23299 case 2: // result = lea base( , cond*2)
23300 case 3: // result = lea base(cond, cond*2)
23301 case 4: // result = lea base( , cond*4)
23302 case 5: // result = lea base(cond, cond*4)
23303 case 8: // result = lea base( , cond*8)
23304 case 9: // result = lea base(cond, cond*8)
23305 isFastMultiplier = true;
23306 break;
23307 }
23308 }
23310 if (isFastMultiplier) {
23311 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
23312 if (NeedsCondInvert) // Invert the condition if needed.
23313 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23314 DAG.getConstant(1, Cond.getValueType()));
23316 // Zero extend the condition if needed.
23317 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
23318 Cond);
23319 // Scale the condition by the difference.
23320 if (Diff != 1)
23321 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
23322 DAG.getConstant(Diff, Cond.getValueType()));
23324 // Add the base if non-zero.
23325 if (FalseC->getAPIntValue() != 0)
23326 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23327 SDValue(FalseC, 0));
23328 return Cond;
23329 }
23330 }
23331 }
23332 }
23334 // Canonicalize max and min:
23335 // (x > y) ? x : y -> (x >= y) ? x : y
23336 // (x < y) ? x : y -> (x <= y) ? x : y
23337 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
23338 // the need for an extra compare
23339 // against zero. e.g.
23340 // (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0
23341 // subl   %esi, %edi
23342 // testl  %edi, %edi
23343 // movl   $0, %eax
23344 // cmovgl %edi, %eax
23345 // =>
23346 // xorl   %eax, %eax
23347 // subl   %esi, %edi
23348 // cmovsl %eax, %edi
23349 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
23350 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23351 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23352 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23353 switch (CC) {
23354 default: break;
23355 case ISD::SETLT:
23356 case ISD::SETGT: {
23357 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
23358 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
23359 Cond.getOperand(0), Cond.getOperand(1), NewCC);
23360 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
23361 }
23362 }
23363 }
23365 // Early exit check
23366 if (!TLI.isTypeLegal(VT))
23367 return SDValue();
23369 // Match VSELECTs into subs with unsigned saturation.
23370 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
23371 // psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
23372 ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
23373 (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
23374 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23376 // Check if one of the arms of the VSELECT is a zero vector. If it's on the
23377 // left side invert the predicate to simplify logic below.
23378 SDValue Other;
23379 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
23380 Other = RHS;
23381 CC = ISD::getSetCCInverse(CC, true);
23382 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
23383 Other = LHS;
23384 }
23386 if (Other.getNode() && Other->getNumOperands() == 2 &&
23387 DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
23388 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
23389 SDValue CondRHS = Cond->getOperand(1);
23391 // Look for a general sub with unsigned saturation first.
23392 // x >= y ? x-y : 0 --> subus x, y
23393 // x > y ? x-y : 0 --> subus x, y
23394 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
23395 Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
23396 return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);
23398 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS))
23399 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
23400 if (auto *CondRHSBV = dyn_cast<BuildVectorSDNode>(CondRHS))
23401 if (auto *CondRHSConst = CondRHSBV->getConstantSplatNode())
23402 // If the RHS is a constant we have to reverse the const
23403 // canonicalization.
23404 // x > C-1 ? x+-C : 0 --> subus x, C
23405 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
23406 CondRHSConst->getAPIntValue() ==
23407 (-OpRHSConst->getAPIntValue() - 1))
23408 return DAG.getNode(
23409 X86ISD::SUBUS, DL, VT, OpLHS,
23410 DAG.getConstant(-OpRHSConst->getAPIntValue(), VT));
23412 // Another special case: If C was a sign bit, the sub has been
23413 // canonicalized into a xor.
23414 // FIXME: Would it be better to use computeKnownBits to determine
23415 // whether it's safe to decanonicalize the xor?
23416 // x s< 0 ? x^C : 0 --> subus x, C
23417 if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
23418 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
23419 OpRHSConst->getAPIntValue().isSignBit())
23420 // Note that we have to rebuild the RHS constant here to ensure we
23421 // don't rely on particular values of undef lanes.
23422 return DAG.getNode(
23423 X86ISD::SUBUS, DL, VT, OpLHS,
23424 DAG.getConstant(OpRHSConst->getAPIntValue(), VT));
23425 }
23426 }
23429 // Try to match a min/max vector operation.
23430 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC) {
23431 std::pair<unsigned, bool> ret = matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget);
23432 unsigned Opc = ret.first;
23433 bool NeedSplit = ret.second;
23435 if (Opc && NeedSplit) {
23436 unsigned NumElems = VT.getVectorNumElements();
23437 // Extract the LHS vectors
23438 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, DL);
23439 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, DL);
23441 // Extract the RHS vectors
23442 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, DL);
23443 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, DL);
23445 // Create min/max for each subvector
23446 LHS = DAG.getNode(Opc, DL, LHS1.getValueType(), LHS1, RHS1);
23447 RHS = DAG.getNode(Opc, DL, LHS2.getValueType(), LHS2, RHS2);
23449 // Merge the result
23450 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS, RHS);
23451 } else if (Opc)
23452 return DAG.getNode(Opc, DL, VT, LHS, RHS);
23453 }
23455 // Simplify vector selection if condition value type matches vselect
23456 // operand type.
23457 if (N->getOpcode() == ISD::VSELECT && CondVT == VT) {
23458 assert(Cond.getValueType().isVector() &&
23459 "vector select expects a vector selector!");
23461 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
23462 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
23464 // Try to invert the condition if the true value is not all 1s and the
23465 // false value is not all 0s.
23466 if (!TValIsAllOnes && !FValIsAllZeros &&
23467 // Check if the selector will be produced by CMPP*/PCMP*
23468 Cond.getOpcode() == ISD::SETCC &&
23469 // Check if SETCC has already been promoted
23470 TLI.getSetCCResultType(*DAG.getContext(), VT) == CondVT) {
23471 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
23472 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
23474 if (TValIsAllZeros || FValIsAllOnes) {
23475 SDValue CC = Cond.getOperand(2);
23476 ISD::CondCode NewCC =
23477 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
23478 Cond.getOperand(0).getValueType().isInteger());
23479 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1), NewCC);
23480 std::swap(LHS, RHS);
23481 TValIsAllOnes = FValIsAllOnes;
23482 FValIsAllZeros = TValIsAllZeros;
23486 if (TValIsAllOnes || FValIsAllZeros) {
23489 if (TValIsAllOnes && FValIsAllZeros)
23491 else if (TValIsAllOnes)
23492 Ret = DAG.getNode(ISD::OR, DL, CondVT, Cond,
23493 DAG.getNode(ISD::BITCAST, DL, CondVT, RHS));
23494 else if (FValIsAllZeros)
23495 Ret = DAG.getNode(ISD::AND, DL, CondVT, Cond,
23496 DAG.getNode(ISD::BITCAST, DL, CondVT, LHS));
23498 return DAG.getNode(ISD::BITCAST, DL, VT, Ret);
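
  // (The folds above use the lane-wise identities
  //    vselect C, all-ones, X  ==  or C, X
  //    vselect C, X, all-zeros ==  and C, X
  //  which hold because a vector SETCC produces all-ones/all-zeros lanes.)
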
  // If we know that this node is legal then we know that it is going to be
  // matched by one of the SSE/AVX BLEND instructions. These instructions only
  // depend on the highest bit in each word. Try to use SimplifyDemandedBits
  // to simplify previous instructions.
  if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
      !DCI.isBeforeLegalize() &&
      // We explicitly check against v8i16 and v16i16 because, although
      // they're marked as Custom, they might only be legal when Cond is a
      // build_vector of constants. This will be taken care of in a later
      // condition.
      (TLI.isOperationLegalOrCustom(ISD::VSELECT, VT) && VT != MVT::v16i16 &&
       VT != MVT::v8i16) &&
      // Don't optimize vector of constants. Those are handled by
      // the generic code and all the bits must be properly set for
      // the generic optimizer.
      !ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
    unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();

    // Don't optimize vector selects that map to mask-registers.
    if (BitWidth == 1)
      return SDValue();

    assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
    APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);

    APInt KnownZero, KnownOne;
    TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
                                          DCI.isBeforeLegalizeOps());
    if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) ||
        TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne,
                                 TLO)) {
      // If we changed the computation somewhere in the DAG, this change
      // will affect all users of Cond.
      // Make sure it is fine and update all the nodes so that we do not
      // use the generic VSELECT anymore. Otherwise, we may perform
      // wrong optimizations as we messed up with the actual expectation
      // for the vector boolean values.
      if (Cond != TLO.Old) {
        // Check all uses of the condition operand to check whether it will be
        // consumed by non-BLEND instructions, which may depend on all bits
        // being set properly.
        for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
             I != E; ++I)
          if (I->getOpcode() != ISD::VSELECT)
            // TODO: Add other opcodes eventually lowered into BLEND.
            return SDValue();

        // Update all the users of the condition, before committing the change,
        // so that the VSELECT optimizations that expect the correct vector
        // boolean value will not be triggered.
        for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
             I != E; ++I)
          DAG.ReplaceAllUsesOfValueWith(
              SDValue(*I, 0),
              DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(*I), I->getValueType(0),
                          Cond, I->getOperand(1), I->getOperand(2)));
        DCI.CommitTargetLoweringOpt(TLO);
        return SDValue();
      }
      // At this point, only Cond is changed. Change the condition
      // just for N to keep the opportunity to optimize all other
      // users their own way.
      DAG.ReplaceAllUsesOfValueWith(
          SDValue(N, 0),
          DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(N), N->getValueType(0),
                      TLO.New, N->getOperand(1), N->getOperand(2)));
      return SDValue();
    }
  }
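
  // (The SSE4.1/AVX BLENDV instructions test only the sign bit of each
  //  condition lane, which is what makes demanding a single high bit safe
  //  here; X86ISD::SHRUNKBLEND records that weaker contract so later
  //  combines do not treat the condition as a full boolean vector.)
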
  // We should generate an X86ISD::BLENDI from a vselect if its argument
  // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
  // constants. This specific pattern gets generated when we split a
  // selector for a 512-bit vector in a machine without AVX512 (but with
  // 256-bit vectors), during legalization:
  //
  // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
  //
  // Iff we find this pattern and the build_vectors are built from
  // constants, we translate the vselect into a shuffle_vector that we
  // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
  if ((N->getOpcode() == ISD::VSELECT ||
       N->getOpcode() == X86ISD::SHRUNKBLEND) &&
      !DCI.isBeforeLegalize()) {
    SDValue Shuffle = transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
    if (Shuffle.getNode())
      return Shuffle;
  }

  return SDValue();
}

// Check whether a boolean test is testing a boolean value generated by
// X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
// code.
//
// Simplify the following patterns:
// (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
// (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
// to (Op EFLAGS Cond)
//
// (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
// (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
// to (Op EFLAGS !Cond)
//
// where Op could be BRCOND or CMOV.
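//
// For example (illustrative), (BRCOND (CMP (SETCC ne EFLAGS) 0) EQ) becomes
// (BRCOND EFLAGS e): the materialized boolean and its re-test collapse into
// a direct use of the original EFLAGS.
//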
static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
  // Quit unless Cmp is a CMP, or a SUB whose integer result is unused.
  if (Cmp.getOpcode() != X86ISD::CMP &&
      (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
    return SDValue();

  // Quit if not used as a boolean value.
  if (CC != X86::COND_E && CC != X86::COND_NE)
    return SDValue();

  // Check CMP operands. One of them should be 0 or 1 and the other should be
  // a SetCC or extended from it.
  SDValue Op1 = Cmp.getOperand(0);
  SDValue Op2 = Cmp.getOperand(1);

  SDValue SetCC;
  const ConstantSDNode* C = nullptr;
  bool needOppositeCond = (CC == X86::COND_E);
  bool checkAgainstTrue = false; // Is it a comparison against 1?

  if ((C = dyn_cast<ConstantSDNode>(Op1)))
    SetCC = Op2;
  else if ((C = dyn_cast<ConstantSDNode>(Op2)))
    SetCC = Op1;
  else // Quit if neither operand is a constant.
    return SDValue();

  if (C->getZExtValue() == 1) {
    needOppositeCond = !needOppositeCond;
    checkAgainstTrue = true;
  } else if (C->getZExtValue() != 0)
    // Quit if the constant is neither 0 nor 1.
    return SDValue();

  bool truncatedToBoolWithAnd = false;
  // Skip (zext $x), (trunc $x), or (and $x, 1) node.
  while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
         SetCC.getOpcode() == ISD::TRUNCATE ||
         SetCC.getOpcode() == ISD::AND) {
    if (SetCC.getOpcode() == ISD::AND) {
      int OpIdx = -1;
      ConstantSDNode *CS;
      if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(0))) &&
          CS->getZExtValue() == 1)
        OpIdx = 1;
      if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(1))) &&
          CS->getZExtValue() == 1)
        OpIdx = 0;
      if (OpIdx == -1)
        break;
      SetCC = SetCC.getOperand(OpIdx);
      truncatedToBoolWithAnd = true;
    } else
      SetCC = SetCC.getOperand(0);
  }

  switch (SetCC.getOpcode()) {
  case X86ISD::SETCC_CARRY:
    // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
    // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
    // i.e. it's a comparison against true but the result of SETCC_CARRY is not
    // truncated to i1 using 'and'.
    if (checkAgainstTrue && !truncatedToBoolWithAnd)
      break;
    assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
           "Invalid use of SETCC_CARRY!");
    // FALL THROUGH
  case X86ISD::SETCC:
    // Set the condition code or opposite one if necessary.
    CC = X86::CondCode(SetCC.getConstantOperandVal(0));
    if (needOppositeCond)
      CC = X86::GetOppositeBranchCondition(CC);
    return SetCC.getOperand(1);
  case X86ISD::CMOV: {
    // Check whether false/true value has canonical one, i.e. 0 or 1.
    ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
    ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
    // Quit if true value is not a constant.
    if (!TVal)
      return SDValue();
    // Quit if false value is not a constant.
    if (!FVal) {
      SDValue Op = SetCC.getOperand(0);
      // Skip 'zext' or 'trunc' node.
      if (Op.getOpcode() == ISD::ZERO_EXTEND ||
          Op.getOpcode() == ISD::TRUNCATE)
        Op = Op.getOperand(0);
      // A special case for rdrand/rdseed, where 0 is set if false cond is
      // found.
      if ((Op.getOpcode() != X86ISD::RDRAND &&
           Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
        return SDValue();
    }
    // Quit if false value is not the constant 0 or 1.
    bool FValIsFalse = true;
    if (FVal && FVal->getZExtValue() != 0) {
      if (FVal->getZExtValue() != 1)
        return SDValue();
      // If FVal is 1, opposite cond is needed.
      needOppositeCond = !needOppositeCond;
      FValIsFalse = false;
    }
    // Quit if TVal is not the constant opposite of FVal.
    if (FValIsFalse && TVal->getZExtValue() != 1)
      return SDValue();
    if (!FValIsFalse && TVal->getZExtValue() != 0)
      return SDValue();
    CC = X86::CondCode(SetCC.getConstantOperandVal(2));
    if (needOppositeCond)
      CC = X86::GetOppositeBranchCondition(CC);
    return SetCC.getOperand(3);
  }
  }

  return SDValue();
}

/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget *Subtarget) {
  SDLoc DL(N);

  // If the flag operand isn't dead, don't touch this CMOV.
  if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
    return SDValue();

  SDValue FalseOp = N->getOperand(0);
  SDValue TrueOp = N->getOperand(1);
  X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
  SDValue Cond = N->getOperand(3);

  if (CC == X86::COND_E || CC == X86::COND_NE) {
    switch (Cond.getOpcode()) {
    default: break;
    case X86ISD::BSR:
    case X86ISD::BSF:
      // If operand of BSR / BSF are proven never zero, then ZF cannot be set.
      if (DAG.isKnownNeverZero(Cond.getOperand(0)))
        return (CC == X86::COND_E) ? FalseOp : TrueOp;
    }
  }

  SDValue Flags;

  Flags = checkBoolTestSetCCCombine(Cond, CC);
  if (Flags.getNode() &&
      // Extra check as FCMOV only supports a subset of X86 cond.
      (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
    SDValue Ops[] = { FalseOp, TrueOp,
                      DAG.getConstant(CC, MVT::i8), Flags };
    return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
  }

  // If this is a select between two integer constants, try to do some
  // optimizations. Note that the operands are ordered the opposite of SELECT
  // operands.
  if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
    if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
      // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
      // larger than FalseC (the false value).
      if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
        CC = X86::GetOppositeBranchCondition(CC);
        std::swap(TrueC, FalseC);
        std::swap(TrueOp, FalseOp);
      }

      // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
      // This is efficient for any integer data type (including i8/i16) and
      // shift amount.
      if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
        Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
                           DAG.getConstant(CC, MVT::i8), Cond);

        // Zero extend the condition if needed.
        Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);

        unsigned ShAmt = TrueC->getAPIntValue().logBase2();
        Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
                           DAG.getConstant(ShAmt, MVT::i8));
        if (N->getNumValues() == 2) // Dead flag value?
          return DCI.CombineTo(N, Cond, SDValue());
        return Cond;
      }

      // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst. This is efficient
      // for any integer data type, including i8/i16.
      if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
        Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
                           DAG.getConstant(CC, MVT::i8), Cond);

        // Zero extend the condition if needed.
        Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
                           FalseC->getValueType(0), Cond);
        Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                           SDValue(FalseC, 0));

        if (N->getNumValues() == 2) // Dead flag value?
          return DCI.CombineTo(N, Cond, SDValue());
        return Cond;
      }

      // Optimize cases that will turn into an LEA instruction. This requires
      // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
      if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
        uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
        if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;

        bool isFastMultiplier = false;
        if (Diff < 10) {
          switch ((unsigned char)Diff) {
          default: break;
          case 1: // result = add base, cond
          case 2: // result = lea base(    , cond*2)
          case 3: // result = lea base(cond, cond*2)
          case 4: // result = lea base(    , cond*4)
          case 5: // result = lea base(cond, cond*4)
          case 8: // result = lea base(    , cond*8)
          case 9: // result = lea base(cond, cond*8)
            isFastMultiplier = true;
            break;
          }
        }

        if (isFastMultiplier) {
          APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
          Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
                             DAG.getConstant(CC, MVT::i8), Cond);
          // Zero extend the condition if needed.
          Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
                             Cond);
          // Scale the condition by the difference.
          if (Diff != 1)
            Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
                               DAG.getConstant(Diff, Cond.getValueType()));

          // Add the base if non-zero.
          if (FalseC->getAPIntValue() != 0)
            Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                               SDValue(FalseC, 0));
          if (N->getNumValues() == 2) // Dead flag value?
            return DCI.CombineTo(N, Cond, SDValue());

          return Cond;
        }
      }
    }
  }
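
  // (Illustrative: a CMOV between 5 and 0 thus becomes roughly
  //  "setcc %al; movzbl %al, %eax; leal (%eax,%eax,4), %eax", i.e. a
  //  branchless zext(setcc) scaled by a single LEA.)
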
  // Handle these cases:
  //   (select (x != c), e, c) -> select (x != c), e, x),
  //   (select (x == c), c, e) -> select (x == c), x, e)
  // where the c is an integer constant, and the "select" is the combination
  // of CMOV and CMP.
  //
  // The rationale for this change is that the conditional-move from a constant
  // needs two instructions, however, conditional-move from a register needs
  // only one instruction.
  //
  // CAVEAT: By replacing a constant with a symbolic value, it may obscure
  //  some instruction-combining opportunities. This opt needs to be
  //  postponed as late as possible.
  //
  if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
    // the DCI.xxxx conditions are provided to postpone the optimization as
    // late as possible.

    ConstantSDNode *CmpAgainst = nullptr;
    if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
        (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
        !isa<ConstantSDNode>(Cond.getOperand(0))) {

      if (CC == X86::COND_NE &&
          CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
        CC = X86::GetOppositeBranchCondition(CC);
        std::swap(TrueOp, FalseOp);
      }

      if (CC == X86::COND_E &&
          CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
        SDValue Ops[] = { FalseOp, Cond.getOperand(0),
                          DAG.getConstant(CC, MVT::i8), Cond };
        return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
      }
    }
  }

  return SDValue();
}

static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
                                                const X86Subtarget *Subtarget) {
  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
  switch (IntNo) {
  default: return SDValue();
  // SSE/AVX/AVX2 blend intrinsics.
  case Intrinsic::x86_avx2_pblendvb:
  case Intrinsic::x86_avx2_pblendw:
  case Intrinsic::x86_avx2_pblendd_128:
  case Intrinsic::x86_avx2_pblendd_256:
    // Don't try to simplify this intrinsic if we don't have AVX2.
    if (!Subtarget->hasAVX2())
      return SDValue();
    // FALL-THROUGH
  case Intrinsic::x86_avx_blend_pd_256:
  case Intrinsic::x86_avx_blend_ps_256:
  case Intrinsic::x86_avx_blendv_pd_256:
  case Intrinsic::x86_avx_blendv_ps_256:
    // Don't try to simplify this intrinsic if we don't have AVX.
    if (!Subtarget->hasAVX())
      return SDValue();
    // FALL-THROUGH
  case Intrinsic::x86_sse41_pblendw:
  case Intrinsic::x86_sse41_blendpd:
  case Intrinsic::x86_sse41_blendps:
  case Intrinsic::x86_sse41_blendvps:
  case Intrinsic::x86_sse41_blendvpd:
  case Intrinsic::x86_sse41_pblendvb: {
    SDValue Op0 = N->getOperand(1);
    SDValue Op1 = N->getOperand(2);
    SDValue Mask = N->getOperand(3);

    // Don't try to simplify this intrinsic if we don't have SSE4.1.
    if (!Subtarget->hasSSE41())
      return SDValue();

    // fold (blend A, A, Mask) -> A
    if (Op0 == Op1)
      return Op0;
    // fold (blend A, B, allZeros) -> A
    if (ISD::isBuildVectorAllZeros(Mask.getNode()))
      return Op0;
    // fold (blend A, B, allOnes) -> B
    if (ISD::isBuildVectorAllOnes(Mask.getNode()))
      return Op1;
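
    // (These folds are safe for the variable blends because their mask
    //  selects per element by its sign bit only: an all-zeros mask always
    //  picks the first source and an all-ones mask the second, for any
    //  element width.)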

    // Simplify the case where the mask is a constant i32 value.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mask)) {
      if (C->isNullValue())
        return Op0;
      if (C->isAllOnesValue())
        return Op1;
    }

    return SDValue();
  }

  // Packed SSE2/AVX2 arithmetic shift immediate intrinsics.
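  // E.g. (illustrative) the psrai.d form with count 3 becomes
  // (sra V, <3,3,3,3>) below, letting generic combines fold it further,
  // while a count >= the element width is left to regular lowering.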
  case Intrinsic::x86_sse2_psrai_w:
  case Intrinsic::x86_sse2_psrai_d:
  case Intrinsic::x86_avx2_psrai_w:
  case Intrinsic::x86_avx2_psrai_d:
  case Intrinsic::x86_sse2_psra_w:
  case Intrinsic::x86_sse2_psra_d:
  case Intrinsic::x86_avx2_psra_w:
  case Intrinsic::x86_avx2_psra_d: {
    SDValue Op0 = N->getOperand(1);
    SDValue Op1 = N->getOperand(2);
    EVT VT = Op0.getValueType();
    assert(VT.isVector() && "Expected a vector type!");

    if (isa<BuildVectorSDNode>(Op1))
      Op1 = Op1.getOperand(0);

    if (!isa<ConstantSDNode>(Op1))
      return SDValue();

    EVT SVT = VT.getVectorElementType();
    unsigned SVTBits = SVT.getSizeInBits();

    ConstantSDNode *CND = cast<ConstantSDNode>(Op1);
    APInt C(SVTBits, CND->getAPIntValue().getZExtValue());
    uint64_t ShAmt = C.getZExtValue();

    // Don't try to convert this shift into an ISD::SRA if the shift
    // count is bigger than or equal to the element size.
    if (ShAmt >= SVTBits)
      return SDValue();

    // Trivial case: if the shift count is zero, then fold this
    // into the first operand.
    if (ShAmt == 0)
      return Op0;

    // Replace this packed shift intrinsic with a target independent
    // shift dag node.
    SDValue Splat = DAG.getConstant(C, VT);
    return DAG.getNode(ISD::SRA, SDLoc(N), VT, Op0, Splat);
  }
  }
}

/// PerformMulCombine - Optimize a single multiply with constant into two
/// multiplies in order to implement it with two cheaper instructions, e.g.
/// LEA + SHL, LEA + LEA.
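/// For example, a multiply by 45 (= 9 * 5) becomes roughly:
///   lea r1, [x + 8*x]    ; x * 9
///   lea r2, [r1 + 4*r1]  ; (x * 9) * 5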
static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT != MVT::i64 && VT != MVT::i32)
    return SDValue();

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C)
    return SDValue();
  uint64_t MulAmt = C->getZExtValue();
  if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
    return SDValue();

  uint64_t MulAmt1 = 0;
  uint64_t MulAmt2 = 0;
  if ((MulAmt % 9) == 0) {
    MulAmt1 = 9;
    MulAmt2 = MulAmt / 9;
  } else if ((MulAmt % 5) == 0) {
    MulAmt1 = 5;
    MulAmt2 = MulAmt / 5;
  } else if ((MulAmt % 3) == 0) {
    MulAmt1 = 3;
    MulAmt2 = MulAmt / 3;
  }
  if (MulAmt2 &&
      (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)) {
    SDLoc DL(N);

    if (isPowerOf2_64(MulAmt2) &&
        !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
      // If the second multiplier is pow2, issue it first. We want the multiply
      // by 3, 5, or 9 to be folded into the addressing mode unless the lone
      // use is an add.
      std::swap(MulAmt1, MulAmt2);

    SDValue NewMul;
    if (isPowerOf2_64(MulAmt1))
      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                           DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
    else
      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
                           DAG.getConstant(MulAmt1, VT));

    if (isPowerOf2_64(MulAmt2))
      NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
                           DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
    else
      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
                           DAG.getConstant(MulAmt2, VT));

    // Do not add new nodes to DAG combiner worklist.
    DCI.CombineTo(N, NewMul, false);
  }
  return SDValue();
}

static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();

  // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
  // since the result of setcc_c is all zeros or all ones.
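  // E.g. (shl (and (setcc_c), 1), 3) can simply become (and (setcc_c), 8):
  // because the value is all zeros or all ones, shifting it and masking
  // afterwards is the same as masking with the shifted constant.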
  if (VT.isInteger() && !VT.isVector() &&
      N1C && N0.getOpcode() == ISD::AND &&
      N0.getOperand(1).getOpcode() == ISD::Constant) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getOpcode() == X86ISD::SETCC_CARRY ||
        ((N00.getOpcode() == ISD::ANY_EXTEND ||
          N00.getOpcode() == ISD::ZERO_EXTEND) &&
         N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) {
      APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
      APInt ShAmt = N1C->getAPIntValue();
      Mask = Mask.shl(ShAmt);
      if (Mask != 0)
        return DAG.getNode(ISD::AND, SDLoc(N), VT,
                           N00, DAG.getConstant(Mask, VT));
    }
  }

  // Hardware support for vector shifts is sparse which makes us scalarize the
  // vector operations in many cases. Also, on Sandy Bridge ADD is faster than
  // shl.
  // (shl V, 1) -> add V,V
  if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
    if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
      assert(N0.getValueType().isVector() && "Invalid vector shift type");
      // We shift all of the values by one. In many cases we do not have
      // hardware support for this operation. This is better expressed as an
      // ADD of two values.
      if (N1SplatC->getZExtValue() == 1)
        return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
    }

  return SDValue();
}

/// \brief Returns a vector of 0s if the node in input is a vector logical
/// shift by a constant amount which is known to be bigger than or equal
/// to the vector element size in bits.
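/// For example, (srl v8i16 X, splat(16)) folds to the zero vector: PSRLW
/// with a count greater than or equal to the element width yields 0 in
/// every lane.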
static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);

  if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
      (!Subtarget->hasInt256() ||
       (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
    return SDValue();

  SDValue Amt = N->getOperand(1);
  SDLoc DL(N);
  if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
    if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
      APInt ShiftAmt = AmtSplat->getAPIntValue();
      unsigned MaxAmount = VT.getVectorElementType().getSizeInBits();

      // SSE2/AVX2 logical shifts always return a vector of 0s
      // if the shift amount is bigger than or equal to
      // the element size. The constant shift amount will be
      // encoded as an 8-bit immediate.
      if (ShiftAmt.trunc(8).uge(MaxAmount))
        return getZeroVector(VT, Subtarget, DAG, DL);
    }

  return SDValue();
}

/// PerformShiftCombine - Combine shifts.
static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
  if (N->getOpcode() == ISD::SHL) {
    SDValue V = PerformSHLCombine(N, DAG);
    if (V.getNode()) return V;
  }

  if (N->getOpcode() != ISD::SRA) {
    // Try to fold this logical shift into a zero vector.
    SDValue V = performShiftToAllZeros(N, DAG, Subtarget);
    if (V.getNode()) return V;
  }

  return SDValue();
}

// CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..))
// where both setccs reference the same FP CMP, and rewrite for CMPEQSS
// and friends. Likewise for OR -> CMPNEQSS.
static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
                            TargetLowering::DAGCombinerInfo &DCI,
                            const X86Subtarget *Subtarget) {
  unsigned opcode;

  // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
  // we're requiring SSE2 for both.
  if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue CMP0 = N0->getOperand(1);
    SDValue CMP1 = N1->getOperand(1);
    SDLoc DL(N);

    // The SETCCs should both refer to the same CMP.
    if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
      return SDValue();

    SDValue CMP00 = CMP0->getOperand(0);
    SDValue CMP01 = CMP0->getOperand(1);
    EVT VT = CMP00.getValueType();

    if (VT == MVT::f32 || VT == MVT::f64) {
      bool ExpectingFlags = false;
      // Check for any users that want flags:
      for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
           !ExpectingFlags && UI != UE; ++UI)
        switch (UI->getOpcode()) {
        default:
        case ISD::BR_CC:
        case ISD::BRCOND:
        case ISD::SELECT:
          ExpectingFlags = true;
          break;
        case ISD::CopyToReg:
        case ISD::SIGN_EXTEND:
        case ISD::ZERO_EXTEND:
        case ISD::ANY_EXTEND:
          break;
        }

      if (!ExpectingFlags) {
        enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
        enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);

        if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
          X86::CondCode tmp = cc0;
          cc0 = cc1;
          cc1 = tmp;
        }

        if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
            (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
          // FIXME: need symbolic constants for these magic numbers.
          // See X86ATTInstPrinter.cpp:printSSECC().
          unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
          if (Subtarget->hasAVX512()) {
            SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
                                         CMP01, DAG.getConstant(x86cc, MVT::i8));
            if (N->getValueType(0) != MVT::i1)
              return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
                                 FSetCC);
            return FSetCC;
          }
          SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
                                              CMP00.getValueType(), CMP00, CMP01,
                                              DAG.getConstant(x86cc, MVT::i8));

          bool is64BitFP = (CMP00.getValueType() == MVT::f64);
          MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;

          if (is64BitFP && !Subtarget->is64Bit()) {
            // On a 32-bit target, we cannot bitcast the 64-bit float to a
            // 64-bit integer, since that's not a legal type. Since
            // OnesOrZeroesF is all ones or all zeroes, we don't need all the
            // bits, but can do this little dance to extract the lowest 32 bits
            // and work with those going forward.
            SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
                                           OnesOrZeroesF);
            SDValue Vector32 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32,
                                           Vector64);
            OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
                                        Vector32, DAG.getIntPtrConstant(0));
            IntVT = MVT::i32;
          }

          SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, IntVT,
                                              OnesOrZeroesF);
          SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
                                      DAG.getConstant(1, IntVT));
          SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed);
          return OneBitOfTruth;
        }
      }
    }
  }
  return SDValue();
}

/// CanFoldXORWithAllOnes - Test whether the XOR operand is an AllOnes vector
/// so it can be folded inside ANDNP.
static bool CanFoldXORWithAllOnes(const SDNode *N) {
  EVT VT = N->getValueType(0);

  // Match direct AllOnes for 128 and 256-bit vectors
  if (ISD::isBuildVectorAllOnes(N))
    return true;

  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  // Sometimes the operand may come from an insert_subvector building a
  // 256-bit vector
  if (VT.is256BitVector() &&
      N->getOpcode() == ISD::INSERT_SUBVECTOR) {
    SDValue V1 = N->getOperand(0);
    SDValue V2 = N->getOperand(1);

    if (V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
        V1.getOperand(0).getOpcode() == ISD::UNDEF &&
        ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
        ISD::isBuildVectorAllOnes(V2.getNode()))
      return true;
  }

  return false;
}

// On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
// register. In most cases we actually compare or select YMM-sized registers
// and mixing the two types creates horrible code. This method optimizes
// some of the transition sequences.
static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  if (!VT.is256BitVector())
    return SDValue();

  assert((N->getOpcode() == ISD::ANY_EXTEND ||
          N->getOpcode() == ISD::ZERO_EXTEND ||
          N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");

  SDValue Narrow = N->getOperand(0);
  EVT NarrowVT = Narrow->getValueType(0);
  if (!NarrowVT.is128BitVector())
    return SDValue();

  if (Narrow->getOpcode() != ISD::XOR &&
      Narrow->getOpcode() != ISD::AND &&
      Narrow->getOpcode() != ISD::OR)
    return SDValue();

  SDValue N0 = Narrow->getOperand(0);
  SDValue N1 = Narrow->getOperand(1);
  SDLoc DL(Narrow);

  // The left side has to be a trunc.
  if (N0.getOpcode() != ISD::TRUNCATE)
    return SDValue();

  // The type of the truncated inputs.
  EVT WideVT = N0->getOperand(0)->getValueType(0);
  if (WideVT != VT)
    return SDValue();

  // The right side has to be a 'trunc' or a constant vector.
  bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE;
  ConstantSDNode *RHSConstSplat = nullptr;
  if (auto *RHSBV = dyn_cast<BuildVectorSDNode>(N1))
    RHSConstSplat = RHSBV->getConstantSplatNode();
  if (!RHSTrunc && !RHSConstSplat)
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT))
    return SDValue();

  // Set N0 and N1 to hold the inputs to the new wide operation.
  N0 = N0->getOperand(0);
  if (RHSConstSplat) {
    N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getScalarType(),
                     SDValue(RHSConstSplat, 0));
    SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1);
    N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C);
  } else if (RHSTrunc) {
    N1 = N1->getOperand(0);
  }

  // Generate the wide operation.
  SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1);
  unsigned Opcode = N->getOpcode();
  switch (Opcode) {
  case ISD::ANY_EXTEND:
    return Op;
  case ISD::ZERO_EXTEND: {
    unsigned InBits = NarrowVT.getScalarType().getSizeInBits();
    APInt Mask = APInt::getAllOnesValue(InBits);
    Mask = Mask.zext(VT.getScalarType().getSizeInBits());
    return DAG.getNode(ISD::AND, DL, VT,
                       Op, DAG.getConstant(Mask, VT));
  }
  case ISD::SIGN_EXTEND:
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
                       Op, DAG.getValueType(NarrowVT));
  default:
    llvm_unreachable("Unexpected opcode");
  }
}

static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
  if (R.getNode())
    return R;

  // Create BEXTR instructions
  // BEXTR is ((X >> imm) & (2**size-1))
  if (VT == MVT::i32 || VT == MVT::i64) {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDLoc DL(N);

    // Check for BEXTR.
    if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
        (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
      ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
      ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
      if (MaskNode && ShiftNode) {
        uint64_t Mask = MaskNode->getZExtValue();
        uint64_t Shift = ShiftNode->getZExtValue();
        if (isMask_64(Mask)) {
          uint64_t MaskSize = CountPopulation_64(Mask);
          if (Shift + MaskSize <= VT.getSizeInBits())
            return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
                               DAG.getConstant(Shift | (MaskSize << 8), VT));
        }
      }
    }
    return SDValue();
  }
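
  // (BEXTR's control operand packs the extracted field as
  //  "length << 8 | start"; e.g. (and (srl x, 4), 0xfff) becomes BEXTR with
  //  control 0x0c04: start 4, length 12.)
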
  // Want to form ANDNP nodes:
  // 1) In the hopes of then easily combining them with OR and AND nodes
  //    to form PBLEND/PSIGN.
  // 2) To match ANDN packed intrinsics
  if (VT != MVT::v2i64 && VT != MVT::v4i64)
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDLoc DL(N);

  // Check LHS for vnot
  if (N0.getOpcode() == ISD::XOR &&
      //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
      CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
    return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);

  // Check RHS for vnot
  if (N1.getOpcode() == ISD::XOR &&
      //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
      CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
    return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);

  return SDValue();
}

static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const X86Subtarget *Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
  if (R.getNode())
    return R;

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);

  // look for psign/blend
  if (VT == MVT::v2i64 || VT == MVT::v4i64) {
    if (!Subtarget->hasSSSE3() ||
        (VT == MVT::v4i64 && !Subtarget->hasInt256()))
      return SDValue();

    // Canonicalize pandn to RHS
    if (N0.getOpcode() == X86ISD::ANDNP)
      std::swap(N0, N1);
    // or (and (m, y), (pandn m, x))
    if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
      SDValue Mask = N1.getOperand(0);
      SDValue X = N1.getOperand(1);
      SDValue Y;
      if (N0.getOperand(0) == Mask)
        Y = N0.getOperand(1);
      if (N0.getOperand(1) == Mask)
        Y = N0.getOperand(0);

      // Check to see if the mask appeared in both the AND and ANDNP; if it
      // did not, bail out.
      if (!Y.getNode())
        return SDValue();

      // Validate that X, Y, and Mask are BITCASTs, and see through them.
      // Look through mask bitcast.
      if (Mask.getOpcode() == ISD::BITCAST)
        Mask = Mask.getOperand(0);
      if (X.getOpcode() == ISD::BITCAST)
        X = X.getOperand(0);
      if (Y.getOpcode() == ISD::BITCAST)
        Y = Y.getOperand(0);

      EVT MaskVT = Mask.getValueType();
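
      // (A mask formed by "sra x, EltBits-1" broadcasts each element's sign
      //  bit across its entire lane, so a lane-wise select on it behaves
      //  like a PBLENDVB-style blend; the SraAmt check below verifies that
      //  shape.)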
      // Validate that the Mask operand is a vector sra node.
      // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
      // there is no psrai.b
      unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
      unsigned SraAmt = ~0;
      if (Mask.getOpcode() == ISD::SRA) {
        if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
          if (auto *AmtConst = AmtBV->getConstantSplatNode())
            SraAmt = AmtConst->getZExtValue();
      } else if (Mask.getOpcode() == X86ISD::VSRAI) {
        SDValue SraC = Mask.getOperand(1);
        SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
      }
      if ((SraAmt + 1) != EltBits)
        return SDValue();

      SDLoc DL(N);

      // Now we know we at least have a pblendvb with the mask val. See if
      // we can form a psignb/w/d.
      // psign = x.type == y.type == mask.type && y = sub(0, x);
      if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
          ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
          X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
        assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
               "Unsupported VT for PSIGN");
        Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0));
        return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
      }
      // PBLENDVB is only available on SSE 4.1.
      if (!Subtarget->hasSSE41())
        return SDValue();

      EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;

      X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X);
      Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y);
      Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask);
      Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
      return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
    }
  }

  if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
  MachineFunction &MF = DAG.getMachineFunction();
  bool OptForSize = MF.getFunction()->getAttributes().
    hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);

  // SHLD/SHRD instructions have lower register pressure, but on some
  // platforms they have higher latency than the equivalent
  // series of shifts/ors that would otherwise be generated.
  // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
  // have higher latencies and we are not optimizing for size.
  if (!OptForSize && Subtarget->isSHLDSlow())
    return SDValue();

  if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
    std::swap(N0, N1);
  if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
    return SDValue();
  if (!N0.hasOneUse() || !N1.hasOneUse())
    return SDValue();

  SDValue ShAmt0 = N0.getOperand(1);
  if (ShAmt0.getValueType() != MVT::i8)
    return SDValue();
  SDValue ShAmt1 = N1.getOperand(1);
  if (ShAmt1.getValueType() != MVT::i8)
    return SDValue();
  if (ShAmt0.getOpcode() == ISD::TRUNCATE)
    ShAmt0 = ShAmt0.getOperand(0);
  if (ShAmt1.getOpcode() == ISD::TRUNCATE)
    ShAmt1 = ShAmt1.getOperand(0);

  SDLoc DL(N);
  unsigned Opc = X86ISD::SHLD;
  SDValue Op0 = N0.getOperand(0);
  SDValue Op1 = N1.getOperand(0);
  if (ShAmt0.getOpcode() == ISD::SUB) {
    Opc = X86ISD::SHRD;
    std::swap(Op0, Op1);
    std::swap(ShAmt0, ShAmt1);
  }

  unsigned Bits = VT.getSizeInBits();
  if (ShAmt1.getOpcode() == ISD::SUB) {
    SDValue Sum = ShAmt1.getOperand(0);
    if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
      SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
      if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
        ShAmt1Op1 = ShAmt1Op1.getOperand(0);
      if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
        return DAG.getNode(Opc, DL, VT,
                           Op0, Op1,
                           DAG.getNode(ISD::TRUNCATE, DL,
                                       MVT::i8, ShAmt0));
    }
  } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
    ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
    if (ShAmt0C &&
        ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
      return DAG.getNode(Opc, DL, VT,
                         N0.getOperand(0), N1.getOperand(0),
                         DAG.getNode(ISD::TRUNCATE, DL,
                                     MVT::i8, ShAmt0));
  }

  return SDValue();
}

// Generate NEG and CMOV for integer abs.
static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);

  // Since X86 does not have CMOV for 8-bit integer, we don't convert
  // 8-bit integer abs to NEG and CMOV.
  if (VT.isInteger() && VT.getSizeInBits() == 8)
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDLoc DL(N);

  // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
  // and change it to SUB and CMOV.
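  // (With Y = X >>s (size(X)-1), (X + Y) ^ Y computes |X|: Y is 0 when X is
  //  non-negative and all-ones when X is negative, so the add/xor pair
  //  conditionally negates X.)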
  if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
      N0.getOpcode() == ISD::ADD &&
      N0.getOperand(1) == N1 &&
      N1.getOpcode() == ISD::SRA &&
      N1.getOperand(0) == N0.getOperand(0))
    if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
      if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) {
        // Generate SUB & CMOV.
        SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
                                  DAG.getConstant(0, VT), N0.getOperand(0));

        SDValue Ops[] = { N0.getOperand(0), Neg,
                          DAG.getConstant(X86::COND_GE, MVT::i8),
                          SDValue(Neg.getNode(), 1) };
        return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
      }
  return SDValue();
}

// PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes.
static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const X86Subtarget *Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  if (Subtarget->hasCMov()) {
    SDValue RV = performIntegerAbsCombine(N, DAG);
    if (RV.getNode())
      return RV;
  }

  return SDValue();
}

/// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget *Subtarget) {
  LoadSDNode *Ld = cast<LoadSDNode>(N);
  EVT RegVT = Ld->getValueType(0);
  EVT MemVT = Ld->getMemoryVT();
  SDLoc dl(Ld);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // For chips with slow 32-byte unaligned loads, break the 32-byte operation
  // into two 16-byte operations.
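  // (E.g. on Sandy Bridge a 32-byte unaligned load is slower than two
  //  16-byte loads, so a v8f32 load is rebuilt below from two halves plus
  //  128-bit inserts; illustrative.)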
  ISD::LoadExtType Ext = Ld->getExtensionType();
  unsigned Alignment = Ld->getAlignment();
  bool IsAligned = Alignment == 0 || Alignment >= MemVT.getSizeInBits()/8;
  if (RegVT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
      !DCI.isBeforeLegalizeOps() && !IsAligned && Ext == ISD::NON_EXTLOAD) {
    unsigned NumElems = RegVT.getVectorNumElements();
    if (NumElems < 2)
      return SDValue();

    SDValue Ptr = Ld->getBasePtr();
    SDValue Increment = DAG.getConstant(16, TLI.getPointerTy());

    EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
                                  NumElems/2);
    SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
                                Ld->getPointerInfo(), Ld->isVolatile(),
                                Ld->isNonTemporal(), Ld->isInvariant(),
                                Alignment);
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
    SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
                                Ld->getPointerInfo(), Ld->isVolatile(),
                                Ld->isNonTemporal(), Ld->isInvariant(),
                                std::min(16U, Alignment));
    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                             Load1.getValue(1),
                             Load2.getValue(1));

    SDValue NewVec = DAG.getUNDEF(RegVT);
    NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl);
    NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl);
    return DCI.CombineTo(N, NewVec, TF, true);
  }

  return SDValue();
}

/// PerformMLOADCombine - Resolve extending loads.
static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
  MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
  if (Mld->getExtensionType() != ISD::SEXTLOAD)
    return SDValue();

  EVT VT = Mld->getValueType(0);
  unsigned NumElems = VT.getVectorNumElements();
  EVT LdVT = Mld->getMemoryVT();
  SDLoc dl(Mld);

  assert(LdVT != VT && "Cannot extend to the same type");
  unsigned ToSz = VT.getVectorElementType().getSizeInBits();
  unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
  // From, To sizes and ElemCount must be pow of two.
  assert(isPowerOf2_32(NumElems * FromSz * ToSz) &&
         "Unexpected size for extending masked load");
  unsigned SizeRatio = ToSz / FromSz;
  assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());

  // Create a type on which we perform the shuffle.
  EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
                                   LdVT.getScalarType(), NumElems*SizeRatio);
  assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());

  // Convert Src0 value.
  SDValue WideSrc0 = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mld->getSrc0());
  if (Mld->getSrc0().getOpcode() != ISD::UNDEF) {
    SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
    for (unsigned i = 0; i != NumElems; ++i)
      ShuffleVec[i] = i * SizeRatio;

    // Can't shuffle using an illegal type.
    assert(DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) &&
           "WideVecVT should be legal");
    WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
                                    DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
  }
  // Prepare the new mask.
  SDValue NewMask;
  SDValue Mask = Mld->getMask();
  if (Mask.getValueType() == VT) {
    // Mask and original value have the same type.
    NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
    SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
    for (unsigned i = 0; i != NumElems; ++i)
      ShuffleVec[i] = i * SizeRatio;
    for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
      ShuffleVec[i] = NumElems*SizeRatio;
    NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
                                   DAG.getConstant(0, WideVecVT),
                                   &ShuffleVec[0]);
  }
  else {
    assert(Mask.getValueType().getVectorElementType() == MVT::i1);
    unsigned WidenNumElts = NumElems*SizeRatio;
    unsigned MaskNumElts = VT.getVectorNumElements();
    EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                     WidenNumElts);

    unsigned NumConcat = WidenNumElts / MaskNumElts;
    SmallVector<SDValue, 16> Ops(NumConcat);
    SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
    Ops[0] = Mask;
    for (unsigned i = 1; i != NumConcat; ++i)
      Ops[i] = ZeroVal;

    NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
  }

  SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
                                     Mld->getBasePtr(), NewMask, WideSrc0,
                                     Mld->getMemoryVT(), Mld->getMemOperand(),
                                     ISD::NON_EXTLOAD);
  SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd);
  return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
}

/// PerformMSTORECombine - Resolve truncating stores.
static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
                                    const X86Subtarget *Subtarget) {
  MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
  if (!Mst->isTruncatingStore())
    return SDValue();

  EVT VT = Mst->getValue().getValueType();
  unsigned NumElems = VT.getVectorNumElements();
  EVT StVT = Mst->getMemoryVT();
  SDLoc dl(Mst);

  assert(StVT != VT && "Cannot truncate to the same type");
  unsigned FromSz = VT.getVectorElementType().getSizeInBits();
  unsigned ToSz = StVT.getVectorElementType().getSizeInBits();

  // From, To sizes and ElemCount must be pow of two.
  assert(isPowerOf2_32(NumElems * FromSz * ToSz) &&
         "Unexpected size for truncating masked store");
  // We are going to use the original vector elt for storing.
  // Accumulated smaller vector elements must be a multiple of the store size.
  assert(((NumElems * FromSz) % ToSz) == 0 &&
         "Unexpected ratio for truncating masked store");

  unsigned SizeRatio = FromSz / ToSz;
  assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());

  // Create a type on which we perform the shuffle.
  EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
                                   StVT.getScalarType(), NumElems*SizeRatio);

  assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());

  SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mst->getValue());
  SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
  for (unsigned i = 0; i != NumElems; ++i)
    ShuffleVec[i] = i * SizeRatio;

  // Can't shuffle using an illegal type.
  assert(DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT) &&
         "WideVecVT should be legal");

  SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
                                              DAG.getUNDEF(WideVecVT),
                                              &ShuffleVec[0]);

  SDValue NewMask;
  SDValue Mask = Mst->getMask();
  if (Mask.getValueType() == VT) {
    // Mask and original value have the same type.
    NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
    for (unsigned i = 0; i != NumElems; ++i)
      ShuffleVec[i] = i * SizeRatio;
    for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
      ShuffleVec[i] = NumElems*SizeRatio;
    NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
                                   DAG.getConstant(0, WideVecVT),
                                   &ShuffleVec[0]);
  }
  else {
    assert(Mask.getValueType().getVectorElementType() == MVT::i1);
    unsigned WidenNumElts = NumElems*SizeRatio;
    unsigned MaskNumElts = VT.getVectorNumElements();
    EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                     WidenNumElts);

    unsigned NumConcat = WidenNumElts / MaskNumElts;
    SmallVector<SDValue, 16> Ops(NumConcat);
    SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
    Ops[0] = Mask;
    for (unsigned i = 1; i != NumConcat; ++i)
      Ops[i] = ZeroVal;

    NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
  }

  return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal,
                            Mst->getBasePtr(), NewMask, StVT,
                            Mst->getMemOperand(), false);
}

/// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
                                   const X86Subtarget *Subtarget) {
  StoreSDNode *St = cast<StoreSDNode>(N);
  EVT VT = St->getValue().getValueType();
  EVT StVT = St->getMemoryVT();
  SDLoc dl(St);
  SDValue StoredVal = St->getOperand(1);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // If we are saving a concatenation of two XMM registers and 32-byte stores
  // are slow, such as on Sandy Bridge, perform two 16-byte stores.
  unsigned Alignment = St->getAlignment();
  bool IsAligned = Alignment == 0 || Alignment >= VT.getSizeInBits()/8;
  if (VT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
      StVT == VT && !IsAligned) {
    unsigned NumElems = VT.getVectorNumElements();
    if (NumElems < 2)
      return SDValue();

    SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl);
    SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl);

    SDValue Stride = DAG.getConstant(16, TLI.getPointerTy());
    SDValue Ptr0 = St->getBasePtr();
    SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride);

    SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0,
                               St->getPointerInfo(), St->isVolatile(),
                               St->isNonTemporal(), Alignment);
    SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1,
                               St->getPointerInfo(), St->isVolatile(),
                               St->isNonTemporal(),
                               std::min(16U, Alignment));
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
  }

  // Optimize trunc store (of multiple scalars) to shuffle and store.
  // First, pack all of the elements in one place. Next, store to memory
  // in fewer chunks.
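  // (E.g. a v8i16 -> v8i8 truncating store is shuffled so the eight result
  //  bytes sit at the bottom of the wide register, then written with a single
  //  i64-sized store (an f64 store on 32-bit targets); illustrative.)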
  if (St->isTruncatingStore() && VT.isVector()) {
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    unsigned NumElems = VT.getVectorNumElements();
    assert(StVT != VT && "Cannot truncate to the same type");
    unsigned FromSz = VT.getVectorElementType().getSizeInBits();
    unsigned ToSz = StVT.getVectorElementType().getSizeInBits();

    // From, To sizes and ElemCount must be pow of two.
    if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
    // We are going to use the original vector elt for storing.
    // Accumulated smaller vector elements must be a multiple of the store
    // size.
    if (0 != (NumElems * FromSz) % ToSz) return SDValue();

    unsigned SizeRatio = FromSz / ToSz;

    assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());

    // Create a type on which we perform the shuffle.
    EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
                                     StVT.getScalarType(), NumElems*SizeRatio);

    assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());

    SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue());
    SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
    for (unsigned i = 0; i != NumElems; ++i)
      ShuffleVec[i] = i * SizeRatio;

    // Can't shuffle using an illegal type.
    if (!TLI.isTypeLegal(WideVecVT))
      return SDValue();

    SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
                                         DAG.getUNDEF(WideVecVT),
                                         &ShuffleVec[0]);
    // At this point all of the data is stored at the bottom of the
    // register. We now need to save it to mem.

    // Find the largest store unit.
    MVT StoreType = MVT::i8;
    for (MVT Tp : MVT::integer_valuetypes()) {
      if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
        StoreType = Tp;
    }

    // On 32-bit systems, we can't save 64-bit integers. Try bitcasting to F64.
    if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
        (64 <= NumElems * ToSz))
      StoreType = MVT::f64;

    // Bitcast the original vector into a vector of store-size units.
    EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(), StoreType,
                                      VT.getSizeInBits()/StoreType.getSizeInBits());
    assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
    SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff);
    SmallVector<SDValue, 8> Chains;
    SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
                                        TLI.getPointerTy());
    SDValue Ptr = St->getBasePtr();

    // Perform one or more big stores into memory.
    for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
      SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                                   StoreType, ShuffWide,
                                   DAG.getIntPtrConstant(i));
      SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr,
                                St->getPointerInfo(), St->isVolatile(),
                                St->isNonTemporal(), St->getAlignment());
      Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
      Chains.push_back(Ch);
    }

    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
  }

  // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
  // the FP state in cases where an emms may be missing.
  // A preferable solution to the general problem is to figure out the right
  // places to insert EMMS. This qualifies as a quick hack.

  // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
  if (VT.getSizeInBits() != 64)
    return SDValue();

  const Function *F = DAG.getMachineFunction().getFunction();
  bool NoImplicitFloatOps = F->getAttributes().
    hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat);
  bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
                     && Subtarget->hasSSE2();
  if ((VT.isVector() ||
       (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
      isa<LoadSDNode>(St->getValue()) &&
      !cast<LoadSDNode>(St->getValue())->isVolatile() &&
      St->getChain().hasOneUse() && !St->isVolatile()) {
    SDNode* LdVal = St->getValue().getNode();
    LoadSDNode *Ld = nullptr;
    int TokenFactorIndex = -1;
    SmallVector<SDValue, 8> Ops;
    SDNode* ChainVal = St->getChain().getNode();
    // Must be a store of a load. We currently handle two cases: the load
    // is a direct child, and it's under an intervening TokenFactor. It is
    // possible to dig deeper under nested TokenFactors.
    if (ChainVal == LdVal)
      Ld = cast<LoadSDNode>(St->getChain());
    else if (St->getValue().hasOneUse() &&
             ChainVal->getOpcode() == ISD::TokenFactor) {
      for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
        if (ChainVal->getOperand(i).getNode() == LdVal) {
          TokenFactorIndex = i;
          Ld = cast<LoadSDNode>(St->getValue());
        } else
          Ops.push_back(ChainVal->getOperand(i));
      }
    }

    if (!Ld || !ISD::isNormalLoad(Ld))
      return SDValue();

    // If this is not the MMX case, i.e. we are just turning i64 load/store
    // into f64 load/store, avoid the transformation if there are multiple
    // uses of the loaded value.
    if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
      return SDValue();

    SDLoc LdDL(Ld);
    SDLoc StDL(N);
    // If we are a 64-bit capable x86, lower to a single movq load/store pair.
    // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
    // pair instead.
    if (Subtarget->is64Bit() || F64IsLegal) {
      EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
      SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
                                  Ld->getPointerInfo(), Ld->isVolatile(),
                                  Ld->isNonTemporal(), Ld->isInvariant(),
                                  Ld->getAlignment());
      SDValue NewChain = NewLd.getValue(1);
      if (TokenFactorIndex != -1) {
        Ops.push_back(NewChain);
        NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
      }
      return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
                          St->getPointerInfo(),
                          St->isVolatile(), St->isNonTemporal(),
                          St->getAlignment());
    }

    // Otherwise, lower to two pairs of 32-bit loads / stores.
    SDValue LoAddr = Ld->getBasePtr();
    SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
                                 DAG.getConstant(4, MVT::i32));

    SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
                               Ld->getPointerInfo(),
                               Ld->isVolatile(), Ld->isNonTemporal(),
                               Ld->isInvariant(), Ld->getAlignment());
    SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
                               Ld->getPointerInfo().getWithOffset(4),
                               Ld->isVolatile(), Ld->isNonTemporal(),
                               Ld->isInvariant(),
                               MinAlign(Ld->getAlignment(), 4));

    SDValue NewChain = LoLd.getValue(1);
    if (TokenFactorIndex != -1) {
      Ops.push_back(LoLd);
      Ops.push_back(HiLd);
      NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
    }

    LoAddr = St->getBasePtr();
    HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
                         DAG.getConstant(4, MVT::i32));

    SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
                                St->getPointerInfo(),
                                St->isVolatile(), St->isNonTemporal(),
                                St->getAlignment());
    SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
                                St->getPointerInfo().getWithOffset(4),
                                St->isVolatile(),
                                St->isNonTemporal(),
                                MinAlign(St->getAlignment(), 4));
    return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
  }

  return SDValue();
}
25076 /// Return 'true' if this vector operation is "horizontal"
25077 /// and return the operands for the horizontal operation in LHS and RHS. A
25078 /// horizontal operation performs the binary operation on successive elements
25079 /// of its first operand, then on successive elements of its second operand,
25080 /// returning the resulting values in a vector. For example, if
25081 /// A = < float a0, float a1, float a2, float a3 >
25082 /// and
25083 /// B = < float b0, float b1, float b2, float b3 >
25084 /// then the result of doing a horizontal operation on A and B is
25085 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
25086 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
25087 /// A horizontal-op B, for some already available A and B, and if so then LHS is
25088 /// set to A, RHS to B, and the routine returns 'true'.
25089 /// Note that the binary operation should have the property that if one of the
25090 /// operands is UNDEF then the result is UNDEF.
25091 static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
25092 // Look for the following pattern: if
25093 // A = < float a0, float a1, float a2, float a3 >
25094 // B = < float b0, float b1, float b2, float b3 >
25095 // and
25096 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
25097 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
25098 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
25099 // which is A horizontal-op B.
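// The same idea extends per 128-bit lane for 256-bit types: e.g. for v8f32,
// a matching pair is
//   LHS = VECTOR_SHUFFLE A, B, <0, 2, 8, 10, 4, 6, 12, 14>
//   RHS = VECTOR_SHUFFLE A, B, <1, 3, 9, 11, 5, 7, 13, 15>
// since AVX horizontal ops pair adjacent elements within each lane.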
25101 // At least one of the operands should be a vector shuffle.
25102 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
25103 RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
25104 return false;
25106 MVT VT = LHS.getSimpleValueType();
25108 assert((VT.is128BitVector() || VT.is256BitVector()) &&
25109 "Unsupported vector type for horizontal add/sub");
25111 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
25112 // operate independently on 128-bit lanes.
25113 unsigned NumElts = VT.getVectorNumElements();
25114 unsigned NumLanes = VT.getSizeInBits()/128;
25115 unsigned NumLaneElts = NumElts / NumLanes;
25116 assert((NumLaneElts % 2 == 0) &&
25117 "Vector type should have an even number of elements in each lane");
25118 unsigned HalfLaneElts = NumLaneElts/2;
25120 // View LHS in the form
25121 // LHS = VECTOR_SHUFFLE A, B, LMask
25122 // If LHS is not a shuffle then pretend it is the shuffle
25123 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
25124 // NOTE: in what follows a default initialized SDValue represents an UNDEF of
25125 // type VT.
25126 SDValue A, B;
25127 SmallVector<int, 16> LMask(NumElts);
25128 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25129 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF)
25130 A = LHS.getOperand(0);
25131 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF)
25132 B = LHS.getOperand(1);
25133 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
25134 std::copy(Mask.begin(), Mask.end(), LMask.begin());
25135 } else {
25136 if (LHS.getOpcode() != ISD::UNDEF)
25137 A = LHS;
25138 for (unsigned i = 0; i != NumElts; ++i)
25139 LMask[i] = i;
25140 }
25142 // Likewise, view RHS in the form
25143 // RHS = VECTOR_SHUFFLE C, D, RMask
25144 SDValue C, D;
25145 SmallVector<int, 16> RMask(NumElts);
25146 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25147 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF)
25148 C = RHS.getOperand(0);
25149 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF)
25150 D = RHS.getOperand(1);
25151 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
25152 std::copy(Mask.begin(), Mask.end(), RMask.begin());
25153 } else {
25154 if (RHS.getOpcode() != ISD::UNDEF)
25155 C = RHS;
25156 for (unsigned i = 0; i != NumElts; ++i)
25157 RMask[i] = i;
25158 }
25160 // Check that the shuffles are both shuffling the same vectors.
25161 if (!(A == C && B == D) && !(A == D && B == C))
25162 return false;
25164 // If everything is UNDEF then bail out: it would be better to fold to UNDEF.
25165 if (!A.getNode() && !B.getNode())
25166 return false;
25168 // If A and B occur in reverse order in RHS, then "swap" them (which means
25169 // rewriting the mask).
25170 if (A != C)
25171 CommuteVectorShuffleMask(RMask, NumElts);
25173 // At this point LHS and RHS are equivalent to
25174 // LHS = VECTOR_SHUFFLE A, B, LMask
25175 // RHS = VECTOR_SHUFFLE A, B, RMask
25176 // Check that the masks correspond to performing a horizontal operation.
25177 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
25178 for (unsigned i = 0; i != NumLaneElts; ++i) {
25179 int LIdx = LMask[i+l], RIdx = RMask[i+l];
25181 // Ignore any UNDEF components.
25182 if (LIdx < 0 || RIdx < 0 ||
25183 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
25184 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
25185 continue;
25187 // Check that successive elements are being operated on. If not, this is
25188 // not a horizontal operation.
25189 unsigned Src = (i/HalfLaneElts); // each lane is split between srcs
25190 int Index = 2*(i%HalfLaneElts) + NumElts*Src + l;
25191 if (!(LIdx == Index && RIdx == Index + 1) &&
25192 !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
25193 return false;
25194 }
25195 }
25197 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
25198 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
25199 return true;
25200 }
25202 /// Do target-specific dag combines on floating point adds.
25203 static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
25204 const X86Subtarget *Subtarget) {
25205 EVT VT = N->getValueType(0);
25206 SDValue LHS = N->getOperand(0);
25207 SDValue RHS = N->getOperand(1);
25209 // Try to synthesize horizontal adds from adds of shuffles.
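// E.g., with SSE3, IR of the form
//   %l = shufflevector <4 x float> %a, %b, <0, 2, 4, 6>
//   %r = shufflevector <4 x float> %a, %b, <1, 3, 5, 7>
//   %s = fadd <4 x float> %l, %r
// collapses to a single haddps of %a and %b (schematic mask syntax).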
25210 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25211 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25212 isHorizontalBinOp(LHS, RHS, true))
25213 return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS);
25215 return SDValue();
25216 }
25217 /// Do target-specific dag combines on floating point subs.
25218 static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
25219 const X86Subtarget *Subtarget) {
25220 EVT VT = N->getValueType(0);
25221 SDValue LHS = N->getOperand(0);
25222 SDValue RHS = N->getOperand(1);
25224 // Try to synthesize horizontal subs from subs of shuffles.
25225 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25226 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25227 isHorizontalBinOp(LHS, RHS, false))
25228 return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS);
25230 return SDValue();
25231 }
25232 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
25233 static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
25234 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
25235 // F[X]OR(0.0, x) -> x
25236 // F[X]OR(x, 0.0) -> x
25237 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25238 if (C->getValueAPF().isPosZero())
25239 return N->getOperand(1);
25240 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25241 if (C->getValueAPF().isPosZero())
25242 return N->getOperand(0);
25244 return SDValue();
25245 }
25246 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
25247 static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
25248 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
25250 // Only perform optimizations if UnsafeMath is used.
25251 if (!DAG.getTarget().Options.UnsafeFPMath)
25252 return SDValue();
25254 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
25255 // into FMINC and FMAXC, which are Commutative operations.
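// SSE min/max are not commutative: when an operand is a NaN (or both are
// signed zeros) the instruction returns its second source operand, so the
// operand order is observable. Under unsafe-fp-math those cases may be
// ignored, which makes the commutable FMINC/FMAXC forms legal to use.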
25256 unsigned NewOp = 0;
25257 switch (N->getOpcode()) {
25258 default: llvm_unreachable("unknown opcode");
25259 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
25260 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
25261 }
25263 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
25264 N->getOperand(0), N->getOperand(1));
25265 }
25267 /// Do target-specific dag combines on X86ISD::FAND nodes.
25268 static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
25269 // FAND(0.0, x) -> 0.0
25270 // FAND(x, 0.0) -> 0.0
25271 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25272 if (C->getValueAPF().isPosZero())
25273 return N->getOperand(0);
25274 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25275 if (C->getValueAPF().isPosZero())
25276 return N->getOperand(1);
25278 return SDValue();
25279 }
25280 /// Do target-specific dag combines on X86ISD::FANDN nodes
25281 static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG) {
25282 // FANDN(x, 0.0) -> 0.0
25283 // FANDN(0.0, x) -> x
25284 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25285 if (C->getValueAPF().isPosZero())
25286 return N->getOperand(1);
25287 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25288 if (C->getValueAPF().isPosZero())
25289 return N->getOperand(1);
25291 return SDValue();
25292 }
25293 static SDValue PerformBTCombine(SDNode *N,
25294 SelectionDAG &DAG,
25295 TargetLowering::DAGCombinerInfo &DCI) {
25296 // BT ignores high bits in the bit index operand.
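// E.g., a 32-bit bt uses only the low 5 bits of a register bit index, so a
// preceding (and %idx, 31) is redundant and the SimplifyDemandedBits call
// below can strip it.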
25297 SDValue Op1 = N->getOperand(1);
25298 if (Op1.hasOneUse()) {
25299 unsigned BitWidth = Op1.getValueSizeInBits();
25300 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
25301 APInt KnownZero, KnownOne;
25302 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
25303 !DCI.isBeforeLegalizeOps());
25304 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25305 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
25306 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
25307 DCI.CommitTargetLoweringOpt(TLO);
25308 }
25310 return SDValue();
25311 }
25312 static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
25313 SDValue Op = N->getOperand(0);
25314 if (Op.getOpcode() == ISD::BITCAST)
25315 Op = Op.getOperand(0);
25316 EVT VT = N->getValueType(0), OpVT = Op.getValueType();
25317 if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
25318 VT.getVectorElementType().getSizeInBits() ==
25319 OpVT.getVectorElementType().getSizeInBits()) {
25320 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
25321 }
25323 return SDValue();
25324 }
25325 static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
25326 const X86Subtarget *Subtarget) {
25327 EVT VT = N->getValueType(0);
25328 if (!VT.isVector())
25329 return SDValue();
25331 SDValue N0 = N->getOperand(0);
25332 SDValue N1 = N->getOperand(1);
25333 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
25334 SDLoc dl(N);
25336 // SIGN_EXTEND_INREG to v4i64 is an expensive operation on both SSE
25337 // and AVX2 since there is no sign-extended shift right
25338 // operation on a vector with 64-bit elements.
25339 // (sext_in_reg (v4i64 anyext (v4i32 x)), ExtraVT) ->
25340 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT)))
25341 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
25342 N0.getOpcode() == ISD::SIGN_EXTEND)) {
25343 SDValue N00 = N0.getOperand(0);
25345 // On AVX2 an extending load has a better lowering:
25346 // it may be replaced with an X86ISD::VSEXT node.
25347 if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256())
25348 if (!ISD::isNormalLoad(N00.getNode()))
25349 return SDValue();
25351 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
25352 SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
25353 N00, N1);
25354 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
25355 }
25356 }
25358 return SDValue();
25359 }
25360 static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
25361 TargetLowering::DAGCombinerInfo &DCI,
25362 const X86Subtarget *Subtarget) {
25363 SDValue N0 = N->getOperand(0);
25364 EVT VT = N->getValueType(0);
25366 // (i8,i32 sext (sdivrem (i8 x, i8 y))) ->
25367 // (i8,i32 (sdivrem_sext_hreg (i8 x, i8 y)))
25368 // This exposes the sext to the sdivrem lowering, so that it directly extends
25369 // from AH (which we otherwise need to do contortions to access).
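// Background: the x86 8-bit divide leaves the quotient in AL and the
// remainder in AH; the *_SEXT_HREG node lets isel emit one sign-extending
// move out of AH rather than a shift-and-mask sequence on the full result.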
25370 if (N0.getOpcode() == ISD::SDIVREM && N0.getResNo() == 1 &&
25371 N0.getValueType() == MVT::i8 && VT == MVT::i32) {
25372 SDLoc dl(N);
25373 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25374 SDValue R = DAG.getNode(X86ISD::SDIVREM8_SEXT_HREG, dl, NodeTys,
25375 N0.getOperand(0), N0.getOperand(1));
25376 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25377 return R.getValue(1);
25378 }
25380 if (!DCI.isBeforeLegalizeOps())
25381 return SDValue();
25383 if (!Subtarget->hasFp256())
25384 return SDValue();
25386 if (VT.isVector() && VT.getSizeInBits() == 256) {
25387 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25388 if (R.getNode())
25389 return R;
25390 }
25392 return SDValue();
25393 }
25395 static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
25396 const X86Subtarget* Subtarget) {
25397 SDLoc dl(N);
25398 EVT VT = N->getValueType(0);
25400 // Let legalize expand this if it isn't a legal type yet.
25401 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
25402 return SDValue();
25404 EVT ScalarVT = VT.getScalarType();
25405 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
25406 (!Subtarget->hasFMA() && !Subtarget->hasFMA4()))
25407 return SDValue();
25409 SDValue A = N->getOperand(0);
25410 SDValue B = N->getOperand(1);
25411 SDValue C = N->getOperand(2);
25413 bool NegA = (A.getOpcode() == ISD::FNEG);
25414 bool NegB = (B.getOpcode() == ISD::FNEG);
25415 bool NegC = (C.getOpcode() == ISD::FNEG);
25417 // Negative multiplication when NegA xor NegB
25418 bool NegMul = (NegA != NegB);
25419 if (NegA)
25420 A = A.getOperand(0);
25421 if (NegB)
25422 B = B.getOperand(0);
25423 if (NegC)
25424 C = C.getOperand(0);
25426 unsigned Opcode;
25427 if (!NegMul)
25428 Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB;
25429 else
25430 Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB;
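// Spelled out, the selection above is: (NegMul, NegC) -> opcode
//   (false, false) -> FMADD    (a * b) + c
//   (false, true ) -> FMSUB    (a * b) - c
//   (true,  false) -> FNMADD  -(a * b) + c
//   (true,  true ) -> FNMSUB  -(a * b) - c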
25432 return DAG.getNode(Opcode, dl, VT, A, B, C);
25433 }
25435 static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
25436 TargetLowering::DAGCombinerInfo &DCI,
25437 const X86Subtarget *Subtarget) {
25438 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
25439 // (and (i32 x86isd::setcc_carry), 1)
25440 // This eliminates the zext. This transformation is necessary because
25441 // ISD::SETCC is always legalized to i8.
25442 SDLoc dl(N);
25443 SDValue N0 = N->getOperand(0);
25444 EVT VT = N->getValueType(0);
25446 if (N0.getOpcode() == ISD::AND &&
25447 N0.hasOneUse() &&
25448 N0.getOperand(0).hasOneUse()) {
25449 SDValue N00 = N0.getOperand(0);
25450 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25451 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
25452 if (!C || C->getZExtValue() != 1)
25453 return SDValue();
25454 return DAG.getNode(ISD::AND, dl, VT,
25455 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25456 N00.getOperand(0), N00.getOperand(1)),
25457 DAG.getConstant(1, VT));
25458 }
25459 }
25461 if (N0.getOpcode() == ISD::TRUNCATE &&
25462 N0.hasOneUse() &&
25463 N0.getOperand(0).hasOneUse()) {
25464 SDValue N00 = N0.getOperand(0);
25465 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25466 return DAG.getNode(ISD::AND, dl, VT,
25467 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25468 N00.getOperand(0), N00.getOperand(1)),
25469 DAG.getConstant(1, VT));
25470 }
25471 }
25472 if (VT.is256BitVector()) {
25473 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25474 if (R.getNode())
25475 return R;
25476 }
25478 // (i8,i32 zext (udivrem (i8 x, i8 y))) ->
25479 // (i8,i32 (udivrem_zext_hreg (i8 x, i8 y)))
25480 // This exposes the zext to the udivrem lowering, so that it directly extends
25481 // from AH (which we otherwise need to do contortions to access).
25482 if (N0.getOpcode() == ISD::UDIVREM &&
25483 N0.getResNo() == 1 && N0.getValueType() == MVT::i8 &&
25484 (VT == MVT::i32 || VT == MVT::i64)) {
25485 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25486 SDValue R = DAG.getNode(X86ISD::UDIVREM8_ZEXT_HREG, dl, NodeTys,
25487 N0.getOperand(0), N0.getOperand(1));
25488 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25489 return R.getValue(1);
25490 }
25492 return SDValue();
25493 }
25495 // Optimize x == -y --> x+y == 0
25496 // x != -y --> x+y != 0
25497 static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
25498 const X86Subtarget* Subtarget) {
25499 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
25500 SDValue LHS = N->getOperand(0);
25501 SDValue RHS = N->getOperand(1);
25502 EVT VT = N->getValueType(0);
25503 SDLoc DL(N);
25505 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
25506 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
25507 if (C->getAPIntValue() == 0 && LHS.hasOneUse()) {
25508 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
25509 LHS.getValueType(), RHS, LHS.getOperand(1));
25510 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
25511 addV, DAG.getConstant(0, addV.getValueType()), CC);
25512 }
25513 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
25514 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0)))
25515 if (C->getAPIntValue() == 0 && RHS.hasOneUse()) {
25516 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
25517 RHS.getValueType(), LHS, RHS.getOperand(1));
25518 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
25519 addV, DAG.getConstant(0, addV.getValueType()), CC);
25520 }
25522 if (VT.getScalarType() == MVT::i1) {
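// For i1 vectors, sext produces all-zeros or all-ones lanes, so comparing
// sext(x) against zero reduces to x itself:
//   setcc (sext x), 0, seteq  ->  not x
//   setcc (sext x), 0, setne  ->  x
// which is what the two returns below produce.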
25523 bool IsSEXT0 = (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
25524 (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
25525 bool IsVZero0 = ISD::isBuildVectorAllZeros(LHS.getNode());
25526 if (!IsSEXT0 && !IsVZero0)
25527 return SDValue();
25528 bool IsSEXT1 = (RHS.getOpcode() == ISD::SIGN_EXTEND) &&
25529 (RHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
25530 bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
25532 if (!IsSEXT1 && !IsVZero1)
25533 return SDValue();
25535 if (IsSEXT0 && IsVZero1) {
25536 assert(VT == LHS.getOperand(0).getValueType() && "Unexpected operand type");
25537 if (CC == ISD::SETEQ)
25538 return DAG.getNOT(DL, LHS.getOperand(0), VT);
25539 return LHS.getOperand(0);
25540 }
25541 if (IsSEXT1 && IsVZero0) {
25542 assert(VT == RHS.getOperand(0).getValueType() && "Unexpected operand type");
25543 if (CC == ISD::SETEQ)
25544 return DAG.getNOT(DL, RHS.getOperand(0), VT);
25545 return RHS.getOperand(0);
25546 }
25547 }
25549 return SDValue();
25550 }
25552 static SDValue PerformINSERTPSCombine(SDNode *N, SelectionDAG &DAG,
25553 const X86Subtarget *Subtarget) {
25554 SDLoc dl(N);
25555 MVT VT = N->getOperand(1)->getSimpleValueType(0);
25556 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
25557 "X86insertps is only defined for v4x32");
25559 SDValue Ld = N->getOperand(1);
25560 if (MayFoldLoad(Ld)) {
25561 // Extract the countS bits from the immediate so we can get the proper
25562 // address when narrowing the vector load to a specific element.
25563 // When the second source op is a memory address, insertps doesn't use
25564 // countS and just gets an f32 from that address.
25565 unsigned DestIndex =
25566 cast<ConstantSDNode>(N->getOperand(2))->getZExtValue() >> 6;
25567 Ld = NarrowVectorLoadToElement(cast<LoadSDNode>(Ld), DestIndex, DAG);
25568 } else
25569 return SDValue();
25571 // Create this as a scalar to vector to match the instruction pattern.
25572 SDValue LoadScalarToVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Ld);
25573 // countS bits are ignored when loading from memory on insertps, which
25574 // means we don't need to explicitly set them to 0.
25575 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N->getOperand(0),
25576 LoadScalarToVector, N->getOperand(2));
25577 }
25579 // Helper function of PerformSETCCCombine. It materializes "setb reg"
25580 // as "sbb reg,reg", since it can be extended without a zext and produces
25581 // an all-ones bit which is more useful than 0/1 in some cases.
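// Concretely, "sbb %reg, %reg" computes reg - reg - CF, i.e. 0 or -1, so
// the carry flag becomes an all-ones mask for free; AND-ing with 1 then
// recovers the plain 0/1 value of setb where that is what is needed.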
25582 static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
25583 MVT VT) {
25584 if (VT == MVT::i8)
25585 return DAG.getNode(ISD::AND, DL, VT,
25586 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
25587 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS),
25588 DAG.getConstant(1, VT));
25589 assert (VT == MVT::i1 && "Unexpected type for SETCC node");
25590 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1,
25591 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
25592 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS));
25593 }
25595 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
25596 static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
25597 TargetLowering::DAGCombinerInfo &DCI,
25598 const X86Subtarget *Subtarget) {
25599 SDLoc DL(N);
25600 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
25601 SDValue EFLAGS = N->getOperand(1);
25603 if (CC == X86::COND_A) {
25604 // Try to convert COND_A into COND_B in an attempt to facilitate
25605 // materializing "setb reg".
25607 // Do not flip "e > c", where "c" is a constant, because Cmp instruction
25608 // cannot take an immediate as its first operand.
25610 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
25611 EFLAGS.getValueType().isInteger() &&
25612 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
25613 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
25614 EFLAGS.getNode()->getVTList(),
25615 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
25616 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
25617 return MaterializeSETB(DL, NewEFLAGS, DAG, N->getSimpleValueType(0));
25618 }
25619 }
25621 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
25622 // a zext and produces an all-ones bit which is more useful than 0/1 in some
25623 // cases.
25624 if (CC == X86::COND_B)
25625 return MaterializeSETB(DL, EFLAGS, DAG, N->getSimpleValueType(0));
25627 SDValue Flags;
25629 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
25630 if (Flags.getNode()) {
25631 SDValue Cond = DAG.getConstant(CC, MVT::i8);
25632 return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
25633 }
25635 return SDValue();
25636 }
25638 // Optimize branch condition evaluation.
25640 static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
25641 TargetLowering::DAGCombinerInfo &DCI,
25642 const X86Subtarget *Subtarget) {
25643 SDLoc DL(N);
25644 SDValue Chain = N->getOperand(0);
25645 SDValue Dest = N->getOperand(1);
25646 SDValue EFLAGS = N->getOperand(3);
25647 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
25649 SDValue Flags;
25651 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
25652 if (Flags.getNode()) {
25653 SDValue Cond = DAG.getConstant(CC, MVT::i8);
25654 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
25655 Flags);
25656 }
25658 return SDValue();
25659 }
25661 static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
25662 SelectionDAG &DAG) {
25663 // Take advantage of vector comparisons producing 0 or -1 in each lane to
25664 // optimize away operation when it's from a constant.
25666 // The general transformation is:
25667 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
25668 // AND(VECTOR_CMP(x,y), constant2)
25669 // constant2 = UNARYOP(constant)
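// This is sound for lane-wise ops such as sint_to_fp: true lanes of the
// compare keep UNARYOP(constant), and false lanes become all-zero bits,
// which matches UNARYOP(0) for the operations this is applied to.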
25671 // Early exit if this isn't a vector operation, the operand of the
25672 // unary operation isn't a bitwise AND, or if the sizes of the operations
25673 // aren't the same.
25674 EVT VT = N->getValueType(0);
25675 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
25676 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
25677 VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
25678 return SDValue();
25680 // Now check that the other operand of the AND is a constant. We could
25681 // make the transformation for non-constant splats as well, but it's unclear
25682 // that would be a benefit as it would not eliminate any operations, just
25683 // perform one more step in scalar code before moving to the vector unit.
25684 if (BuildVectorSDNode *BV =
25685 dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
25686 // Bail out if the vector isn't a constant.
25687 if (!BV->isConstant())
25688 return SDValue();
25690 // Everything checks out. Build up the new and improved node.
25691 SDLoc DL(N);
25692 EVT IntVT = BV->getValueType(0);
25693 // Create a new constant of the appropriate type for the transformed
25694 // DAG.
25695 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
25696 // The AND node needs bitcasts to/from an integer vector type around it.
25697 SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
25698 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
25699 N->getOperand(0)->getOperand(0), MaskConst);
25700 SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
25701 return Res;
25702 }
25704 return SDValue();
25705 }
25707 static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
25708 const X86TargetLowering *XTLI) {
25709 // First try to optimize away the conversion entirely when it's
25710 // conditionally from a constant. Vectors only.
25711 SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
25712 if (Res != SDValue())
25713 return Res;
25715 // Now move on to more general possibilities.
25716 SDValue Op0 = N->getOperand(0);
25717 EVT InVT = Op0->getValueType(0);
25719 // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32))
25720 if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
25721 SDLoc dl(N);
25722 MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
25723 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
25724 return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
25725 }
25727 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
25728 // a 32-bit target where SSE doesn't support i64->FP operations.
25729 if (Op0.getOpcode() == ISD::LOAD) {
25730 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
25731 EVT VT = Ld->getValueType(0);
25732 if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
25733 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
25734 !XTLI->getSubtarget()->is64Bit() &&
25735 VT == MVT::i64) {
25736 SDValue FILDChain = XTLI->BuildFILD(SDValue(N, 0), Ld->getValueType(0),
25737 Ld->getChain(), Op0, DAG);
25738 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
25739 return FILDChain;
25740 }
25741 }
25743 return SDValue();
25744 }
25745 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
25746 static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
25747 X86TargetLowering::DAGCombinerInfo &DCI) {
25748 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
25749 // the result is either zero or one (depending on the input carry bit).
25750 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
25751 if (X86::isZeroNode(N->getOperand(0)) &&
25752 X86::isZeroNode(N->getOperand(1)) &&
25753 // We don't have a good way to replace an EFLAGS use, so only do this when
25754 // the flag result is unused.
25755 SDValue(N, 1).use_empty()) {
25756 SDLoc DL(N);
25757 EVT VT = N->getValueType(0);
25758 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
25759 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
25760 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
25761 DAG.getConstant(X86::COND_B,MVT::i8),
25762 N->getOperand(2)),
25763 DAG.getConstant(1, VT));
25764 return DCI.CombineTo(N, Res1, CarryOut);
25765 }
25767 return SDValue();
25768 }
25770 // fold (add Y, (sete X, 0)) -> adc 0, Y
25771 // (add Y, (setne X, 0)) -> sbb -1, Y
25772 // (sub (sete X, 0), Y) -> sbb 0, Y
25773 // (sub (setne X, 0), Y) -> adc -1, Y
25774 static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
25775 SDLoc DL(N);
25777 // Look through ZExts.
25778 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
25779 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
25780 return SDValue();
25782 SDValue SetCC = Ext.getOperand(0);
25783 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
25784 return SDValue();
25786 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
25787 if (CC != X86::COND_E && CC != X86::COND_NE)
25788 return SDValue();
25790 SDValue Cmp = SetCC.getOperand(1);
25791 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
25792 !X86::isZeroNode(Cmp.getOperand(1)) ||
25793 !Cmp.getOperand(0).getValueType().isInteger())
25794 return SDValue();
25796 SDValue CmpOp0 = Cmp.getOperand(0);
25797 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
25798 DAG.getConstant(1, CmpOp0.getValueType()));
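// Note why the constant is 1: for unsigned X, "cmp $1, X" sets the carry
// flag exactly when X < 1, i.e. X == 0, so the sete/setne value is now in
// CF and can be consumed directly by the adc/sbb emitted below.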
25800 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
25801 if (CC == X86::COND_NE)
25802 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
25803 DL, OtherVal.getValueType(), OtherVal,
25804 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
25805 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
25806 DL, OtherVal.getValueType(), OtherVal,
25807 DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
25808 }
25810 /// PerformAddCombine - Do target-specific dag combines on integer adds.
25811 static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
25812 const X86Subtarget *Subtarget) {
25813 EVT VT = N->getValueType(0);
25814 SDValue Op0 = N->getOperand(0);
25815 SDValue Op1 = N->getOperand(1);
25817 // Try to synthesize horizontal adds from adds of shuffles.
25818 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
25819 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
25820 isHorizontalBinOp(Op0, Op1, true))
25821 return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
25823 return OptimizeConditionalInDecrement(N, DAG);
25824 }
25826 static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
25827 const X86Subtarget *Subtarget) {
25828 SDValue Op0 = N->getOperand(0);
25829 SDValue Op1 = N->getOperand(1);
25831 // X86 can't encode an immediate LHS of a sub. See if we can push the
25832 // negation into a preceding instruction.
25833 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
25834 // If the RHS of the sub is a XOR with one use and a constant, invert the
25835 // immediate. Then add one to the LHS of the sub so we can turn
25836 // X-Y -> X+~Y+1, saving one register.
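// Algebraically: C - (X ^ K) == (C + 1) + (X ^ ~K), since X ^ ~K is
// ~(X ^ K) and -V == ~V + 1 in two's complement; the rewritten form no
// longer needs C materialized as the sub's left-hand register.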
25837 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
25838 isa<ConstantSDNode>(Op1.getOperand(1))) {
25839 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
25840 EVT VT = Op0.getValueType();
25841 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
25842 Op1.getOperand(0),
25843 DAG.getConstant(~XorC, VT));
25844 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
25845 DAG.getConstant(C->getAPIntValue()+1, VT));
25846 }
25847 }
25849 // Try to synthesize horizontal subs from subs of shuffles.
25850 EVT VT = N->getValueType(0);
25851 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
25852 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
25853 isHorizontalBinOp(Op0, Op1, false))
25854 return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
25856 return OptimizeConditionalInDecrement(N, DAG);
25857 }
25859 /// performVZEXTCombine - Do target-specific dag combines on X86ISD::VZEXT nodes.
25860 static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
25861 TargetLowering::DAGCombinerInfo &DCI,
25862 const X86Subtarget *Subtarget) {
25863 SDLoc DL(N);
25864 MVT VT = N->getSimpleValueType(0);
25865 SDValue Op = N->getOperand(0);
25866 MVT OpVT = Op.getSimpleValueType();
25867 MVT OpEltVT = OpVT.getVectorElementType();
25868 unsigned InputBits = OpEltVT.getSizeInBits() * VT.getVectorNumElements();
25870 // (vzext (bitcast (vzext x))) -> (vzext x)
25871 SDValue V = Op;
25872 while (V.getOpcode() == ISD::BITCAST)
25873 V = V.getOperand(0);
25875 if (V != Op && V.getOpcode() == X86ISD::VZEXT) {
25876 MVT InnerVT = V.getSimpleValueType();
25877 MVT InnerEltVT = InnerVT.getVectorElementType();
25879 // If the element sizes match exactly, we can just do one larger vzext. This
25880 // is always an exact type match as vzext operates on integer types.
25881 if (OpEltVT == InnerEltVT) {
25882 assert(OpVT == InnerVT && "Types must match for vzext!");
25883 return DAG.getNode(X86ISD::VZEXT, DL, VT, V.getOperand(0));
25884 }
25886 // The only other way we can combine them is if only a single element of the
25887 // inner vzext is used in the input to the outer vzext.
25888 if (InnerEltVT.getSizeInBits() < InputBits)
25889 return SDValue();
25891 // In this case, the inner vzext is completely dead because we're going to
25892 // only look at bits inside of the low element. Just do the outer vzext on
25893 // a bitcast of the input to the inner.
25894 return DAG.getNode(X86ISD::VZEXT, DL, VT,
25895 DAG.getNode(ISD::BITCAST, DL, OpVT, V));
25898 // Check if we can bypass extracting and re-inserting an element of an input
25899 // vector. Essentially:
25900 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
25901 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
25902 V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
25903 V.getOperand(0).getSimpleValueType().getSizeInBits() == InputBits) {
25904 SDValue ExtractedV = V.getOperand(0);
25905 SDValue OrigV = ExtractedV.getOperand(0);
25906 if (auto *ExtractIdx = dyn_cast<ConstantSDNode>(ExtractedV.getOperand(1)))
25907 if (ExtractIdx->getZExtValue() == 0) {
25908 MVT OrigVT = OrigV.getSimpleValueType();
25909 // Extract a subvector if necessary...
25910 if (OrigVT.getSizeInBits() > OpVT.getSizeInBits()) {
25911 int Ratio = OrigVT.getSizeInBits() / OpVT.getSizeInBits();
25912 OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
25913 OrigVT.getVectorNumElements() / Ratio);
25914 OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
25915 DAG.getIntPtrConstant(0));
25916 }
25917 Op = DAG.getNode(ISD::BITCAST, DL, OpVT, OrigV);
25918 return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
25919 }
25920 }
25922 return SDValue();
25923 }
25925 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
25926 DAGCombinerInfo &DCI) const {
25927 SelectionDAG &DAG = DCI.DAG;
25928 switch (N->getOpcode()) {
25929 default: break;
25930 case ISD::EXTRACT_VECTOR_ELT:
25931 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
25932 case ISD::VSELECT:
25933 case ISD::SELECT:
25934 case X86ISD::SHRUNKBLEND:
25935 return PerformSELECTCombine(N, DAG, DCI, Subtarget);
25936 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget);
25937 case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget);
25938 case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget);
25939 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
25940 case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
25941 case ISD::SHL:
25942 case ISD::SRA:
25943 case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget);
25944 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
25945 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
25946 case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
25947 case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
25948 case ISD::MLOAD: return PerformMLOADCombine(N, DAG, DCI, Subtarget);
25949 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
25950 case ISD::MSTORE: return PerformMSTORECombine(N, DAG, Subtarget);
25951 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, this);
25952 case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
25953 case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget);
25954 case X86ISD::FXOR:
25955 case X86ISD::FOR: return PerformFORCombine(N, DAG);
25956 case X86ISD::FMIN:
25957 case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG);
25958 case X86ISD::FAND: return PerformFANDCombine(N, DAG);
25959 case X86ISD::FANDN: return PerformFANDNCombine(N, DAG);
25960 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
25961 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
25962 case ISD::ANY_EXTEND:
25963 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget);
25964 case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
25965 case ISD::SIGN_EXTEND_INREG:
25966 return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
25967 case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG,DCI,Subtarget);
25968 case ISD::SETCC: return PerformISDSETCCCombine(N, DAG, Subtarget);
25969 case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget);
25970 case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget);
25971 case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget);
25972 case X86ISD::SHUFP: // Handle all target specific shuffles
25973 case X86ISD::PALIGNR:
25974 case X86ISD::UNPCKH:
25975 case X86ISD::UNPCKL:
25976 case X86ISD::MOVHLPS:
25977 case X86ISD::MOVLHPS:
25978 case X86ISD::PSHUFB:
25979 case X86ISD::PSHUFD:
25980 case X86ISD::PSHUFHW:
25981 case X86ISD::PSHUFLW:
25982 case X86ISD::MOVSS:
25983 case X86ISD::MOVSD:
25984 case X86ISD::VPERMILPI:
25985 case X86ISD::VPERM2X128:
25986 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget);
25987 case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget);
25988 case ISD::INTRINSIC_WO_CHAIN:
25989 return PerformINTRINSIC_WO_CHAINCombine(N, DAG, Subtarget);
25990 case X86ISD::INSERTPS: {
25991 if (getTargetMachine().getOptLevel() > CodeGenOpt::None)
25992 return PerformINSERTPSCombine(N, DAG, Subtarget);
25993 break;
25994 }
25995 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DAG, Subtarget);
25996 }
25998 return SDValue();
25999 }
26001 /// isTypeDesirableForOp - Return true if the target has native support for
26002 /// the specified value type and it is 'desirable' to use the type for the
26003 /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
26004 /// instruction encodings are longer and some i16 instructions are slow.
26005 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
26006 if (!isTypeLegal(VT))
26007 return false;
26008 if (VT != MVT::i16)
26009 return true;
26011 switch (Opc) {
26012 default:
26013 return true;
26014 case ISD::LOAD:
26015 case ISD::SIGN_EXTEND:
26016 case ISD::ZERO_EXTEND:
26017 case ISD::ANY_EXTEND:
26018 case ISD::SHL:
26019 case ISD::SRL:
26020 case ISD::SUB:
26021 case ISD::ADD:
26022 case ISD::MUL:
26023 case ISD::AND:
26024 case ISD::OR:
26025 case ISD::XOR:
26026 return false;
26027 }
26028 }
26030 /// IsDesirableToPromoteOp - This method queries the target whether it is
26031 /// beneficial for dag combiner to promote the specified node. If true, it
26032 /// should return the desired promotion type by reference.
26033 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
26034 EVT VT = Op.getValueType();
26035 if (VT != MVT::i16)
26036 return false;
26038 bool Promote = false;
26039 bool Commute = false;
26040 switch (Op.getOpcode()) {
26041 default: break;
26042 case ISD::LOAD: {
26043 LoadSDNode *LD = cast<LoadSDNode>(Op);
26044 // If the non-extending load has a single use and it's not live out, then it
26045 // might be folded.
26046 if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
26047 Op.hasOneUse()*/) {
26048 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
26049 UE = Op.getNode()->use_end(); UI != UE; ++UI) {
26050 // The only case where we'd want to promote LOAD (rather than it being
26051 // promoted as an operand) is when its only use is liveout.
26052 if (UI->getOpcode() != ISD::CopyToReg)
26053 return false;
26054 }
26055 }
26056 Promote = true;
26057 break;
26058 }
26059 case ISD::SIGN_EXTEND:
26060 case ISD::ZERO_EXTEND:
26061 case ISD::ANY_EXTEND:
26062 Promote = true;
26063 break;
26064 case ISD::SHL:
26065 case ISD::SRL: {
26066 SDValue N0 = Op.getOperand(0);
26067 // Look out for (store (shl (load), x)).
26068 if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
26069 return false;
26070 Promote = true;
26071 break;
26072 }
26073 case ISD::ADD:
26074 case ISD::MUL:
26075 case ISD::AND:
26076 case ISD::OR:
26077 case ISD::XOR:
26078 Commute = true;
26079 // fallthrough
26080 case ISD::SUB: {
26081 SDValue N0 = Op.getOperand(0);
26082 SDValue N1 = Op.getOperand(1);
26083 if (!Commute && MayFoldLoad(N1))
26084 return false;
26085 // Avoid disabling potential load folding opportunities.
26086 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
26087 return false;
26088 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
26089 return false;
26090 Promote = true;
26091 }
26092 }
26094 PVT = MVT::i32;
26095 return Promote;
26096 }
26098 //===----------------------------------------------------------------------===//
26099 // X86 Inline Assembly Support
26100 //===----------------------------------------------------------------------===//
26102 namespace {
26103 // Helper to match a string separated by whitespace.
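// E.g. matchAsm(Piece, "bswap", "$0") matches "bswap $0" with any amount of
// whitespace between the tokens; pieces must appear in order and each must
// be followed by whitespace or the end of the string.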
26104 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) {
26105 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace.
26107 for (unsigned i = 0, e = args.size(); i != e; ++i) {
26108 StringRef piece(*args[i]);
26109 if (!s.startswith(piece)) // Check if the piece matches.
26110 return false;
26112 s = s.substr(piece.size());
26113 StringRef::size_type pos = s.find_first_not_of(" \t");
26114 if (pos == 0) // We matched a prefix.
26115 return false;
26117 s = s.substr(pos);
26118 }
26120 return s.empty();
26121 }
26122 const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={};
26123 } // namespace
26125 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
26127 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
26128 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
26129 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
26130 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
26132 if (AsmPieces.size() == 3)
26133 return true;
26134 else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
26135 return true;
26136 }
26137 }
26139 return false;
26140 }
26141 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
26142 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
26144 std::string AsmStr = IA->getAsmString();
26146 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
26147 if (!Ty || Ty->getBitWidth() % 16 != 0)
26148 return false;
26150 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
26151 SmallVector<StringRef, 4> AsmPieces;
26152 SplitString(AsmStr, AsmPieces, ";\n");
26154 switch (AsmPieces.size()) {
26155 default: return false;
26156 case 1:
26157 // FIXME: this should verify that we are targeting a 486 or better. If not,
26158 // we will turn this bswap into something that will be lowered to logical
26159 // ops instead of emitting the bswap asm. For now, we don't support 486 or
26160 // lower so don't worry about this.
26161 // bswap $0
26162 if (matchAsm(AsmPieces[0], "bswap", "$0") ||
26163 matchAsm(AsmPieces[0], "bswapl", "$0") ||
26164 matchAsm(AsmPieces[0], "bswapq", "$0") ||
26165 matchAsm(AsmPieces[0], "bswap", "${0:q}") ||
26166 matchAsm(AsmPieces[0], "bswapl", "${0:q}") ||
26167 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) {
26168 // No need to check constraints, nothing other than the equivalent of
26169 // "=r,0" would be valid here.
26170 return IntrinsicLowering::LowerToByteSwap(CI);
26171 }
26173 // rorw $$8, ${0:w} --> llvm.bswap.i16
26174 if (CI->getType()->isIntegerTy(16) &&
26175 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26176 (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") ||
26177 matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) {
26178 AsmPieces.clear();
26179 const std::string &ConstraintsStr = IA->getConstraintString();
26180 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26181 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26182 if (clobbersFlagRegisters(AsmPieces))
26183 return IntrinsicLowering::LowerToByteSwap(CI);
26184 }
26185 break;
26186 case 3:
26187 if (CI->getType()->isIntegerTy(32) &&
26188 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26189 matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") &&
26190 matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") &&
26191 matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) {
26192 AsmPieces.clear();
26193 const std::string &ConstraintsStr = IA->getConstraintString();
26194 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26195 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26196 if (clobbersFlagRegisters(AsmPieces))
26197 return IntrinsicLowering::LowerToByteSwap(CI);
26198 }
26200 if (CI->getType()->isIntegerTy(64)) {
26201 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
26202 if (Constraints.size() >= 2 &&
26203 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
26204 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
26205 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
26206 if (matchAsm(AsmPieces[0], "bswap", "%eax") &&
26207 matchAsm(AsmPieces[1], "bswap", "%edx") &&
26208 matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx"))
26209 return IntrinsicLowering::LowerToByteSwap(CI);
26210 }
26211 }
26212 break;
26213 }
26215 return false;
26216 }
26217 /// getConstraintType - Given a constraint letter, return the type of
26218 /// constraint it is for this target.
26219 X86TargetLowering::ConstraintType
26220 X86TargetLowering::getConstraintType(const std::string &Constraint) const {
26221 if (Constraint.size() == 1) {
26222 switch (Constraint[0]) {
26223 case 'R':
26224 case 'q':
26225 case 'Q':
26226 case 'f':
26227 case 't':
26228 case 'u':
26229 case 'y':
26230 case 'x':
26231 case 'Y':
26232 case 'l':
26233 return C_RegisterClass;
26234 case 'a':
26235 case 'b':
26236 case 'c':
26237 case 'd':
26238 case 'S':
26239 case 'D':
26240 case 'A':
26241 return C_Register;
26242 case 'I':
26243 case 'J':
26244 case 'K':
26245 case 'L':
26246 case 'M':
26247 case 'N':
26248 case 'G':
26249 case 'C':
26250 case 'e':
26251 case 'Z':
26252 return C_Other;
26253 default:
26254 break;
26255 }
26256 }
26257 return TargetLowering::getConstraintType(Constraint);
26258 }
26260 /// Examine constraint type and operand type and determine a weight value.
26261 /// This object must already have been set up with the operand type
26262 /// and the current alternative constraint selected.
26263 TargetLowering::ConstraintWeight
26264 X86TargetLowering::getSingleConstraintMatchWeight(
26265 AsmOperandInfo &info, const char *constraint) const {
26266 ConstraintWeight weight = CW_Invalid;
26267 Value *CallOperandVal = info.CallOperandVal;
26268 // If we don't have a value, we can't do a match,
26269 // but allow it at the lowest weight.
26270 if (!CallOperandVal)
26271 return CW_Default;
26272 Type *type = CallOperandVal->getType();
26273 // Look at the constraint type.
26274 switch (*constraint) {
26276 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
26277 case 'R':
26278 case 'q':
26279 case 'Q':
26280 case 'a':
26281 case 'b':
26282 case 'c':
26283 case 'd':
26284 case 'S':
26285 case 'D':
26286 case 'A':
26287 if (CallOperandVal->getType()->isIntegerTy())
26288 weight = CW_SpecificReg;
26289 break;
26290 case 'f':
26291 case 't':
26292 case 'u':
26293 if (type->isFloatingPointTy())
26294 weight = CW_SpecificReg;
26295 break;
26296 case 'y':
26297 if (type->isX86_MMXTy() && Subtarget->hasMMX())
26298 weight = CW_SpecificReg;
26299 break;
26300 case 'x':
26301 case 'Y':
26302 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
26303 ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256()))
26304 weight = CW_Register;
26305 break;
26306 case 'I':
26307 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
26308 if (C->getZExtValue() <= 31)
26309 weight = CW_Constant;
26310 }
26311 break;
26312 case 'J':
26313 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26314 if (C->getZExtValue() <= 63)
26315 weight = CW_Constant;
26316 }
26317 break;
26318 case 'K':
26319 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26320 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
26321 weight = CW_Constant;
26322 }
26323 break;
26324 case 'L':
26325 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26326 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
26327 weight = CW_Constant;
26328 }
26329 break;
26330 case 'M':
26331 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26332 if (C->getZExtValue() <= 3)
26333 weight = CW_Constant;
26334 }
26335 break;
26336 case 'N':
26337 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26338 if (C->getZExtValue() <= 0xff)
26339 weight = CW_Constant;
26340 }
26341 break;
26342 case 'G':
26343 case 'C':
26344 if (dyn_cast<ConstantFP>(CallOperandVal)) {
26345 weight = CW_Constant;
26346 }
26347 break;
26348 case 'e':
26349 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26350 if ((C->getSExtValue() >= -0x80000000LL) &&
26351 (C->getSExtValue() <= 0x7fffffffLL))
26352 weight = CW_Constant;
26353 }
26354 break;
26355 case 'Z':
26356 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26357 if (C->getZExtValue() <= 0xffffffff)
26358 weight = CW_Constant;
26359 }
26360 break;
26361 }
26363 return weight;
26364 }
26365 /// LowerXConstraint - try to replace an X constraint, which matches anything,
26366 /// with another that has more specific requirements based on the type of the
26367 /// corresponding operand.
26368 const char *X86TargetLowering::
26369 LowerXConstraint(EVT ConstraintVT) const {
26370 // FP X constraints get lowered to SSE1/2 registers if available, otherwise
26371 // 'f' like normal targets.
26372 if (ConstraintVT.isFloatingPoint()) {
26373 if (Subtarget->hasSSE2())
26374 return "Y";
26375 if (Subtarget->hasSSE1())
26376 return "x";
26377 }
26379 return TargetLowering::LowerXConstraint(ConstraintVT);
26380 }
26382 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
26383 /// vector. If it is invalid, don't add anything to Ops.
26384 void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
26385 std::string &Constraint,
26386 std::vector<SDValue>&Ops,
26387 SelectionDAG &DAG) const {
26388 SDValue Result;
26390 // Only support length 1 constraints for now.
26391 if (Constraint.length() > 1) return;
26393 char ConstraintLetter = Constraint[0];
26394 switch (ConstraintLetter) {
26395 default: break;
26396 case 'I':
26397 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26398 if (C->getZExtValue() <= 31) {
26399 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26400 break;
26401 }
26402 }
26403 return;
26404 case 'J':
26405 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26406 if (C->getZExtValue() <= 63) {
26407 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26408 break;
26409 }
26410 }
26411 return;
26412 case 'K':
26413 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26414 if (isInt<8>(C->getSExtValue())) {
26415 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26416 break;
26417 }
26418 }
26419 return;
26420 case 'L':
26421 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26422 if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
26423 (Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
26424 Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
26425 break;
26426 }
26427 }
26428 return;
26429 case 'M':
26430 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26431 if (C->getZExtValue() <= 3) {
26432 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26433 break;
26434 }
26435 }
26436 return;
26437 case 'N':
26438 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26439 if (C->getZExtValue() <= 255) {
26440 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26441 break;
26442 }
26443 }
26444 return;
26445 case 'O':
26446 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26447 if (C->getZExtValue() <= 127) {
26448 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26449 break;
26450 }
26451 }
26452 return;
26453 case 'e': {
26454 // 32-bit signed value
26455 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26456 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
26457 C->getSExtValue())) {
26458 // Widen to 64 bits here to get it sign extended.
26459 Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
26460 break;
26461 }
26462 // FIXME gcc accepts some relocatable values here too, but only in certain
26463 // memory models; it's complicated.
26464 }
26465 return;
26466 }
26467 case 'Z': {
26468 // 32-bit unsigned value
26469 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26470 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
26471 C->getZExtValue())) {
26472 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26473 break;
26474 }
26475 }
26476 // FIXME gcc accepts some relocatable values here too, but only in certain
26477 // memory models; it's complicated.
26478 return;
26479 }
26480 case 'i': {
26481 // Literal immediates are always ok.
26482 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
26483 // Widen to 64 bits here to get it sign extended.
26484 Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
26485 break;
26486 }
26488 // In any sort of PIC mode addresses need to be computed at runtime by
26489 // adding in a register or some sort of table lookup. These can't
26490 // be used as immediates.
26491 if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
26492 return;
26494 // If we are in non-pic codegen mode, we allow the address of a global (with
26495 // an optional displacement) to be used with 'i'.
26496 GlobalAddressSDNode *GA = nullptr;
26497 int64_t Offset = 0;
26499 // Match either (GA), (GA+C), (GA+C1+C2), etc.
26500 while (1) {
26501 if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
26502 Offset += GA->getOffset();
26503 break;
26504 } else if (Op.getOpcode() == ISD::ADD) {
26505 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
26506 Offset += C->getZExtValue();
26507 Op = Op.getOperand(0);
26508 continue;
26509 }
26510 } else if (Op.getOpcode() == ISD::SUB) {
26511 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
26512 Offset += -C->getZExtValue();
26513 Op = Op.getOperand(0);
26514 continue;
26515 }
26516 }
26518 // Otherwise, this isn't something we can handle, reject it.
26519 return;
26520 }
26522 const GlobalValue *GV = GA->getGlobal();
26523 // If we require an extra load to get this address, as in PIC mode, we
26524 // can't accept it.
26525 if (isGlobalStubReference(
26526 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget())))
26527 return;
26529 Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
26530 GA->getValueType(0), Offset);
26531 break;
26532 }
26533 }
26535 if (Result.getNode()) {
26536 Ops.push_back(Result);
26537 return;
26538 }
26539 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
26540 }
26542 std::pair<unsigned, const TargetRegisterClass*>
26543 X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
26544 MVT VT) const {
26545 // First, see if this is a constraint that directly corresponds to an LLVM
26546 // register class.
26547 if (Constraint.size() == 1) {
26548 // GCC Constraint Letters
26549 switch (Constraint[0]) {
26551 // TODO: Slight differences here in allocation order and leaving
26552 // RIP in the class. Do they matter any more here than they do
26553 // in the normal allocation?
26554 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
26555 if (Subtarget->is64Bit()) {
26556 if (VT == MVT::i32 || VT == MVT::f32)
26557 return std::make_pair(0U, &X86::GR32RegClass);
26558 if (VT == MVT::i16)
26559 return std::make_pair(0U, &X86::GR16RegClass);
26560 if (VT == MVT::i8 || VT == MVT::i1)
26561 return std::make_pair(0U, &X86::GR8RegClass);
26562 if (VT == MVT::i64 || VT == MVT::f64)
26563 return std::make_pair(0U, &X86::GR64RegClass);
26564 break;
26565 }
26566 // 32-bit fallthrough
26567 case 'Q': // Q_REGS
26568 if (VT == MVT::i32 || VT == MVT::f32)
26569 return std::make_pair(0U, &X86::GR32_ABCDRegClass);
26570 if (VT == MVT::i16)
26571 return std::make_pair(0U, &X86::GR16_ABCDRegClass);
26572 if (VT == MVT::i8 || VT == MVT::i1)
26573 return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
26574 if (VT == MVT::i64)
26575 return std::make_pair(0U, &X86::GR64_ABCDRegClass);
26576 break;
26577 case 'r': // GENERAL_REGS
26578 case 'l': // INDEX_REGS
26579 if (VT == MVT::i8 || VT == MVT::i1)
26580 return std::make_pair(0U, &X86::GR8RegClass);
26581 if (VT == MVT::i16)
26582 return std::make_pair(0U, &X86::GR16RegClass);
26583 if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
26584 return std::make_pair(0U, &X86::GR32RegClass);
26585 return std::make_pair(0U, &X86::GR64RegClass);
26586 case 'R': // LEGACY_REGS
26587 if (VT == MVT::i8 || VT == MVT::i1)
26588 return std::make_pair(0U, &X86::GR8_NOREXRegClass);
26589 if (VT == MVT::i16)
26590 return std::make_pair(0U, &X86::GR16_NOREXRegClass);
26591 if (VT == MVT::i32 || !Subtarget->is64Bit())
26592 return std::make_pair(0U, &X86::GR32_NOREXRegClass);
26593 return std::make_pair(0U, &X86::GR64_NOREXRegClass);
26594 case 'f': // FP Stack registers.
26595 // If SSE is enabled for this VT, use f80 to ensure the isel moves the
26596 // value to the correct fpstack register class.
26597 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
26598 return std::make_pair(0U, &X86::RFP32RegClass);
26599 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
26600 return std::make_pair(0U, &X86::RFP64RegClass);
26601 return std::make_pair(0U, &X86::RFP80RegClass);
26602 case 'y': // MMX_REGS if MMX allowed.
26603 if (!Subtarget->hasMMX()) break;
26604 return std::make_pair(0U, &X86::VR64RegClass);
26605 case 'Y': // SSE_REGS if SSE2 allowed
26606 if (!Subtarget->hasSSE2()) break;
26607 // FALL THROUGH.
26608 case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
26609 if (!Subtarget->hasSSE1()) break;
26611 switch (VT.SimpleTy) {
26612 default: break;
26613 // Scalar SSE types.
26614 case MVT::f32:
26615 case MVT::i32:
26616 return std::make_pair(0U, &X86::FR32RegClass);
26617 case MVT::f64:
26618 case MVT::i64:
26619 return std::make_pair(0U, &X86::FR64RegClass);
26620 // Vector types.
26621 case MVT::v16i8:
26622 case MVT::v8i16:
26623 case MVT::v4i32:
26624 case MVT::v2i64:
26625 case MVT::v4f32:
26626 case MVT::v2f64:
26627 return std::make_pair(0U, &X86::VR128RegClass);
26628 // AVX types.
26629 case MVT::v32i8:
26630 case MVT::v16i16:
26631 case MVT::v8i32:
26632 case MVT::v4i64:
26633 case MVT::v8f32:
26634 case MVT::v4f64:
26635 return std::make_pair(0U, &X86::VR256RegClass);
26636 case MVT::v8f64:
26637 case MVT::v16f32:
26638 case MVT::v16i32:
26639 case MVT::v8i64:
26640 return std::make_pair(0U, &X86::VR512RegClass);
26641 }
26642 break;
26643 }
26644 }
26646 // Use the default implementation in TargetLowering to convert the register
26647 // constraint into a member of a register class.
26648 std::pair<unsigned, const TargetRegisterClass*> Res;
26649 Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
26651 // Not found as a standard register?
26652 if (!Res.second) {
26653 // Map st(0) -> st(7) -> ST0
26654 if (Constraint.size() == 7 && Constraint[0] == '{' &&
26655 tolower(Constraint[1]) == 's' &&
26656 tolower(Constraint[2]) == 't' &&
26657 Constraint[3] == '(' &&
26658 (Constraint[4] >= '0' && Constraint[4] <= '7') &&
26659 Constraint[5] == ')' &&
26660 Constraint[6] == '}') {
26662 Res.first = X86::FP0+Constraint[4]-'0';
26663 Res.second = &X86::RFP80RegClass;
26664 return Res;
26665 }
26667 // GCC allows "st(0)" to be called just plain "st".
26668 if (StringRef("{st}").equals_lower(Constraint)) {
26669 Res.first = X86::FP0;
26670 Res.second = &X86::RFP80RegClass;
26671 return Res;
26672 }
26674 // flags -> EFLAGS
26675 if (StringRef("{flags}").equals_lower(Constraint)) {
26676 Res.first = X86::EFLAGS;
26677 Res.second = &X86::CCRRegClass;
26678 return Res;
26679 }
26681 // 'A' means EAX + EDX.
26682 if (Constraint == "A") {
26683 Res.first = X86::EAX;
26684 Res.second = &X86::GR32_ADRegClass;
26685 return Res;
26686 }
26687 return Res;
26688 }
26690 // Otherwise, check to see if this is a register class of the wrong value
26691 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
26692 // turn into {ax},{dx}.
26693 if (Res.second->hasType(VT))
26694 return Res; // Correct type already, nothing to do.
26696 // All of the single-register GCC register classes map their values onto
26697 // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
26698 // really want an 8-bit or 32-bit register, map to the appropriate register
26699 // class and return the appropriate register.
26700 if (Res.second == &X86::GR16RegClass) {
26701 if (VT == MVT::i8 || VT == MVT::i1) {
26702 unsigned DestReg = 0;
26703 switch (Res.first) {
26704 default: break;
26705 case X86::AX: DestReg = X86::AL; break;
26706 case X86::DX: DestReg = X86::DL; break;
26707 case X86::CX: DestReg = X86::CL; break;
26708 case X86::BX: DestReg = X86::BL; break;
26709 }
26710 if (DestReg) {
26711 Res.first = DestReg;
26712 Res.second = &X86::GR8RegClass;
26713 }
26714 } else if (VT == MVT::i32 || VT == MVT::f32) {
26715 unsigned DestReg = 0;
26716 switch (Res.first) {
26717 default: break;
26718 case X86::AX: DestReg = X86::EAX; break;
26719 case X86::DX: DestReg = X86::EDX; break;
26720 case X86::CX: DestReg = X86::ECX; break;
26721 case X86::BX: DestReg = X86::EBX; break;
26722 case X86::SI: DestReg = X86::ESI; break;
26723 case X86::DI: DestReg = X86::EDI; break;
26724 case X86::BP: DestReg = X86::EBP; break;
26725 case X86::SP: DestReg = X86::ESP; break;
26726 }
26727 if (DestReg) {
26728 Res.first = DestReg;
26729 Res.second = &X86::GR32RegClass;
26730 }
26731 } else if (VT == MVT::i64 || VT == MVT::f64) {
26732 unsigned DestReg = 0;
26733 switch (Res.first) {
26734 default: break;
26735 case X86::AX: DestReg = X86::RAX; break;
26736 case X86::DX: DestReg = X86::RDX; break;
26737 case X86::CX: DestReg = X86::RCX; break;
26738 case X86::BX: DestReg = X86::RBX; break;
26739 case X86::SI: DestReg = X86::RSI; break;
26740 case X86::DI: DestReg = X86::RDI; break;
26741 case X86::BP: DestReg = X86::RBP; break;
26742 case X86::SP: DestReg = X86::RSP; break;
26743 }
26744 if (DestReg) {
26745 Res.first = DestReg;
26746 Res.second = &X86::GR64RegClass;
26747 }
26749 } else if (Res.second == &X86::FR32RegClass ||
26750 Res.second == &X86::FR64RegClass ||
26751 Res.second == &X86::VR128RegClass ||
26752 Res.second == &X86::VR256RegClass ||
26753 Res.second == &X86::FR32XRegClass ||
26754 Res.second == &X86::FR64XRegClass ||
26755 Res.second == &X86::VR128XRegClass ||
26756 Res.second == &X86::VR256XRegClass ||
26757 Res.second == &X86::VR512RegClass) {
26758 // Handle references to XMM physical registers that got mapped into the
26759 // wrong class. This can happen with constraints like {xmm0} where the
26760 // target independent register mapper will just pick the first match it can
26761 // find, ignoring the required type.
26763 if (VT == MVT::f32 || VT == MVT::i32)
26764 Res.second = &X86::FR32RegClass;
26765 else if (VT == MVT::f64 || VT == MVT::i64)
26766 Res.second = &X86::FR64RegClass;
26767 else if (X86::VR128RegClass.hasType(VT))
26768 Res.second = &X86::VR128RegClass;
26769 else if (X86::VR256RegClass.hasType(VT))
26770 Res.second = &X86::VR256RegClass;
26771 else if (X86::VR512RegClass.hasType(VT))
26772 Res.second = &X86::VR512RegClass;
26773 }
26775 return Res;
26776 }
26778 int X86TargetLowering::getScalingFactorCost(const AddrMode &AM,
26779 Type *Ty) const {
26780 // Scaling factors are not free at all.
26781 // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
26782 // will take 2 allocations in the out of order engine instead of 1
26783 // for plain addressing mode, i.e. inst (reg1).
26784 // E.g.,
26785 // vaddps (%rsi,%rdx), %ymm0, %ymm1
26786 // Requires two allocations (one for the load, one for the computation)
26787 // whereas:
26788 // vaddps (%rsi), %ymm0, %ymm1
26789 // Requires just 1 allocation, i.e., freeing allocations for other operations
26790 // and having less micro operations to execute.
26792 // For some X86 architectures, this is even worse because for instance for
26793 // stores, the complex addressing mode forces the instruction to use the
26794 // "load" ports instead of the dedicated "store" port.
26795 // E.g., on Haswell:
26796 // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
26797 // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
26798 if (isLegalAddressingMode(AM, Ty))
26799 // Scale represents reg2 * scale, thus account for 1
26800 // as soon as we use a second register.
26801 return AM.Scale != 0;
26802 return -1;
26803 }
26805 bool X86TargetLowering::isTargetFTOL() const {
26806 return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit();
26807 }