//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/VariadicFunction.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "X86IntrinsicsInfo.h"
#include <bitset>
#include <numeric>
#include <cctype>
using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> ExperimentalVectorWideningLegalization(
    "x86-experimental-vector-widening-legalization", cl::init(false),
    cl::desc("Enable an experimental vector type legalization through widening "
             "rather than promotion."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLowering(
    "x86-experimental-vector-shuffle-lowering", cl::init(true),
    cl::desc("Enable an experimental vector shuffle lowering code path."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLegality(
    "x86-experimental-vector-shuffle-legality", cl::init(false),
    cl::desc("Enable experimental shuffle legality based on the experimental "
             "shuffle lowering. Should only be used with the experimental "
             "shuffle lowering."),
    cl::Hidden);

static cl::opt<int> ReciprocalEstimateRefinementSteps(
    "x86-recip-refinement-steps", cl::init(1),
    cl::desc("Specify the number of Newton-Raphson iterations applied to the "
             "result of the hardware reciprocal estimate instruction."),
    cl::NotHidden);

// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
                                SelectionDAG &DAG, SDLoc dl,
                                unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits()/vectorWidth;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements()/Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(ResultVT);

  // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR.
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
                               * ElemsPerChunk);
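  // Worked example: extracting 128 bits from a 256-bit v8i32 at IdxVal = 5
  // gives ElemsPerChunk = 128/32 = 4 and NormalizedIdxVal = ((5*32)/128)*4
  // = 4, i.e. an unaligned index is rounded down to its chunk boundary.
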
  // If the input is a buildvector just emit a smaller one.
  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
                       makeArrayRef(Vec->op_begin() + NormalizedIdxVal,
                                    ElemsPerChunk));

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
}

/// Generate a DAG to grab 128-bits from a vector > 128 bits. This
/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
/// instructions or a simple subregister reference. Idx is an index in the
/// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert((Vec.getValueType().is256BitVector() ||
          Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 128);
}

/// Generate a DAG to grab 256-bits from a 512-bit vector.
static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 256);
}

static SDValue InsertSubVector(SDValue Result, SDValue Vec,
                               unsigned IdxVal, SelectionDAG &DAG,
                               SDLoc dl, unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  // Inserting UNDEF is Result
  if (Vec.getOpcode() == ISD::UNDEF)
    return Result;
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant vectorWidth bits.
  unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/vectorWidth)
                               * ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
}

/// Generate a DAG to put 128-bits into a vector > 128 bits. This
/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
/// simple superregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}

static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
}

/// Concat two 128-bit vectors into a 256 bit vector using VINSERTF128
/// instructions. This is used because creating CONCAT_VECTOR nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
}

static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert256BitVector(V, V2, NumElems/2, DAG, dl);
}

X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  TD = getDataLayout();

  // Set up the TargetLowering object.
  static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
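  // For example, a scalar SETCC materializes 0 or 1 in an 8-bit register via
  // SETcc, while a vector compare such as PCMPEQD produces all-ones (-1) or
  // all-zeros in each lane.
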
  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget->isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Bypass expensive divides on Atom when compiling with O2.
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget->hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget->hasSlowDivide64() && Subtarget->is64Bit())
      addBypassSlowDiv(64, 16);
  }

  if (Subtarget->isTargetKnownWindowsMSVC()) {
    // Setup Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);

    // The _ftol2 runtime function has an unusual calling conv, which
    // is modeled by a special pseudo-instruction.
    setLibcallName(RTLIB::FPTOUINT_F64_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F64_I32, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I32, nullptr);
  }

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetWindowsGNU()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);
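  // A truncating store such as (truncstore i64 %x to i16) is therefore
  // legalized into an explicit TRUNCATE followed by an ordinary i16 store.
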
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  } else if (!TM.Options.UseSoftFloat) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

  if (!TM.Options.UseSoftFloat) {
    // SSE has no i16 to fp conversion, only i32
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
      // f32 and f64 cases are Legal, f80 case is not
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Promote);
  }

  // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else if (!TM.Options.UseSoftFloat) {
    // Since AVX is a superset of SSE3, only check for SSE here.
    if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  }

  if (isTargetFTOL()) {
    // Use the _ftol2 runtime function, which has a pseudo-instruction
    // to handle its weird calling convention.
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
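  //
  // For example, the hardware DIV instruction produces the quotient in EAX
  // and the remainder in EDX at once, so x/y and x%y in the same block can
  // share a single divide.
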
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
    setOperationAction(ISD::ADDC, VT, Custom);
    setOperationAction(ISD::ADDE, VT, Custom);
    setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f80, Expand);
  setOperationAction(ISD::BR_CC, MVT::i8, Expand);
  setOperationAction(ISD::BR_CC, MVT::i16, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f80, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationAction(ISD::CTTZ, MVT::i8, Promote);
  AddPromotedToType(ISD::CTTZ, MVT::i8, MVT::i32);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i8, Promote);
  AddPromotedToType(ISD::CTTZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  if (Subtarget->hasBMI()) {
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::i32, Custom);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  }

  if (Subtarget->hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationAction(ISD::CTLZ, MVT::i8, Promote);
    AddPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Promote);
    AddPromotedToType(ISD::CTLZ_ZERO_UNDEF, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTLZ, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::i32, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::CTLZ, MVT::i64, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }

  // Special handling for half-precision floating point conversions.
  // If we don't have F16C support, then lower half float conversions
  // into library calls.
  if (TM.Options.UseSoftFloat || !Subtarget->hasF16C()) {
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  }

  // There's never any support for operations beyond MVT::f32.
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f80, MVT::f16, Expand);

  if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP, MVT::i8, Promote);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  if (!Subtarget->hasMOVBE())
    setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i8, Custom);
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuation, user-level threading, etc. As a result, no other
  // SjLj exception interfaces are implemented; please don't build your own
  // exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  }

  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
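  // On 32-bit targets the custom lowering assembles a 64-bit shift from
  // 32-bit halves, typically a SHLD/SHRD on one half plus a plain shift.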
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Expand certain atomics
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (Subtarget->hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }

  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWin64()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  if (Subtarget->is64Bit() && !Subtarget->isTargetWin64()) {
    // TargetInfo::X86_64ABIBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  } else {
    // TargetInfo::CharPtrBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, getPointerTy(), Custom);
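  // Custom lowering of dynamic allocas also covers targets that must probe
  // the stack page-by-page (e.g. Windows, via a __chkstk-style call).
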
  if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::FR64RegClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // Lower this to FGETSIGNx86 plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0)); // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (!TM.Options.UseSoftFloat) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FSIN, MVT::f32, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f32, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Long double always uses X87.
  if (!TM.Options.UseSoftFloat) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
      addLegalFPImmediate(TmpFlt); // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt); // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2); // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
    }

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f80, Expand);
      setOperationAction(ISD::FCOS, MVT::f80, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
    }

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL, MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT, MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  // MMX-sized vectors (other than x86mmx) are expected to be expanded
  // into smaller operations.
  setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
  setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
  setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
  setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
  setOperationAction(ISD::AND, MVT::v8i8, Expand);
  setOperationAction(ISD::AND, MVT::v4i16, Expand);
  setOperationAction(ISD::AND, MVT::v2i32, Expand);
  setOperationAction(ISD::AND, MVT::v1i64, Expand);
  setOperationAction(ISD::OR, MVT::v8i8, Expand);
  setOperationAction(ISD::OR, MVT::v4i16, Expand);
  setOperationAction(ISD::OR, MVT::v2i32, Expand);
  setOperationAction(ISD::OR, MVT::v1i64, Expand);
  setOperationAction(ISD::XOR, MVT::v8i8, Expand);
  setOperationAction(ISD::XOR, MVT::v4i16, Expand);
  setOperationAction(ISD::XOR, MVT::v2i32, Expand);
  setOperationAction(ISD::XOR, MVT::v1i64, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
  setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
  setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
  setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
  setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
  setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
  setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, &X86::VR128RegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::FABS, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, &X86::VR128RegClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
    setOperationAction(ISD::FABS, MVT::v2f64, Custom);

    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Only provide customized ctpop vector bit twiddling for vector types we
    // know to perform better than using the popcnt instructions on each vector
    // element. If popcnt isn't supported, always provide the custom version.
    if (!Subtarget->hasPOPCNT()) {
      setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
      setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);
    }

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      // Do not attempt to custom lower non-128-bit vectors
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    // We support custom legalizing of sext and anyext loads for specific
    // memory vector types which we can load as a scalar (or sequence of
    // scalars) and extend in-register to a legal 128-bit vector type. For sext
    // loads these must work with a single scalar load.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
    }

    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-128-bit vectors
      if (!VT.is128BitVector())
        continue;

      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType(ISD::AND, VT, MVT::v2i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType(ISD::OR, VT, MVT::v2i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType(ISD::XOR, VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType(ISD::LOAD, VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType(ISD::SELECT, VT, MVT::v2i64);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    // As there is no 64-bit GPR available, we need to build a special custom
    // sequence to convert from v2i32 to v2f32.
    if (!Subtarget->is64Bit())
      setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);

    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    setOperationAction(ISD::VSELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v2i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4i32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8i16, Custom);
    // There is no BLENDI for byte vectors. We don't need to custom lower
    // some vselects for now.
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);

    // SSE41 brings specific instructions for doing vector sign extend even in
    // cases where we don't have SRA.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
    }

    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
    setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);

    setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
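    // For instance, a (v8i16 (sextload v8i8)) now selects to a single
    // PMOVSXBW, and the matching ZEXTLOAD patterns map onto the PMOVZX*
    // forms.
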
    // i8 and i16 vectors are custom because the source register and source
    // memory operand types are not the same width. f32 vectors are
    // custom since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    // FIXME: these should be Legal, but that's only for the case where
    // the index is constant. For now custom expand to deal with that.
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }

  if (Subtarget->hasSSE2()) {
    setOperationAction(ISD::SRL, MVT::v8i16, Custom);
    setOperationAction(ISD::SRL, MVT::v16i8, Custom);

    setOperationAction(ISD::SHL, MVT::v8i16, Custom);
    setOperationAction(ISD::SHL, MVT::v16i8, Custom);

    setOperationAction(ISD::SRA, MVT::v8i16, Custom);
    setOperationAction(ISD::SRA, MVT::v16i8, Custom);

    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
    setOperationAction(ISD::SRL, MVT::v2i64, Custom);
    setOperationAction(ISD::SRL, MVT::v4i32, Custom);

    setOperationAction(ISD::SHL, MVT::v2i64, Custom);
    setOperationAction(ISD::SHL, MVT::v4i32, Custom);

    setOperationAction(ISD::SRA, MVT::v4i32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasFp256()) {
    addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64, &X86::VR256RegClass);

    setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i64, Legal);

    setOperationAction(ISD::FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v8f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v8f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
    setOperationAction(ISD::FABS, MVT::v8f32, Custom);

    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
    setOperationAction(ISD::FABS, MVT::v4f64, Custom);

    // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
    // even though v8i16 is a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);

    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);

    setOperationAction(ISD::SRL, MVT::v16i16, Custom);
    setOperationAction(ISD::SRL, MVT::v32i8, Custom);

    setOperationAction(ISD::SHL, MVT::v16i16, Custom);
    setOperationAction(ISD::SHL, MVT::v32i8, Custom);

    setOperationAction(ISD::SRA, MVT::v16i16, Custom);
    setOperationAction(ISD::SRA, MVT::v32i8, Custom);

    setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i64, Custom);

    setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::VSELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8i32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
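    // AVX1 has no cross-lane integer truncate instruction, so the custom
    // lowering typically packs the lanes down with shuffles.
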
    if (Subtarget->hasFMA() || Subtarget->hasFMA4()) {
      setOperationAction(ISD::FMA, MVT::v8f32, Legal);
      setOperationAction(ISD::FMA, MVT::v4f64, Legal);
      setOperationAction(ISD::FMA, MVT::v4f32, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::f32, Legal);
      setOperationAction(ISD::FMA, MVT::f64, Legal);
    }

    if (Subtarget->hasInt256()) {
      setOperationAction(ISD::ADD, MVT::v4i64, Legal);
      setOperationAction(ISD::ADD, MVT::v8i32, Legal);
      setOperationAction(ISD::ADD, MVT::v16i16, Legal);
      setOperationAction(ISD::ADD, MVT::v32i8, Legal);

      setOperationAction(ISD::SUB, MVT::v4i64, Legal);
      setOperationAction(ISD::SUB, MVT::v8i32, Legal);
      setOperationAction(ISD::SUB, MVT::v16i16, Legal);
      setOperationAction(ISD::SUB, MVT::v32i8, Legal);

      setOperationAction(ISD::MUL, MVT::v4i64, Custom);
      setOperationAction(ISD::MUL, MVT::v8i32, Legal);
      setOperationAction(ISD::MUL, MVT::v16i16, Legal);
      // Don't lower v32i8 because there is no 128-bit byte mul

      setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
      setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
      setOperationAction(ISD::MULHU, MVT::v16i16, Legal);
      setOperationAction(ISD::MULHS, MVT::v16i16, Legal);

      setOperationAction(ISD::VSELECT, MVT::v16i16, Custom);
      setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);

1299 // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
1300     // when we have a 256-bit-wide blend with immediate.
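    // A sketch of one common approach (not necessarily the exact DAG
    // emitted): split each unsigned 32-bit lane into 16-bit halves, each of
    // which is exactly representable after a signed conversion, e.g. per lane:
    //   float hi = (float)(x >> 16) * 65536.0f;  // upper half, rescaled
    //   float lo = (float)(x & 0xFFFF);          // lower half
    //   float r  = hi + lo;                      // exact uint32 -> float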
1301 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1303 // Only provide customized ctpop vector bit twiddling for vector types we
1304 // know to perform better than using the popcnt instructions on each
1305 // vector element. If popcnt isn't supported, always provide the custom
1306       // version.
1307 if (!Subtarget->hasPOPCNT())
1308 setOperationAction(ISD::CTPOP, MVT::v4i64, Custom);
1310 // Custom CTPOP always performs better on natively supported v8i32
1311 setOperationAction(ISD::CTPOP, MVT::v8i32, Custom);
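      // The twiddling in question is the classic SWAR popcount applied per
      // 32-bit lane (a sketch of the idea, not the exact node sequence):
      //   x = x - ((x >> 1) & 0x55555555);
      //   x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
      //   x = (x + (x >> 4)) & 0x0F0F0F0F;
      //   count = (x * 0x01010101) >> 24;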
1313 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1314 setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1315 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1316 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1317 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1318 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1319 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1321 setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1322 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1323 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1324 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1325 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1326 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
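      // For example, a v8i8 -> v8i32 sign-extending load can become a single
      //   vpmovsxbd (%rdi), %ymm0   ; load 8 x i8, sign-extend to 8 x i32
      // rather than a 64-bit load followed by a separate extend node.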
1328 setOperationAction(ISD::ADD, MVT::v4i64, Custom);
1329 setOperationAction(ISD::ADD, MVT::v8i32, Custom);
1330 setOperationAction(ISD::ADD, MVT::v16i16, Custom);
1331 setOperationAction(ISD::ADD, MVT::v32i8, Custom);
1333 setOperationAction(ISD::SUB, MVT::v4i64, Custom);
1334 setOperationAction(ISD::SUB, MVT::v8i32, Custom);
1335 setOperationAction(ISD::SUB, MVT::v16i16, Custom);
1336 setOperationAction(ISD::SUB, MVT::v32i8, Custom);
1338 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1339 setOperationAction(ISD::MUL, MVT::v8i32, Custom);
1340 setOperationAction(ISD::MUL, MVT::v16i16, Custom);
1341 // Don't lower v32i8 because there is no 128-bit byte mul
1344 // In the customized shift lowering, the legal cases in AVX2 will be
1345     // recognized.
1346 setOperationAction(ISD::SRL, MVT::v4i64, Custom);
1347 setOperationAction(ISD::SRL, MVT::v8i32, Custom);
1349 setOperationAction(ISD::SHL, MVT::v4i64, Custom);
1350 setOperationAction(ISD::SHL, MVT::v8i32, Custom);
1352 setOperationAction(ISD::SRA, MVT::v8i32, Custom);
1354 // Custom lower several nodes for 256-bit types.
1355 for (MVT VT : MVT::vector_valuetypes()) {
1356 if (VT.getScalarSizeInBits() >= 32) {
1357 setOperationAction(ISD::MLOAD, VT, Legal);
1358 setOperationAction(ISD::MSTORE, VT, Legal);
1360 // Extract subvector is special because the value type
1361 // (result) is 128-bit but the source is 256-bit wide.
1362 if (VT.is128BitVector()) {
1363 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1364       }
1365 // Do not attempt to custom lower other non-256-bit vectors
1366 if (!VT.is256BitVector())
1367         continue;
1369 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1370 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1371 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1372 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1373 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1374 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1375 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1378 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
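    // Promotion here is a pure re-typing: e.g. (and v32i8 %a, %b) is bitcast
    // to v4i64, performed as one 256-bit AND, and bitcast back. Bitwise ops
    // and full-width loads are lane-size agnostic, so nothing changes
    // semantically.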
1379 for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
1380 MVT VT = (MVT::SimpleValueType)i;
1382 // Do not attempt to promote non-256-bit vectors
1383 if (!VT.is256BitVector())
1384         continue;
1386 setOperationAction(ISD::AND, VT, Promote);
1387 AddPromotedToType (ISD::AND, VT, MVT::v4i64);
1388 setOperationAction(ISD::OR, VT, Promote);
1389 AddPromotedToType (ISD::OR, VT, MVT::v4i64);
1390 setOperationAction(ISD::XOR, VT, Promote);
1391 AddPromotedToType (ISD::XOR, VT, MVT::v4i64);
1392 setOperationAction(ISD::LOAD, VT, Promote);
1393 AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
1394 setOperationAction(ISD::SELECT, VT, Promote);
1395 AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
1396     }
1397   }
1399 if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512()) {
1400 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1401 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1402 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1403 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1405 addRegisterClass(MVT::i1, &X86::VK1RegClass);
1406 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1407 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1409 for (MVT VT : MVT::fp_vector_valuetypes())
1410 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
1412 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
1413 setOperationAction(ISD::SETCC, MVT::i1, Custom);
1414 setOperationAction(ISD::XOR, MVT::i1, Legal);
1415 setOperationAction(ISD::OR, MVT::i1, Legal);
1416 setOperationAction(ISD::AND, MVT::i1, Legal);
1417 setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
1418 setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
1419 setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
1420 setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
1421 setOperationAction(ISD::LOAD, MVT::v16i1, Legal);
1423 setOperationAction(ISD::FADD, MVT::v16f32, Legal);
1424 setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
1425 setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
1426 setOperationAction(ISD::FDIV, MVT::v16f32, Legal);
1427 setOperationAction(ISD::FSQRT, MVT::v16f32, Legal);
1428 setOperationAction(ISD::FNEG, MVT::v16f32, Custom);
1430 setOperationAction(ISD::FADD, MVT::v8f64, Legal);
1431 setOperationAction(ISD::FSUB, MVT::v8f64, Legal);
1432 setOperationAction(ISD::FMUL, MVT::v8f64, Legal);
1433 setOperationAction(ISD::FDIV, MVT::v8f64, Legal);
1434 setOperationAction(ISD::FSQRT, MVT::v8f64, Legal);
1435 setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
1436 setOperationAction(ISD::FMA, MVT::v8f64, Legal);
1437 setOperationAction(ISD::FMA, MVT::v16f32, Legal);
1439 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
1440 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
1441 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
1442 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
1443 if (Subtarget->is64Bit()) {
1444 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
1445 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
1446 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
1447 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
1448     }
1449 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1450 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1451 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1452 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1453 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1454 setOperationAction(ISD::SINT_TO_FP, MVT::v8i1, Custom);
1455 setOperationAction(ISD::SINT_TO_FP, MVT::v16i1, Custom);
1456 setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Promote);
1457 setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Promote);
1458 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1459 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1460 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1461 setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
1462 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
1464 setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
1465 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1466 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1467 setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
1468 setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
1469 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1470 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1471 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1472 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1473 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1474 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
1475 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
1476 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
1478 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
1479 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
1480 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
1481 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
1482 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
1483 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Legal);
1485 setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
1486 setOperationAction(ISD::SETCC, MVT::v8i1, Custom);
1488 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1490 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
1491 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
1492 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
1493 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
1494 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
1495 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
1496 setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
1497 setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
1498 setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
1500 setOperationAction(ISD::ADD, MVT::v8i64, Legal);
1501 setOperationAction(ISD::ADD, MVT::v16i32, Legal);
1503 setOperationAction(ISD::SUB, MVT::v8i64, Legal);
1504 setOperationAction(ISD::SUB, MVT::v16i32, Legal);
1506 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1508 setOperationAction(ISD::SRL, MVT::v8i64, Custom);
1509 setOperationAction(ISD::SRL, MVT::v16i32, Custom);
1511 setOperationAction(ISD::SHL, MVT::v8i64, Custom);
1512 setOperationAction(ISD::SHL, MVT::v16i32, Custom);
1514 setOperationAction(ISD::SRA, MVT::v8i64, Custom);
1515 setOperationAction(ISD::SRA, MVT::v16i32, Custom);
1517 setOperationAction(ISD::AND, MVT::v8i64, Legal);
1518 setOperationAction(ISD::OR, MVT::v8i64, Legal);
1519 setOperationAction(ISD::XOR, MVT::v8i64, Legal);
1520 setOperationAction(ISD::AND, MVT::v16i32, Legal);
1521 setOperationAction(ISD::OR, MVT::v16i32, Legal);
1522 setOperationAction(ISD::XOR, MVT::v16i32, Legal);
1524 if (Subtarget->hasCDI()) {
1525 setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
1526 setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
1529 // Custom lower several nodes.
1530 for (MVT VT : MVT::vector_valuetypes()) {
1531 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1532 // Extract subvector is special because the value type
1533 // (result) is 256/128-bit but the source is 512-bit wide.
1534 if (VT.is128BitVector() || VT.is256BitVector()) {
1535 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1536       }
1537 if (VT.getVectorElementType() == MVT::i1)
1538 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1540 // Do not attempt to custom lower other non-512-bit vectors
1541 if (!VT.is512BitVector())
1542         continue;
1544       if (EltSize >= 32) {
1545 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1546 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1547 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1548 setOperationAction(ISD::VSELECT, VT, Legal);
1549 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1550 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1551 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1552 setOperationAction(ISD::MLOAD, VT, Legal);
1553 setOperationAction(ISD::MSTORE, VT, Legal);
1556 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1557 MVT VT = (MVT::SimpleValueType)i;
1559 // Do not attempt to promote non-512-bit vectors.
1560 if (!VT.is512BitVector())
1561         continue;
1563 setOperationAction(ISD::SELECT, VT, Promote);
1564 AddPromotedToType (ISD::SELECT, VT, MVT::v8i64);
1568 if (!TM.Options.UseSoftFloat && Subtarget->hasBWI()) {
1569 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1570 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1572 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1573 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1575 setOperationAction(ISD::LOAD, MVT::v32i16, Legal);
1576 setOperationAction(ISD::LOAD, MVT::v64i8, Legal);
1577 setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
1578 setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
1579 setOperationAction(ISD::ADD, MVT::v32i16, Legal);
1580 setOperationAction(ISD::ADD, MVT::v64i8, Legal);
1581 setOperationAction(ISD::SUB, MVT::v32i16, Legal);
1582 setOperationAction(ISD::SUB, MVT::v64i8, Legal);
1583 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1585 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1586 const MVT VT = (MVT::SimpleValueType)i;
1588 const unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1590 // Do not attempt to promote non-512-bit vectors.
1591 if (!VT.is512BitVector())
1592         continue;
1594       if (EltSize < 32) {
1595 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1596 setOperationAction(ISD::VSELECT, VT, Legal);
1601 if (!TM.Options.UseSoftFloat && Subtarget->hasVLX()) {
1602 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1603 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1605 setOperationAction(ISD::SETCC, MVT::v4i1, Custom);
1606 setOperationAction(ISD::SETCC, MVT::v2i1, Custom);
1607 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Legal);
1609 setOperationAction(ISD::AND, MVT::v8i32, Legal);
1610 setOperationAction(ISD::OR, MVT::v8i32, Legal);
1611 setOperationAction(ISD::XOR, MVT::v8i32, Legal);
1612 setOperationAction(ISD::AND, MVT::v4i32, Legal);
1613 setOperationAction(ISD::OR, MVT::v4i32, Legal);
1614 setOperationAction(ISD::XOR, MVT::v4i32, Legal);
1615   }
1617   // SIGN_EXTEND_INREG is evaluated based on the extend type. Handle the
1618   // expansion of this type with custom code.
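  // As a scalar analogy (a sketch): sign_extend_inreg of an i8 held in an i32
  // is
  //   r = (int32_t)(x << 24) >> 24;   // arithmetic right shift
  // and the vector expansion performs the equivalent per-lane shifts when no
  // direct instruction exists.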
1619 for (MVT VT : MVT::vector_valuetypes())
1620 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
1622 // We want to custom lower some of our intrinsics.
1623 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1624 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1625 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1626 if (!Subtarget->is64Bit())
1627 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1629 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1630 // handle type legalization for these operations here.
1632 // FIXME: We really should do custom legalization for addition and
1633 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1634 // than generic legalization for 64-bit multiplication-with-overflow, though.
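  // In the loop below, the idea (sketch) is that the result and the overflow
  // bit come from the same flag-producing node, e.g. for SADDO on i32:
  //   addl %esi, %edi    ; sum, sets OF in EFLAGS
  //   seto %al           ; capture the signed-overflow predicate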
1635 for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
1636 // Add/Sub/Mul with overflow operations are custom lowered.
1637     MVT VT = IntVTs[i];
1638 setOperationAction(ISD::SADDO, VT, Custom);
1639 setOperationAction(ISD::UADDO, VT, Custom);
1640 setOperationAction(ISD::SSUBO, VT, Custom);
1641 setOperationAction(ISD::USUBO, VT, Custom);
1642 setOperationAction(ISD::SMULO, VT, Custom);
1643 setOperationAction(ISD::UMULO, VT, Custom);
1644   }
1647 if (!Subtarget->is64Bit()) {
1648     // These libcalls are not available in 32-bit mode.
1649 setLibcallName(RTLIB::SHL_I128, nullptr);
1650 setLibcallName(RTLIB::SRL_I128, nullptr);
1651 setLibcallName(RTLIB::SRA_I128, nullptr);
1652   }
1654 // Combine sin / cos into one node or libcall if possible.
1655 if (Subtarget->hasSinCos()) {
1656 setLibcallName(RTLIB::SINCOS_F32, "sincosf");
1657 setLibcallName(RTLIB::SINCOS_F64, "sincos");
1658 if (Subtarget->isTargetDarwin()) {
1659 // For MacOSX, we don't want the normal expansion of a libcall to sincos.
1660 // We want to issue a libcall to __sincos_stret to avoid memory traffic.
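      // Roughly (a sketch, not the exact prototype used by the runtime):
      //   struct sincos_ret { double sn; double cs; };
      //   struct sincos_ret __sincos_stret(double x);
      // so one call yields both values, returned in registers on x86-64.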
1661 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1662 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1663     }
1664   }
1666 if (Subtarget->isTargetWin64()) {
1667 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1668 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1669 setOperationAction(ISD::SREM, MVT::i128, Custom);
1670 setOperationAction(ISD::UREM, MVT::i128, Custom);
1671 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1672 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
1673   }
1675 // We have target-specific dag combine patterns for the following nodes:
1676 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1677 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1678 setTargetDAGCombine(ISD::VSELECT);
1679 setTargetDAGCombine(ISD::SELECT);
1680 setTargetDAGCombine(ISD::SHL);
1681 setTargetDAGCombine(ISD::SRA);
1682 setTargetDAGCombine(ISD::SRL);
1683 setTargetDAGCombine(ISD::OR);
1684 setTargetDAGCombine(ISD::AND);
1685 setTargetDAGCombine(ISD::ADD);
1686 setTargetDAGCombine(ISD::FADD);
1687 setTargetDAGCombine(ISD::FSUB);
1688 setTargetDAGCombine(ISD::FMA);
1689 setTargetDAGCombine(ISD::SUB);
1690 setTargetDAGCombine(ISD::LOAD);
1691 setTargetDAGCombine(ISD::MLOAD);
1692 setTargetDAGCombine(ISD::STORE);
1693 setTargetDAGCombine(ISD::MSTORE);
1694 setTargetDAGCombine(ISD::ZERO_EXTEND);
1695 setTargetDAGCombine(ISD::ANY_EXTEND);
1696 setTargetDAGCombine(ISD::SIGN_EXTEND);
1697 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1698 setTargetDAGCombine(ISD::TRUNCATE);
1699 setTargetDAGCombine(ISD::SINT_TO_FP);
1700 setTargetDAGCombine(ISD::SETCC);
1701 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1702 setTargetDAGCombine(ISD::BUILD_VECTOR);
1703 setTargetDAGCombine(ISD::MUL);
1704 setTargetDAGCombine(ISD::XOR);
1706 computeRegisterProperties();
1708 // On Darwin, -Os means optimize for size without hurting performance,
1709   // so do not reduce the limit.
1710 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
1711 MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
1712 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
1713 MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1714 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
1715 MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1716 setPrefLoopAlignment(4); // 2^4 bytes.
1718   // Predictable cmovs don't hurt on Atom because it's in-order.
1719 PredictableSelectIsExpensive = !Subtarget->isAtom();
1720 EnableExtLdPromotion = true;
1721 setPrefFunctionAlignment(4); // 2^4 bytes.
1723 verifyIntrinsicTables();
1726 // This has so far only been implemented for 64-bit MachO.
1727 bool X86TargetLowering::useLoadStackGuardNode() const {
1728 return Subtarget->isTargetMachO() && Subtarget->is64Bit();
1731 TargetLoweringBase::LegalizeTypeAction
1732 X86TargetLowering::getPreferredVectorAction(EVT VT) const {
1733 if (ExperimentalVectorWideningLegalization &&
1734 VT.getVectorNumElements() != 1 &&
1735 VT.getVectorElementType().getSimpleVT() != MVT::i1)
1736 return TypeWidenVector;
1738 return TargetLoweringBase::getPreferredVectorAction(VT);
1741 EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
1742   if (!VT.isVector())
1743     return Subtarget->hasAVX512() ? MVT::i1 : MVT::i8;
1745 const unsigned NumElts = VT.getVectorNumElements();
1746 const EVT EltVT = VT.getVectorElementType();
1747 if (VT.is512BitVector()) {
1748 if (Subtarget->hasAVX512())
1749 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1750 EltVT == MVT::f32 || EltVT == MVT::f64)
1751       switch (NumElts) {
1752 case 8: return MVT::v8i1;
1753 case 16: return MVT::v16i1;
1754       }
1755 if (Subtarget->hasBWI())
1756 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1757       switch (NumElts) {
1758 case 32: return MVT::v32i1;
1759 case 64: return MVT::v64i1;
1760       }
1761   }
1763 if (VT.is256BitVector() || VT.is128BitVector()) {
1764 if (Subtarget->hasVLX())
1765 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1766 EltVT == MVT::f32 || EltVT == MVT::f64)
1767       switch (NumElts) {
1768 case 2: return MVT::v2i1;
1769 case 4: return MVT::v4i1;
1770 case 8: return MVT::v8i1;
1771       }
1772 if (Subtarget->hasBWI() && Subtarget->hasVLX())
1773 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1774       switch (NumElts) {
1775 case 8: return MVT::v8i1;
1776 case 16: return MVT::v16i1;
1777 case 32: return MVT::v32i1;
1778       }
1779   }
1781 return VT.changeVectorElementTypeToInteger();
1784 /// Helper for getByValTypeAlignment to determine
1785 /// the desired ByVal argument alignment.
1786 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
1787   if (MaxAlign == 16)
1788     return;
1789 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1790 if (VTy->getBitWidth() == 128)
1791       MaxAlign = 16;
1792 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1793 unsigned EltAlign = 0;
1794 getMaxByValAlign(ATy->getElementType(), EltAlign);
1795 if (EltAlign > MaxAlign)
1796 MaxAlign = EltAlign;
1797 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1798 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1799 unsigned EltAlign = 0;
1800 getMaxByValAlign(STy->getElementType(i), EltAlign);
1801 if (EltAlign > MaxAlign)
1802 MaxAlign = EltAlign;
1803       if (MaxAlign == 16)
1804         break;
1805     }
1806   }
1807 }
1809 /// Return the desired alignment for ByVal aggregate
1810 /// function arguments in the caller parameter area. For X86, aggregates
1811 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
1812 /// are at 4-byte boundaries.
1813 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
1814 if (Subtarget->is64Bit()) {
1815 // Max of 8 and alignment of type.
1816 unsigned TyAlign = TD->getABITypeAlignment(Ty);
1817     if (TyAlign > 8)
1818       return TyAlign;
1819     return 8;
1820   }
1822   unsigned Align = 4;
1823 if (Subtarget->hasSSE1())
1824 getMaxByValAlign(Ty, Align);
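  // For example, a byval struct containing a <4 x float> field gets 16-byte
  // alignment here when SSE1 is available, while a plain pair of ints keeps
  // the default 4-byte boundary.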
1828 /// Returns the target specific optimal type for load
1829 /// and store operations as a result of memset, memcpy, and memmove
1830 /// lowering. If DstAlign is zero, it is safe to assume the destination
1831 /// alignment can satisfy any constraint. Similarly, if SrcAlign is zero,
1832 /// there is no need to check it against an alignment requirement,
1833 /// probably because the source does not need to be loaded. If 'IsMemset' is
1834 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
1835 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
1836 /// source is constant so it does not need to be loaded.
1837 /// It returns EVT::Other if the type should be determined using generic
1838 /// target-independent logic.
1840 X86TargetLowering::getOptimalMemOpType(uint64_t Size,
1841 unsigned DstAlign, unsigned SrcAlign,
1842 bool IsMemset, bool ZeroMemset,
1843                                        bool MemcpyStrSrc,
1844 MachineFunction &MF) const {
1845 const Function *F = MF.getFunction();
1846 if ((!IsMemset || ZeroMemset) &&
1847 !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
1848 Attribute::NoImplicitFloat)) {
1849     if (Size >= 16 &&
1850 (Subtarget->isUnalignedMemAccessFast() ||
1851 ((DstAlign == 0 || DstAlign >= 16) &&
1852 (SrcAlign == 0 || SrcAlign >= 16)))) {
1853       if (Size >= 32) {
1854 if (Subtarget->hasInt256())
1855           return MVT::v8i32;
1856 if (Subtarget->hasFp256())
1857           return MVT::v8f32;
1858       }
1859 if (Subtarget->hasSSE2())
1860         return MVT::v4i32;
1861 if (Subtarget->hasSSE1())
1862         return MVT::v4f32;
1863 } else if (!MemcpyStrSrc && Size >= 8 &&
1864 !Subtarget->is64Bit() &&
1865 Subtarget->hasSSE2()) {
1866       // Do not use f64 to lower memcpy if the source is a string constant. It's
1867 // better to use i32 to avoid the loads.
1868       return MVT::f64;
1869     }
1870   }
1871 if (Subtarget->is64Bit() && Size >= 8)
1872     return MVT::i64;
1873   return MVT::i32;
1874 }
1876 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
1877   if (VT == MVT::f32)
1878 return X86ScalarSSEf32;
1879 else if (VT == MVT::f64)
1880 return X86ScalarSSEf64;
1881   return true;
1882 }
1885 X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
1886                                                   unsigned,
1887                                                   unsigned,
1888                                                   bool *Fast) const {
1889   if (Fast)
1890 *Fast = Subtarget->isUnalignedMemAccessFast();
1891   return true;
1892 }
1894 /// Return the entry encoding for a jump table in the
1895 /// current function. The returned value is a member of the
1896 /// MachineJumpTableInfo::JTEntryKind enum.
1897 unsigned X86TargetLowering::getJumpTableEncoding() const {
1898 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
1899   // symbol.
1900 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1901 Subtarget->isPICStyleGOT())
1902 return MachineJumpTableInfo::EK_Custom32;
1904 // Otherwise, use the normal jump table encoding heuristics.
1905 return TargetLowering::getJumpTableEncoding();
1906 }
1909 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1910 const MachineBasicBlock *MBB,
1911                                              unsigned uid, MCContext &Ctx) const {
1912 assert(MBB->getParent()->getTarget().getRelocationModel() == Reloc::PIC_ &&
1913 Subtarget->isPICStyleGOT());
1914 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
1915   // entries.
1916 return MCSymbolRefExpr::Create(MBB->getSymbol(),
1917 MCSymbolRefExpr::VK_GOTOFF, Ctx);
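  // The resulting entry is a 32-bit offset from the GOT base, e.g. (sketch):
  //   .long .LBB0_2@GOTOFF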
1920 /// Returns relocation base for the given PIC jumptable.
1921 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1922 SelectionDAG &DAG) const {
1923 if (!Subtarget->is64Bit())
1924 // This doesn't have SDLoc associated with it, but is not really the
1925 // same as a Register.
1926 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy());
1927   return Table;
1928 }
1930 /// This returns the relocation base for the given PIC jumptable,
1931 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
1932 const MCExpr *X86TargetLowering::
1933 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1934 MCContext &Ctx) const {
1935 // X86-64 uses RIP relative addressing based on the jump table label.
1936 if (Subtarget->isPICStyleRIPRel())
1937 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1939 // Otherwise, the reference is relative to the PIC base.
1940 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
1943 // FIXME: Why is this routine here? Move it to RegInfo!
1944 std::pair<const TargetRegisterClass*, uint8_t>
1945 X86TargetLowering::findRepresentativeClass(MVT VT) const {
1946 const TargetRegisterClass *RRC = nullptr;
1947   uint8_t Cost = 1;
1948 switch (VT.SimpleTy) {
1949   default:
1950 return TargetLowering::findRepresentativeClass(VT);
1951 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1952 RRC = Subtarget->is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
1953     break;
1954   case MVT::x86mmx:
1955 RRC = &X86::VR64RegClass;
1956     break;
1957 case MVT::f32: case MVT::f64:
1958 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1959 case MVT::v4f32: case MVT::v2f64:
1960 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
1961   case MVT::v4f64:
1962 RRC = &X86::VR128RegClass;
1963     break;
1964   }
1965 return std::make_pair(RRC, Cost);
1968 bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
1969 unsigned &Offset) const {
1970 if (!Subtarget->isTargetLinux())
1971     return false;
1973 if (Subtarget->is64Bit()) {
1974 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
1976 if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
1988 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1989 unsigned DestAS) const {
1990 assert(SrcAS != DestAS && "Expected different address spaces!");
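  // Address spaces 256 and up denote segment-relative pointers (e.g. %gs/%fs),
  // where a cast really changes how the address is formed; everything below
  // 256 shares the flat representation, so such casts are no-ops.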
1992 return SrcAS < 256 && DestAS < 256;
1995 //===----------------------------------------------------------------------===//
1996 // Return Value Calling Convention Implementation
1997 //===----------------------------------------------------------------------===//
1999 #include "X86GenCallingConv.inc"
2002 X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2003 MachineFunction &MF, bool isVarArg,
2004 const SmallVectorImpl<ISD::OutputArg> &Outs,
2005 LLVMContext &Context) const {
2006 SmallVector<CCValAssign, 16> RVLocs;
2007 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2008 return CCInfo.CheckReturn(Outs, RetCC_X86);
2011 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2012 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2013   return ScratchRegs;
2014 }
2017 X86TargetLowering::LowerReturn(SDValue Chain,
2018 CallingConv::ID CallConv, bool isVarArg,
2019 const SmallVectorImpl<ISD::OutputArg> &Outs,
2020 const SmallVectorImpl<SDValue> &OutVals,
2021 SDLoc dl, SelectionDAG &DAG) const {
2022 MachineFunction &MF = DAG.getMachineFunction();
2023 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2025 SmallVector<CCValAssign, 16> RVLocs;
2026 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2027 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2029   SDValue Flag;
2030 SmallVector<SDValue, 6> RetOps;
2031 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2032 // Operand #1 = Bytes To Pop
2033 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
2034                                    MVT::i16));
2036 // Copy the result values into the output registers.
2037 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2038 CCValAssign &VA = RVLocs[i];
2039 assert(VA.isRegLoc() && "Can only return in registers!");
2040 SDValue ValToCopy = OutVals[i];
2041 EVT ValVT = ValToCopy.getValueType();
2043 // Promote values to the appropriate types.
2044 if (VA.getLocInfo() == CCValAssign::SExt)
2045 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2046 else if (VA.getLocInfo() == CCValAssign::ZExt)
2047 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2048 else if (VA.getLocInfo() == CCValAssign::AExt)
2049 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2050 else if (VA.getLocInfo() == CCValAssign::BCvt)
2051 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy);
2053 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2054 "Unexpected FP-extend for return value.");
2056 // If this is x86-64, and we disabled SSE, we can't return FP values,
2057 // or SSE or MMX vectors.
2058 if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
2059 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
2060 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
2061 report_fatal_error("SSE register return with SSE disabled");
2062     }
2063 // Likewise we can't return F64 values with SSE1 only. gcc does so, but
2064 // llvm-gcc has never done it right and no one has noticed, so this
2065 // should be OK for now.
2066 if (ValVT == MVT::f64 &&
2067 (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
2068 report_fatal_error("SSE2 register return with SSE2 disabled");
2070 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2071 // the RET instruction and handled by the FP Stackifier.
2072 if (VA.getLocReg() == X86::FP0 ||
2073 VA.getLocReg() == X86::FP1) {
2074 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2075 // change the value to the FP stack register class.
2076 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2077 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2078 RetOps.push_back(ValToCopy);
2079 // Don't emit a copytoreg.
2080       continue;
2083 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2084 // which is returned in RAX / RDX.
2085 if (Subtarget->is64Bit()) {
2086 if (ValVT == MVT::x86mmx) {
2087 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2088 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy);
2089 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2090                                  ValToCopy);
2091 // If we don't have SSE2 available, convert to v4f32 so the generated
2092 // register is legal.
2093 if (!Subtarget->hasSSE2())
2094             ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, ValToCopy);
2095         }
2096       }
2097     }
2099 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
2100 Flag = Chain.getValue(1);
2101 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2104 // The x86-64 ABIs require that for returning structs by value we copy
2105 // the sret argument into %rax/%eax (depending on ABI) for the return.
2106 // Win32 requires us to put the sret argument to %eax as well.
2107 // We saved the argument into a virtual register in the entry block,
2108 // so now we copy the value out and into %rax/%eax.
2109 if (DAG.getMachineFunction().getFunction()->hasStructRetAttr() &&
2110 (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC())) {
2111 MachineFunction &MF = DAG.getMachineFunction();
2112 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2113 unsigned Reg = FuncInfo->getSRetReturnReg();
2114     assert(Reg &&
2115 "SRetReturnReg should have been set in LowerFormalArguments().");
2116 SDValue Val = DAG.getCopyFromReg(Chain, dl, Reg, getPointerTy());
2118     unsigned RetValReg
2119 = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ?
2120 X86::RAX : X86::EAX;
2121 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2122 Flag = Chain.getValue(1);
2124 // RAX/EAX now acts like a return value.
2125 RetOps.push_back(DAG.getRegister(RetValReg, getPointerTy()));
2128 RetOps[0] = Chain; // Update chain.
2130 // Add the flag if we have it.
2131   if (Flag.getNode())
2132 RetOps.push_back(Flag);
2134 return DAG.getNode(X86ISD::RET_FLAG, dl, MVT::Other, RetOps);
2137 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2138 if (N->getNumValues() != 1)
2139     return false;
2140 if (!N->hasNUsesOfValue(1, 0))
2141     return false;
2143 SDValue TCChain = Chain;
2144 SDNode *Copy = *N->use_begin();
2145 if (Copy->getOpcode() == ISD::CopyToReg) {
2146 // If the copy has a glue operand, we conservatively assume it isn't safe to
2147 // perform a tail call.
2148 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2149       return false;
2150 TCChain = Copy->getOperand(0);
2151 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2152     return false;
2154 bool HasRet = false;
2155 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2156        UI != UE; ++UI) {
2157 if (UI->getOpcode() != X86ISD::RET_FLAG)
2158       return false;
2159 // If we are returning more than one value, we can definitely
2160     // not make a tail call; see PR19530.
2161 if (UI->getNumOperands() > 4)
2162       return false;
2163 if (UI->getNumOperands() == 4 &&
2164 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2165       return false;
2166     HasRet = true;
2167   }
2169   if (!HasRet)
2170     return false;
2172   Chain = TCChain;
2173   return true;
2174 }
2177 X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
2178 ISD::NodeType ExtendKind) const {
2179   MVT ReturnMVT;
2180 // TODO: Is this also valid on 32-bit?
2181 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
2182 ReturnMVT = MVT::i8;
2183   else
2184 ReturnMVT = MVT::i32;
2186 EVT MinVT = getRegisterType(Context, ReturnMVT);
2187 return VT.bitsLT(MinVT) ? MinVT : VT;
2190 /// Lower the result values of a call into the
2191 /// appropriate copies out of appropriate physical registers.
2194 X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
2195 CallingConv::ID CallConv, bool isVarArg,
2196 const SmallVectorImpl<ISD::InputArg> &Ins,
2197 SDLoc dl, SelectionDAG &DAG,
2198 SmallVectorImpl<SDValue> &InVals) const {
2200 // Assign locations to each value returned by this call.
2201 SmallVector<CCValAssign, 16> RVLocs;
2202 bool Is64Bit = Subtarget->is64Bit();
2203 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2204                  *DAG.getContext());
2205 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2207 // Copy all of the result registers out of their specified physreg.
2208 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2209 CCValAssign &VA = RVLocs[i];
2210 EVT CopyVT = VA.getValVT();
2212 // If this is x86-64, and we disabled SSE, we can't return FP values
2213 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
2214 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
2215 report_fatal_error("SSE register return with SSE disabled");
2218 // If we prefer to use the value in xmm registers, copy it out as f80 and
2219 // use a truncate to move it from fp stack reg to xmm reg.
2220 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
2221 isScalarFPTypeInSSEReg(VA.getValVT()))
2222       CopyVT = MVT::f80;
2224 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
2225 CopyVT, InFlag).getValue(1);
2226 SDValue Val = Chain.getValue(0);
2228 if (CopyVT != VA.getValVT())
2229 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
2230 // This truncation won't change the value.
2231 DAG.getIntPtrConstant(1));
2233 InFlag = Chain.getValue(2);
2234 InVals.push_back(Val);
2235   }
2237   return Chain;
2238 }
2240 //===----------------------------------------------------------------------===//
2241 // C & StdCall & Fast Calling Convention implementation
2242 //===----------------------------------------------------------------------===//
2243 // The StdCall calling convention is the standard for many Windows API
2244 // routines. It differs from the C calling convention only slightly: the
2245 // callee cleans up the stack rather than the caller, and symbols are
2246 // decorated. It does not support vector arguments.
2247 // For information on the fast calling convention, see the Fast Calling
2248 // Convention (tail call) implementation in LowerX86_32FastCCCallTo.
2250 /// CallIsStructReturn - Determines whether a call uses struct return
2251 /// semantics.
2252 enum StructReturnType {
2253   NotStructReturn,
2254   RegStructReturn,
2255   StackStructReturn
2256 };
2257 static StructReturnType
2258 callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
2259   if (Outs.empty())
2260 return NotStructReturn;
2262 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
2263 if (!Flags.isSRet())
2264 return NotStructReturn;
2265 if (Flags.isInReg())
2266 return RegStructReturn;
2267 return StackStructReturn;
2270 /// Determines whether a function uses struct return semantics.
2271 static StructReturnType
2272 argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
2273   if (Ins.empty())
2274 return NotStructReturn;
2276 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
2277 if (!Flags.isSRet())
2278 return NotStructReturn;
2279 if (Flags.isInReg())
2280 return RegStructReturn;
2281 return StackStructReturn;
2284 /// Make a copy of an aggregate at the address specified by "Src" to the
2285 /// address "Dst", with size and alignment information specified by the
2286 /// parameter attribute. The copy will be passed as a byval function parameter.
2287 static SDValue
2288 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
2289 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
2290                           SDLoc dl) {
2291 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
2293 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2294 /*isVolatile*/false, /*AlwaysInline=*/true,
2295 MachinePointerInfo(), MachinePointerInfo());
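// AlwaysInline=true forces the memcpy to be expanded inline rather than
// emitted as a libcall, presumably because this copy happens while an
// outgoing call's argument area is being set up, where a nested call
// sequence could not be inserted.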
2298 /// Return true if the calling convention is one that
2299 /// supports tail call optimization.
2300 static bool IsTailCallConvention(CallingConv::ID CC) {
2301 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
2302 CC == CallingConv::HiPE);
2305 /// \brief Return true if the calling convention is a C calling convention.
2306 static bool IsCCallConvention(CallingConv::ID CC) {
2307 return (CC == CallingConv::C || CC == CallingConv::X86_64_Win64 ||
2308 CC == CallingConv::X86_64_SysV);
2311 bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
2312 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
2313     return false;
2315   CallSite CS(CI);
2316 CallingConv::ID CalleeCC = CS.getCallingConv();
2317 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
2318     return false;
2320   return true;
2321 }
2323 /// Return true if the function is being made into
2324 /// a tailcall target by changing its ABI.
2325 static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
2326 bool GuaranteedTailCallOpt) {
2327 return GuaranteedTailCallOpt && IsTailCallConvention(CC);
2331 X86TargetLowering::LowerMemArgument(SDValue Chain,
2332 CallingConv::ID CallConv,
2333 const SmallVectorImpl<ISD::InputArg> &Ins,
2334 SDLoc dl, SelectionDAG &DAG,
2335 const CCValAssign &VA,
2336 MachineFrameInfo *MFI,
2337                                     unsigned i) const {
2338 // Create the nodes corresponding to a load from this parameter slot.
2339 ISD::ArgFlagsTy Flags = Ins[i].Flags;
2340 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(
2341 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
2342 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
2344   EVT ValVT;
2345   // If the value is passed by pointer, we have the address passed instead of
2346   // the value itself.
2347 if (VA.getLocInfo() == CCValAssign::Indirect)
2348 ValVT = VA.getLocVT();
2350 ValVT = VA.getValVT();
2352 // FIXME: For now, all byval parameter objects are marked mutable. This can be
2353 // changed with more analysis.
2354   // In the case of tail call optimization, mark all arguments mutable, since
2355   // they could be overwritten by the lowering of arguments for a tail call.
2356 if (Flags.isByVal()) {
2357 unsigned Bytes = Flags.getByValSize();
2358 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
2359 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
2360 return DAG.getFrameIndex(FI, getPointerTy());
2361   } else {
2362 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
2363 VA.getLocMemOffset(), isImmutable);
2364 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2365 return DAG.getLoad(ValVT, dl, Chain, FIN,
2366 MachinePointerInfo::getFixedStack(FI),
2367 false, false, false, 0);
2371 // FIXME: Get this from tablegen.
2372 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
2373 const X86Subtarget *Subtarget) {
2374 assert(Subtarget->is64Bit());
2376 if (Subtarget->isCallingConvWin64(CallConv)) {
2377 static const MCPhysReg GPR64ArgRegsWin64[] = {
2378 X86::RCX, X86::RDX, X86::R8, X86::R9
2380 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
2383 static const MCPhysReg GPR64ArgRegs64Bit[] = {
2384 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
2386 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
2389 // FIXME: Get this from tablegen.
2390 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
2391 CallingConv::ID CallConv,
2392 const X86Subtarget *Subtarget) {
2393 assert(Subtarget->is64Bit());
2394 if (Subtarget->isCallingConvWin64(CallConv)) {
2395 // The XMM registers which might contain var arg parameters are shadowed
2396     // in their paired GPR. So we only need to save the GPRs to their home
2397     // slots.
2398 // TODO: __vectorcall will change this.
2399     return None;
2400   }
2402 const Function *Fn = MF.getFunction();
2403 bool NoImplicitFloatOps = Fn->getAttributes().
2404 hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat);
2405 assert(!(MF.getTarget().Options.UseSoftFloat && NoImplicitFloatOps) &&
2406 "SSE register cannot be used when SSE is disabled!");
2407 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps ||
2408 !Subtarget->hasSSE1())
2409 // Kernel mode asks for SSE to be disabled, so there are no XMM argument
2410     // registers.
2411     return None;
2413 static const MCPhysReg XMMArgRegs64Bit[] = {
2414 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2415 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2417 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
2421 X86TargetLowering::LowerFormalArguments(SDValue Chain,
2422 CallingConv::ID CallConv,
2423                                         bool isVarArg,
2424 const SmallVectorImpl<ISD::InputArg> &Ins,
2427 SmallVectorImpl<SDValue> &InVals)
2429 MachineFunction &MF = DAG.getMachineFunction();
2430 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2432 const Function* Fn = MF.getFunction();
2433 if (Fn->hasExternalLinkage() &&
2434 Subtarget->isTargetCygMing() &&
2435 Fn->getName() == "main")
2436 FuncInfo->setForceFramePointer(true);
2438 MachineFrameInfo *MFI = MF.getFrameInfo();
2439 bool Is64Bit = Subtarget->is64Bit();
2440 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2442 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2443 "Var args not supported with calling convention fastcc, ghc or hipe");
2445 // Assign locations to all of the incoming arguments.
2446 SmallVector<CCValAssign, 16> ArgLocs;
2447 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2449 // Allocate shadow area for Win64
2450   if (IsWin64)
2451 CCInfo.AllocateStack(32, 8);
2453 CCInfo.AnalyzeFormalArguments(Ins, CC_X86);
2455 unsigned LastVal = ~0U;
2456   SDValue ArgValue;
2457 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2458 CCValAssign &VA = ArgLocs[i];
2459 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
2460     // places.
2461 assert(VA.getValNo() != LastVal &&
2462 "Don't support value assigned to multiple locs yet");
2464 LastVal = VA.getValNo();
2466 if (VA.isRegLoc()) {
2467 EVT RegVT = VA.getLocVT();
2468 const TargetRegisterClass *RC;
2469 if (RegVT == MVT::i32)
2470 RC = &X86::GR32RegClass;
2471 else if (Is64Bit && RegVT == MVT::i64)
2472 RC = &X86::GR64RegClass;
2473 else if (RegVT == MVT::f32)
2474 RC = &X86::FR32RegClass;
2475 else if (RegVT == MVT::f64)
2476 RC = &X86::FR64RegClass;
2477 else if (RegVT.is512BitVector())
2478 RC = &X86::VR512RegClass;
2479 else if (RegVT.is256BitVector())
2480 RC = &X86::VR256RegClass;
2481 else if (RegVT.is128BitVector())
2482 RC = &X86::VR128RegClass;
2483 else if (RegVT == MVT::x86mmx)
2484 RC = &X86::VR64RegClass;
2485 else if (RegVT == MVT::i1)
2486 RC = &X86::VK1RegClass;
2487 else if (RegVT == MVT::v8i1)
2488 RC = &X86::VK8RegClass;
2489 else if (RegVT == MVT::v16i1)
2490 RC = &X86::VK16RegClass;
2491 else if (RegVT == MVT::v32i1)
2492 RC = &X86::VK32RegClass;
2493 else if (RegVT == MVT::v64i1)
2494 RC = &X86::VK64RegClass;
2495       else
2496 llvm_unreachable("Unknown argument type!");
2498 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2499 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
2501 // If this is an 8 or 16-bit value, it is really passed promoted to 32
2502 // bits. Insert an assert[sz]ext to capture this, then truncate to the
2503       // right size.
2504 if (VA.getLocInfo() == CCValAssign::SExt)
2505 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2506 DAG.getValueType(VA.getValVT()));
2507 else if (VA.getLocInfo() == CCValAssign::ZExt)
2508 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2509 DAG.getValueType(VA.getValVT()));
2510 else if (VA.getLocInfo() == CCValAssign::BCvt)
2511 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
2513 if (VA.isExtInLoc()) {
2514 // Handle MMX values passed in XMM regs.
2515 if (RegVT.isVector())
2516 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
2518 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2519       }
2520     } else {
2521 assert(VA.isMemLoc());
2522 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i);
2523     }
2525 // If value is passed via pointer - do a load.
2526 if (VA.getLocInfo() == CCValAssign::Indirect)
2527 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
2528 MachinePointerInfo(), false, false, false, 0);
2530 InVals.push_back(ArgValue);
2531   }
2533 if (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) {
2534 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2535 // The x86-64 ABIs require that for returning structs by value we copy
2536 // the sret argument into %rax/%eax (depending on ABI) for the return.
2537 // Win32 requires us to put the sret argument to %eax as well.
2538 // Save the argument into a virtual register so that we can access it
2539 // from the return points.
2540 if (Ins[i].Flags.isSRet()) {
2541 unsigned Reg = FuncInfo->getSRetReturnReg();
2542         if (Reg == 0) {
2543 MVT PtrTy = getPointerTy();
2544 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
2545 FuncInfo->setSRetReturnReg(Reg);
2546         }
2547 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[i]);
2548 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
2549         break;
2550       }
2551     }
2552   }
2554 unsigned StackSize = CCInfo.getNextStackOffset();
2555 // Align stack specially for tail calls.
2556 if (FuncIsMadeTailCallSafe(CallConv,
2557 MF.getTarget().Options.GuaranteedTailCallOpt))
2558 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
2560   // If the function takes a variable number of arguments, make a frame index for
2561 // the start of the first vararg value... for expansion of llvm.va_start. We
2562 // can skip this if there are no va_start calls.
2563 if (MFI->hasVAStart() &&
2564 (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
2565 CallConv != CallingConv::X86_ThisCall))) {
2566 FuncInfo->setVarArgsFrameIndex(
2567 MFI->CreateFixedObject(1, StackSize, true));
2568   }
2570 // Figure out if XMM registers are in use.
2571 assert(!(MF.getTarget().Options.UseSoftFloat &&
2572 Fn->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
2573 Attribute::NoImplicitFloat)) &&
2574 "SSE register cannot be used when SSE is disabled!");
2576 // 64-bit calling conventions support varargs and register parameters, so we
2577 // have to do extra work to spill them in the prologue.
2578 if (Is64Bit && isVarArg && MFI->hasVAStart()) {
2579 // Find the first unallocated argument registers.
2580 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
2581 ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
2582 unsigned NumIntRegs =
2583 CCInfo.getFirstUnallocated(ArgGPRs.data(), ArgGPRs.size());
2584 unsigned NumXMMRegs =
2585 CCInfo.getFirstUnallocated(ArgXMMs.data(), ArgXMMs.size());
2586 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
2587 "SSE register cannot be used when SSE is disabled!");
2589 // Gather all the live in physical registers.
2590 SmallVector<SDValue, 6> LiveGPRs;
2591 SmallVector<SDValue, 8> LiveXMMRegs;
2592     SDValue ALVal;
2593 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
2594 unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
2595       LiveGPRs.push_back(
2596 DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
2597     }
2598 if (!ArgXMMs.empty()) {
2599 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2600 ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
2601 for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
2602 unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
2603 LiveXMMRegs.push_back(
2604 DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
2605       }
2606     }
2608     if (IsWin64) {
2609 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
2610 // Get to the caller-allocated home save location. Add 8 to account
2611 // for the return address.
2612 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
2613 FuncInfo->setRegSaveFrameIndex(
2614 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
2615 // Fixup to set vararg frame on shadow area (4 x i64).
2616       if (NumIntRegs < 4)
2617 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
2618     } else {
2619 // For X86-64, if there are vararg parameters that are passed via
2620 // registers, then we must store them to their spots on the stack so
2621       // they may be loaded by dereferencing the result of va_next.
2622 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
2623 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
2624 FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
2625 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
2628 // Store the integer parameter registers.
2629 SmallVector<SDValue, 8> MemOps;
2630 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
2631                                       getPointerTy());
2632 unsigned Offset = FuncInfo->getVarArgsGPOffset();
2633 for (SDValue Val : LiveGPRs) {
2634 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
2635 DAG.getIntPtrConstant(Offset));
2636       SDValue Store =
2637 DAG.getStore(Val.getValue(1), dl, Val, FIN,
2638 MachinePointerInfo::getFixedStack(
2639 FuncInfo->getRegSaveFrameIndex(), Offset),
2640                        false, false, 0);
2641 MemOps.push_back(Store);
2642       Offset += 8;
2643     }
2645 if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
2646 // Now store the XMM (fp + vector) parameter registers.
2647 SmallVector<SDValue, 12> SaveXMMOps;
2648 SaveXMMOps.push_back(Chain);
2649 SaveXMMOps.push_back(ALVal);
2650 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2651 FuncInfo->getRegSaveFrameIndex()));
2652 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2653 FuncInfo->getVarArgsFPOffset()));
2654 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
2655                         LiveXMMRegs.end());
2656 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
2657 MVT::Other, SaveXMMOps));
2660 if (!MemOps.empty())
2661 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
2664 if (isVarArg && MFI->hasMustTailInVarArgFunc()) {
2665 // Find the largest legal vector type.
2666 MVT VecVT = MVT::Other;
2667 // FIXME: Only some x86_32 calling conventions support AVX512.
2668 if (Subtarget->hasAVX512() &&
2669 (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
2670 CallConv == CallingConv::Intel_OCL_BI)))
2671 VecVT = MVT::v16f32;
2672 else if (Subtarget->hasAVX())
2673       VecVT = MVT::v8f32;
2674 else if (Subtarget->hasSSE2())
2675       VecVT = MVT::v4f32;
2677 // We forward some GPRs and some vector types.
2678 SmallVector<MVT, 2> RegParmTypes;
2679 MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
2680 RegParmTypes.push_back(IntVT);
2681 if (VecVT != MVT::Other)
2682 RegParmTypes.push_back(VecVT);
2684 // Compute the set of forwarded registers. The rest are scratch.
2685 SmallVectorImpl<ForwardedRegister> &Forwards =
2686 FuncInfo->getForwardedMustTailRegParms();
2687 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
2689 // Conservatively forward AL on x86_64, since it might be used for varargs.
2690 if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
2691 unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2692 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
2695 // Copy all forwards from physical to virtual registers.
2696 for (ForwardedRegister &F : Forwards) {
2697 // FIXME: Can we use a less constrained schedule?
2698 SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
2699 F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
2700 Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
2704 // Some CCs need callee pop.
2705 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
2706 MF.getTarget().Options.GuaranteedTailCallOpt)) {
2707 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
2708   } else {
2709 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
2710 // If this is an sret function, the return should pop the hidden pointer.
2711 if (!Is64Bit && !IsTailCallConvention(CallConv) &&
2712 !Subtarget->getTargetTriple().isOSMSVCRT() &&
2713 argsAreStructReturn(Ins) == StackStructReturn)
2714 FuncInfo->setBytesToPopOnReturn(4);
2715   }
2717   if (!Is64Bit) {
2718 // RegSaveFrameIndex is X86-64 only.
2719 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
2720 if (CallConv == CallingConv::X86_FastCall ||
2721 CallConv == CallingConv::X86_ThisCall)
2722 // fastcc functions can't have varargs.
2723 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
2724   }
2726 FuncInfo->setArgumentStackSize(StackSize);
2728   return Chain;
2729 }
2732 X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
2733 SDValue StackPtr, SDValue Arg,
2734 SDLoc dl, SelectionDAG &DAG,
2735 const CCValAssign &VA,
2736 ISD::ArgFlagsTy Flags) const {
2737 unsigned LocMemOffset = VA.getLocMemOffset();
2738 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
2739 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
2740 if (Flags.isByVal())
2741 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
2743 return DAG.getStore(Chain, dl, Arg, PtrOff,
2744 MachinePointerInfo::getStack(LocMemOffset),
2745                       false, false, 0);
2746 }
2748 /// Emit a load of the return address if tail call
2749 /// optimization is performed and it is required.
2750 SDValue
2751 X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
2752 SDValue &OutRetAddr, SDValue Chain,
2753 bool IsTailCall, bool Is64Bit,
2754 int FPDiff, SDLoc dl) const {
2755 // Adjust the Return address stack slot.
2756 EVT VT = getPointerTy();
2757 OutRetAddr = getReturnAddressFrameIndex(DAG);
2759 // Load the "old" Return address.
2760 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
2761 false, false, false, 0);
2762 return SDValue(OutRetAddr.getNode(), 1);
2765 /// Emit a store of the return address if tail call
2766 /// optimization is performed and it is required (FPDiff!=0).
2767 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
2768 SDValue Chain, SDValue RetAddrFrIdx,
2769 EVT PtrVT, unsigned SlotSize,
2770 int FPDiff, SDLoc dl) {
2771 // Store the return address to the appropriate stack slot.
2772 if (!FPDiff) return Chain;
2773 // Calculate the new stack slot for the return address.
2774 int NewReturnAddrFI =
2775 MF.getFrameInfo()->CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
2776                                          false);
2777 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
2778 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
2779 MachinePointerInfo::getFixedStack(NewReturnAddrFI),
2780                        false, false, 0);
2781   return Chain;
2782 }
2785 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2786 SmallVectorImpl<SDValue> &InVals) const {
2787 SelectionDAG &DAG = CLI.DAG;
2788   SDLoc &dl = CLI.DL;
2789 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2790 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2791 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2792 SDValue Chain = CLI.Chain;
2793 SDValue Callee = CLI.Callee;
2794 CallingConv::ID CallConv = CLI.CallConv;
2795 bool &isTailCall = CLI.IsTailCall;
2796 bool isVarArg = CLI.IsVarArg;
2798 MachineFunction &MF = DAG.getMachineFunction();
2799 bool Is64Bit = Subtarget->is64Bit();
2800 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2801 StructReturnType SR = callIsStructReturn(Outs);
2802 bool IsSibcall = false;
2803 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
2805 if (MF.getTarget().Options.DisableTailCalls)
2806     isTailCall = false;
2808 bool IsMustTail = CLI.CS && CLI.CS->isMustTailCall();
2809   if (IsMustTail) {
2810 // Force this to be a tail call. The verifier rules are enough to ensure
2811 // that we can lower this successfully without moving the return address
2812     // around.
2813     isTailCall = true;
2814 } else if (isTailCall) {
2815 // Check if it's really possible to do a tail call.
2816 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
2817 isVarArg, SR != NotStructReturn,
2818 MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
2819 Outs, OutVals, Ins, DAG);
2821 // Sibcalls are automatically detected tailcalls which do not require
2822     // ABI changes.
2823 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
2824       IsSibcall = true;
2826     if (isTailCall)
2827       ++NumTailCalls;
2828   }
2830 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2831 "Var args not supported with calling convention fastcc, ghc or hipe");
2833 // Analyze operands of the call, assigning locations to each operand.
2834 SmallVector<CCValAssign, 16> ArgLocs;
2835 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2837 // Allocate shadow area for Win64
2838 if (IsWin64)
2839 CCInfo.AllocateStack(32, 8);
2841 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2843 // Get a count of how many bytes are to be pushed on the stack.
2844 unsigned NumBytes = CCInfo.getNextStackOffset();
2845 if (IsSibcall)
2846 // This is a sibcall. The memory operands are available in the caller's
2847 // own caller's stack.
2848 NumBytes = 0;
2849 else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
2850 IsTailCallConvention(CallConv))
2851 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
2853 int FPDiff = 0;
2854 if (isTailCall && !IsSibcall && !IsMustTail) {
2855 // Lower arguments at fp - stackoffset + fpdiff.
2856 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
2858 FPDiff = NumBytesCallerPushed - NumBytes;
2860 // Set the delta of movement of the returnaddr stackslot.
2861 // But only set if delta is greater than previous delta.
2862 if (FPDiff < X86Info->getTCReturnAddrDelta())
2863 X86Info->setTCReturnAddrDelta(FPDiff);
2864 }
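// Illustrative example (not from the original source): if the caller pushed
// 16 bytes of arguments and this call needs 32 bytes, FPDiff = 16 - 32 = -16,
// so the return address slot must be moved 16 bytes further down the stack
// before the tail jump.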
2866 unsigned NumBytesToPush = NumBytes;
2867 unsigned NumBytesToPop = NumBytes;
2869 // If we have an inalloca argument, all stack space has already been allocated
2870 // for us and will be right at the top of the stack. We don't support multiple
2871 // arguments passed in memory when using inalloca.
2872 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
2873 NumBytesToPush = 0;
2874 if (!ArgLocs.back().isMemLoc())
2875 report_fatal_error("cannot use inalloca attribute on a register "
2876 "parameter");
2877 if (ArgLocs.back().getLocMemOffset() != 0)
2878 report_fatal_error("any parameter with the inalloca attribute must be "
2879 "the only memory argument");
2880 }
2883 Chain = DAG.getCALLSEQ_START(
2884 Chain, DAG.getIntPtrConstant(NumBytesToPush, true), dl);
2886 SDValue RetAddrFrIdx;
2887 // Load return address for tail calls.
2888 if (isTailCall && FPDiff)
2889 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2890 Is64Bit, FPDiff, dl);
2892 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2893 SmallVector<SDValue, 8> MemOpChains;
2894 SDValue StackPtr;
2896 // Walk the register/memloc assignments, inserting copies/loads. In the case
2897 // of tail call optimization arguments are handled later.
2898 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2899 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2900 // Skip inalloca arguments, they have already been written.
2901 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2902 if (Flags.isInAlloca())
2903 continue;
2905 CCValAssign &VA = ArgLocs[i];
2906 EVT RegVT = VA.getLocVT();
2907 SDValue Arg = OutVals[i];
2908 bool isByVal = Flags.isByVal();
2910 // Promote the value if needed.
2911 switch (VA.getLocInfo()) {
2912 default: llvm_unreachable("Unknown loc info!");
2913 case CCValAssign::Full: break;
2914 case CCValAssign::SExt:
2915 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2916 break;
2917 case CCValAssign::ZExt:
2918 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2919 break;
2920 case CCValAssign::AExt:
2921 if (RegVT.is128BitVector()) {
2922 // Special case: passing MMX values in XMM registers.
2923 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
2924 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
2925 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
2926 } else
2927 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
2928 break;
2929 case CCValAssign::BCvt:
2930 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
2931 break;
2932 case CCValAssign::Indirect: {
2933 // Store the argument.
2934 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2935 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2936 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
2937 MachinePointerInfo::getFixedStack(FI),
2938 false, false, 0);
2939 Arg = SpillSlot;
2940 break;
2941 }
2942 }
2944 if (VA.isRegLoc()) {
2945 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2946 if (isVarArg && IsWin64) {
2947 // Win64 ABI requires argument XMM reg to be copied to the corresponding
2948 // shadow reg if callee is a varargs function.
2949 unsigned ShadowReg = 0;
2950 switch (VA.getLocReg()) {
2951 case X86::XMM0: ShadowReg = X86::RCX; break;
2952 case X86::XMM1: ShadowReg = X86::RDX; break;
2953 case X86::XMM2: ShadowReg = X86::R8; break;
2954 case X86::XMM3: ShadowReg = X86::R9; break;
2955 }
2956 if (ShadowReg)
2957 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2959 } else if (!IsSibcall && (!isTailCall || isByVal)) {
2960 assert(VA.isMemLoc());
2961 if (!StackPtr.getNode())
2962 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
2963 getPointerTy());
2964 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2965 dl, DAG, VA, Flags));
2966 }
2967 }
2969 if (!MemOpChains.empty())
2970 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2972 if (Subtarget->isPICStyleGOT()) {
2973 // ELF / PIC requires GOT in the EBX register before function calls via PLT
2974 // GOT pointer.
2975 if (!isTailCall) {
2976 RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
2977 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy())));
2978 } else {
2979 // If we are tail calling and generating PIC/GOT style code load the
2980 // address of the callee into ECX. The value in ecx is used as target of
2981 // the tail jump. This is done to circumvent the ebx/callee-saved problem
2982 // for tail calls on PIC/GOT architectures. Normally we would just put the
2983 // address of GOT into ebx and then call target@PLT. But for tail calls
2984 // ebx would be restored (since ebx is callee saved) before jumping to the
2985 // target.
2987 // Note: The actual moving to ECX is done further down.
2988 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
2989 if (G && !G->getGlobal()->hasHiddenVisibility() &&
2990 !G->getGlobal()->hasProtectedVisibility())
2991 Callee = LowerGlobalAddress(Callee, DAG);
2992 else if (isa<ExternalSymbolSDNode>(Callee))
2993 Callee = LowerExternalSymbol(Callee, DAG);
2994 }
2995 }
2997 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
2998 // From AMD64 ABI document:
2999 // For calls that may call functions that use varargs or stdargs
3000 // (prototype-less calls or calls to functions containing ellipsis (...) in
3001 // the declaration) %al is used as hidden argument to specify the number
3002 // of SSE registers used. The contents of %al do not need to match exactly
3003 // the number of registers, but must be an upper bound on the number of SSE
3004 // registers used and is in the range 0 - 8 inclusive.
3006 // Count the number of XMM registers allocated.
3007 static const MCPhysReg XMMArgRegs[] = {
3008 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3009 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3010 };
3011 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
3012 assert((Subtarget->hasSSE1() || !NumXMMRegs)
3013 && "SSE registers cannot be used when SSE is disabled");
3015 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
3016 DAG.getConstant(NumXMMRegs, MVT::i8)));
3017 }
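// For example (illustrative): a variadic call that passes two doubles in
// XMM0/XMM1 reaches here with NumXMMRegs == 2, which typically materializes
// as "movb $2, %al" immediately before the call instruction.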
3019 if (isVarArg && IsMustTail) {
3020 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
3021 for (const auto &F : Forwards) {
3022 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3023 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
3024 }
3025 }
3027 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
3028 // don't need this because the eligibility check rejects calls that require
3029 // shuffling arguments passed in memory.
3030 if (!IsSibcall && isTailCall) {
3031 // Force all the incoming stack arguments to be loaded from the stack
3032 // before any new outgoing arguments are stored to the stack, because the
3033 // outgoing stack slots may alias the incoming argument stack slots, and
3034 // the alias isn't otherwise explicit. This is slightly more conservative
3035 // than necessary, because it means that each store effectively depends
3036 // on every argument instead of just those arguments it would clobber.
3037 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
3039 SmallVector<SDValue, 8> MemOpChains2;
3040 SDValue FIN;
3041 int FI = 0;
3042 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3043 CCValAssign &VA = ArgLocs[i];
3044 if (VA.isRegLoc())
3045 continue;
3046 assert(VA.isMemLoc());
3047 SDValue Arg = OutVals[i];
3048 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3049 // Skip inalloca arguments. They don't require any work.
3050 if (Flags.isInAlloca())
3051 continue;
3052 // Create frame index.
3053 int32_t Offset = VA.getLocMemOffset()+FPDiff;
3054 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
3055 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
3056 FIN = DAG.getFrameIndex(FI, getPointerTy());
3058 if (Flags.isByVal()) {
3059 // Copy relative to framepointer.
3060 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
3061 if (!StackPtr.getNode())
3062 StackPtr = DAG.getCopyFromReg(Chain, dl,
3063 RegInfo->getStackRegister(),
3064 getPointerTy());
3065 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
3067 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
3068 ArgChain,
3069 Flags, DAG, dl));
3070 } else {
3071 // Store relative to framepointer.
3072 MemOpChains2.push_back(
3073 DAG.getStore(ArgChain, dl, Arg, FIN,
3074 MachinePointerInfo::getFixedStack(FI),
3075 false, false, 0));
3076 }
3077 }
3079 if (!MemOpChains2.empty())
3080 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
3082 // Store the return address to the appropriate stack slot.
3083 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
3084 getPointerTy(), RegInfo->getSlotSize(),
3085 FPDiff, dl);
3086 }
3088 // Build a sequence of copy-to-reg nodes chained together with token chain
3089 // and flag operands which copy the outgoing args into registers.
3090 SDValue InFlag;
3091 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3092 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3093 RegsToPass[i].second, InFlag);
3094 InFlag = Chain.getValue(1);
3095 }
3097 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
3098 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
3099 // In the 64-bit large code model, we have to make all calls
3100 // through a register, since the call instruction's 32-bit
3101 // pc-relative offset may not be large enough to hold the whole
3102 // address.
3103 } else if (Callee->getOpcode() == ISD::GlobalAddress) {
3104 // If the callee is a GlobalAddress node (quite common, every direct call
3105 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
3106 // it.
3107 GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee);
3109 // We should use extra load for direct calls to dllimported functions in
3110 // non-JIT mode.
3111 const GlobalValue *GV = G->getGlobal();
3112 if (!GV->hasDLLImportStorageClass()) {
3113 unsigned char OpFlags = 0;
3114 bool ExtraLoad = false;
3115 unsigned WrapperKind = ISD::DELETED_NODE;
3117 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
3118 // external symbols must go through the PLT in PIC mode. If the symbol
3119 // has hidden or protected visibility, or if it is static or local, then
3120 // we don't need to use the PLT - we can directly call it.
3121 if (Subtarget->isTargetELF() &&
3122 DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
3123 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
3124 OpFlags = X86II::MO_PLT;
3125 } else if (Subtarget->isPICStyleStubAny() &&
3126 (GV->isDeclaration() || GV->isWeakForLinker()) &&
3127 (!Subtarget->getTargetTriple().isMacOSX() ||
3128 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3129 // PC-relative references to external symbols should go through $stub,
3130 // unless we're building with the leopard linker or later, which
3131 // automatically synthesizes these stubs.
3132 OpFlags = X86II::MO_DARWIN_STUB;
3133 } else if (Subtarget->isPICStyleRIPRel() &&
3134 isa<Function>(GV) &&
3135 cast<Function>(GV)->getAttributes().
3136 hasAttribute(AttributeSet::FunctionIndex,
3137 Attribute::NonLazyBind)) {
3138 // If the function is marked as non-lazy, generate an indirect call
3139 // which loads from the GOT directly. This avoids runtime overhead
3140 // at the cost of eager binding (and one extra byte of encoding).
3141 OpFlags = X86II::MO_GOTPCREL;
3142 WrapperKind = X86ISD::WrapperRIP;
3143 ExtraLoad = true;
3144 }
3146 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
3147 G->getOffset(), OpFlags);
3149 // Add a wrapper if needed.
3150 if (WrapperKind != ISD::DELETED_NODE)
3151 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee);
3152 // Add extra indirection if needed.
3153 if (ExtraLoad)
3154 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
3155 MachinePointerInfo::getGOT(),
3156 false, false, false, 0);
3157 }
3158 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3159 unsigned char OpFlags = 0;
3161 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
3162 // external symbols should go through the PLT.
3163 if (Subtarget->isTargetELF() &&
3164 DAG.getTarget().getRelocationModel() == Reloc::PIC_) {
3165 OpFlags = X86II::MO_PLT;
3166 } else if (Subtarget->isPICStyleStubAny() &&
3167 (!Subtarget->getTargetTriple().isMacOSX() ||
3168 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3169 // PC-relative references to external symbols should go through $stub,
3170 // unless we're building with the leopard linker or later, which
3171 // automatically synthesizes these stubs.
3172 OpFlags = X86II::MO_DARWIN_STUB;
3173 }
3175 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
3176 OpFlags);
3177 } else if (Subtarget->isTarget64BitILP32() &&
3178 Callee->getValueType(0) == MVT::i32) {
3179 // Zero-extend the 32-bit Callee address into a 64-bit one, per the x32 ABI.
3180 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
3181 }
3183 // Returns a chain & a flag for retval copy to use.
3184 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3185 SmallVector<SDValue, 8> Ops;
3187 if (!IsSibcall && isTailCall) {
3188 Chain = DAG.getCALLSEQ_END(Chain,
3189 DAG.getIntPtrConstant(NumBytesToPop, true),
3190 DAG.getIntPtrConstant(0, true), InFlag, dl);
3191 InFlag = Chain.getValue(1);
3192 }
3194 Ops.push_back(Chain);
3195 Ops.push_back(Callee);
3197 if (isTailCall)
3198 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
3200 // Add argument registers to the end of the list so that they are known live
3201 // into the call.
3202 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3203 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
3204 RegsToPass[i].second.getValueType()));
3206 // Add a register mask operand representing the call-preserved registers.
3207 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
3208 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
3209 assert(Mask && "Missing call preserved mask for calling convention");
3210 Ops.push_back(DAG.getRegisterMask(Mask));
3212 if (InFlag.getNode())
3213 Ops.push_back(InFlag);
3216 if (isTailCall) {
3217 //// If this is the first return lowered for this function, add the regs
3218 //// to the liveout set for the function.
3219 // This isn't right, although it's probably harmless on x86; liveouts
3220 // should be computed from returns not tail calls. Consider a void
3221 // function making a tail call to a function returning int.
3222 return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
3223 }
3225 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
3226 InFlag = Chain.getValue(1);
3228 // Create the CALLSEQ_END node.
3229 unsigned NumBytesForCalleeToPop;
3230 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3231 DAG.getTarget().Options.GuaranteedTailCallOpt))
3232 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
3233 else if (!Is64Bit && !IsTailCallConvention(CallConv) &&
3234 !Subtarget->getTargetTriple().isOSMSVCRT() &&
3235 SR == StackStructReturn)
3236 // If this is a call to a struct-return function, the callee
3237 // pops the hidden struct pointer, so we have to push it back.
3238 // This is common for Darwin/X86, Linux & Mingw32 targets.
3239 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
3240 NumBytesForCalleeToPop = 4;
3241 else
3242 NumBytesForCalleeToPop = 0; // Callee pops nothing.
3244 // Returns a flag for retval copy to use.
3245 if (!IsSibcall) {
3246 Chain = DAG.getCALLSEQ_END(Chain,
3247 DAG.getIntPtrConstant(NumBytesToPop, true),
3248 DAG.getIntPtrConstant(NumBytesForCalleeToPop,
3249 true),
3250 InFlag, dl);
3251 InFlag = Chain.getValue(1);
3252 }
3254 // Handle result values, copying them out of physregs into vregs that we
3255 // return.
3256 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
3257 Ins, dl, DAG, InVals);
3258 }
3260 //===----------------------------------------------------------------------===//
3261 // Fast Calling Convention (tail call) implementation
3262 //===----------------------------------------------------------------------===//
3264 // Like the stdcall convention, the callee cleans up the arguments, except
3265 // that ECX is reserved for storing the tail called function address. Only 2
3266 // registers are free for argument passing (inreg). Tail call optimization is
3267 // performed provided:
3268 // * tailcallopt is enabled
3269 // * caller/callee are fastcc
3270 // On X86_64 architecture with GOT-style position independent code only local
3271 // (within module) calls are supported at the moment.
3272 // To keep the stack aligned according to the platform ABI, the function
3273 // GetAlignedArgumentStackSize ensures that the argument delta is always a
3274 // multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld for example)
3275 // If a tail called function callee has more arguments than the caller the
3276 // caller needs to make sure that there is room to move the RETADDR to. This is
3277 // achieved by reserving an area the size of the argument delta right after the
3278 // original RETADDR, but before the saved framepointer or the spilled registers
3279 // e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
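// A sketch of the resulting stack layout for the example above (illustrative;
// the exact layout depends on frame lowering):
//   arg1
//   arg2
//   RETADDR
//   [ new RETADDR
//     move area ]
//   (possible EBP)
//   ESI
//   EDI
//   local1 ..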
3291 /// GetAlignedArgumentStackSize - Align the given stack size, e.g. to 16n + 12
3292 /// for a 16 byte alignment requirement with a 4 byte return-address slot.
3293 unsigned
3294 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
3295 SelectionDAG& DAG) const {
3296 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3297 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
3298 unsigned StackAlignment = TFI.getStackAlignment();
3299 uint64_t AlignMask = StackAlignment - 1;
3300 int64_t Offset = StackSize;
3301 unsigned SlotSize = RegInfo->getSlotSize();
3302 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
3303 // Number smaller than 12 so just add the difference.
3304 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
3305 } else {
3306 // Mask out lower bits, add stackalignment once plus the 12 bytes.
3307 Offset = ((~AlignMask) & Offset) + StackAlignment +
3308 (StackAlignment-SlotSize);
3309 }
3310 return Offset;
3311 }
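// Worked example (illustrative): with StackAlignment = 16 and SlotSize = 4,
// AlignMask = 15 and the target shape is 16n + 12:
//   StackSize = 8  -> 8 & 15 = 8, 8 <= 12  -> Offset = 8 + (12 - 8) = 12
//   StackSize = 30 -> 30 & 15 = 14, 14 > 12 -> Offset = (30 & ~15) + 16 + 12 = 44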
3313 /// MatchingStackOffset - Return true if the given stack call argument is
3314 /// already available in the same position (relatively) of the caller's
3315 /// incoming argument stack.
3316 static
3317 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
3318 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
3319 const X86InstrInfo *TII) {
3320 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
3321 int FI = INT_MAX;
3322 if (Arg.getOpcode() == ISD::CopyFromReg) {
3323 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
3324 if (!TargetRegisterInfo::isVirtualRegister(VR))
3325 return false;
3326 MachineInstr *Def = MRI->getVRegDef(VR);
3327 if (!Def)
3328 return false;
3329 if (!Flags.isByVal()) {
3330 if (!TII->isLoadFromStackSlot(Def, FI))
3331 return false;
3332 } else {
3333 unsigned Opcode = Def->getOpcode();
3334 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
3335 Opcode == X86::LEA64_32r) &&
3336 Def->getOperand(1).isFI()) {
3337 FI = Def->getOperand(1).getIndex();
3338 Bytes = Flags.getByValSize();
3339 } else
3340 return false;
3341 }
3342 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
3343 if (Flags.isByVal())
3344 // ByVal argument is passed in as a pointer but it's now being
3345 // dereferenced. e.g.
3346 // define @foo(%struct.X* %A) {
3347 // tail call @bar(%struct.X* byval %A)
3348 // }
3349 return false;
3350 SDValue Ptr = Ld->getBasePtr();
3351 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
3352 if (!FINode)
3353 return false;
3354 FI = FINode->getIndex();
3355 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
3356 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
3357 FI = FINode->getIndex();
3358 Bytes = Flags.getByValSize();
3359 } else
3360 return false;
3362 assert(FI != INT_MAX);
3363 if (!MFI->isFixedObjectIndex(FI))
3364 return false;
3365 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
3366 }
3368 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3369 /// for tail call optimization. Targets which want to do tail call
3370 /// optimization should implement this function.
3371 bool
3372 X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
3373 CallingConv::ID CalleeCC,
3374 bool isVarArg,
3375 bool isCalleeStructRet,
3376 bool isCallerStructRet,
3377 Type *RetTy,
3378 const SmallVectorImpl<ISD::OutputArg> &Outs,
3379 const SmallVectorImpl<SDValue> &OutVals,
3380 const SmallVectorImpl<ISD::InputArg> &Ins,
3381 SelectionDAG &DAG) const {
3382 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
3383 return false;
3385 // If -tailcallopt is specified, make fastcc functions tail-callable.
3386 const MachineFunction &MF = DAG.getMachineFunction();
3387 const Function *CallerF = MF.getFunction();
3389 // If the function return type is x86_fp80 and the callee return type is not,
3390 // then the FP_EXTEND of the call result is not a nop. It's not safe to
3391 // perform a tailcall optimization here.
3392 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
3393 return false;
3395 CallingConv::ID CallerCC = CallerF->getCallingConv();
3396 bool CCMatch = CallerCC == CalleeCC;
3397 bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
3398 bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
3400 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
3401 if (IsTailCallConvention(CalleeCC) && CCMatch)
3402 return true;
3403 return false;
3404 }
3406 // Look for obvious safe cases to perform tail call optimization that do not
3407 // require ABI changes. This is what gcc calls sibcall.
3409 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
3410 // emit a special epilogue.
3411 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3412 if (RegInfo->needsStackRealignment(MF))
3413 return false;
3415 // Also avoid sibcall optimization if either caller or callee uses struct
3416 // return semantics.
3417 if (isCalleeStructRet || isCallerStructRet)
3418 return false;
3420 // A stdcall/thiscall caller is expected to clean up its arguments; the
3421 // callee isn't going to do that.
3422 // FIXME: this is more restrictive than needed. We could produce a tailcall
3423 // when the stack adjustment matches. For example, with a thiscall that takes
3424 // only one argument.
3425 if (!CCMatch && (CallerCC == CallingConv::X86_StdCall ||
3426 CallerCC == CallingConv::X86_ThisCall))
3427 return false;
3429 // Do not sibcall optimize vararg calls unless all arguments are passed via
3430 // registers.
3431 if (isVarArg && !Outs.empty()) {
3433 // Optimizing for varargs on Win64 is unlikely to be safe without
3434 // additional testing.
3435 if (IsCalleeWin64 || IsCallerWin64)
3436 return false;
3438 SmallVector<CCValAssign, 16> ArgLocs;
3439 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3440 *DAG.getContext());
3442 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3443 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
3444 if (!ArgLocs[i].isRegLoc())
3445 return false;
3446 }
3448 // If the call result is in ST0 / ST1, it needs to be popped off the x87
3449 // stack. Therefore, if it's not used by the call it is not safe to optimize
3450 // this into a sibcall.
3451 bool Unused = false;
3452 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3453 if (!Ins[i].Used) {
3454 Unused = true;
3455 break;
3456 }
3457 }
3458 if (Unused) {
3459 SmallVector<CCValAssign, 16> RVLocs;
3460 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), RVLocs,
3461 *DAG.getContext());
3462 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3463 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
3464 CCValAssign &VA = RVLocs[i];
3465 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
3466 return false;
3467 }
3468 }
3470 // If the calling conventions do not match, then we'd better make sure the
3471 // results are returned in the same way as what the caller expects.
3472 if (!CCMatch) {
3473 SmallVector<CCValAssign, 16> RVLocs1;
3474 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
3475 *DAG.getContext());
3476 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
3478 SmallVector<CCValAssign, 16> RVLocs2;
3479 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
3480 *DAG.getContext());
3481 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
3483 if (RVLocs1.size() != RVLocs2.size())
3484 return false;
3485 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
3486 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
3487 return false;
3488 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
3489 return false;
3490 if (RVLocs1[i].isRegLoc()) {
3491 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
3492 return false;
3493 } else {
3494 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
3495 return false;
3496 }
3497 }
3498 }
3500 // If the callee takes no arguments then go on to check the results of the
3501 // call.
3502 if (!Outs.empty()) {
3503 // Check if stack adjustment is needed. For now, do not do this if any
3504 // argument is passed on the stack.
3505 SmallVector<CCValAssign, 16> ArgLocs;
3506 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3507 *DAG.getContext());
3509 // Allocate shadow area for Win64
3510 if (IsCalleeWin64)
3511 CCInfo.AllocateStack(32, 8);
3513 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3514 if (CCInfo.getNextStackOffset()) {
3515 MachineFunction &MF = DAG.getMachineFunction();
3516 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
3517 return false;
3519 // Check if the arguments are already laid out in the right way as
3520 // the caller's fixed stack objects.
3521 MachineFrameInfo *MFI = MF.getFrameInfo();
3522 const MachineRegisterInfo *MRI = &MF.getRegInfo();
3523 const X86InstrInfo *TII = Subtarget->getInstrInfo();
3524 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3525 CCValAssign &VA = ArgLocs[i];
3526 SDValue Arg = OutVals[i];
3527 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3528 if (VA.getLocInfo() == CCValAssign::Indirect)
3529 return false;
3530 if (!VA.isRegLoc()) {
3531 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
3532 MFI, MRI, TII))
3533 return false;
3534 }
3535 }
3536 }
3538 // If the tailcall address may be in a register, then make sure it's
3539 // possible to register allocate for it. In 32-bit, the call address can
3540 // only target EAX, EDX, or ECX since the tail call must be scheduled after
3541 // callee-saved registers are restored. These happen to be the same
3542 // registers used to pass 'inreg' arguments so watch out for those.
3543 if (!Subtarget->is64Bit() &&
3544 ((!isa<GlobalAddressSDNode>(Callee) &&
3545 !isa<ExternalSymbolSDNode>(Callee)) ||
3546 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
3547 unsigned NumInRegs = 0;
3548 // In PIC we need an extra register to formulate the address computation
3549 // for the callee.
3550 unsigned MaxInRegs =
3551 (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
3553 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3554 CCValAssign &VA = ArgLocs[i];
3555 if (!VA.isRegLoc())
3556 continue;
3557 unsigned Reg = VA.getLocReg();
3558 switch (Reg) {
3559 default: break;
3560 case X86::EAX: case X86::EDX: case X86::ECX:
3561 if (++NumInRegs == MaxInRegs)
3562 return false;
3563 break;
3564 }
3565 }
3566 }
3567 }
3569 return true;
3570 }
3572 FastISel *
3573 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
3574 const TargetLibraryInfo *libInfo) const {
3575 return X86::createFastISel(funcInfo, libInfo);
3576 }
3578 //===----------------------------------------------------------------------===//
3579 // Other Lowering Hooks
3580 //===----------------------------------------------------------------------===//
3582 static bool MayFoldLoad(SDValue Op) {
3583 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
3584 }
3586 static bool MayFoldIntoStore(SDValue Op) {
3587 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
3588 }
3590 static bool isTargetShuffle(unsigned Opcode) {
3591 switch(Opcode) {
3592 default: return false;
3593 case X86ISD::BLENDI:
3594 case X86ISD::PSHUFB:
3595 case X86ISD::PSHUFD:
3596 case X86ISD::PSHUFHW:
3597 case X86ISD::PSHUFLW:
3598 case X86ISD::SHUFP:
3599 case X86ISD::PALIGNR:
3600 case X86ISD::MOVLHPS:
3601 case X86ISD::MOVLHPD:
3602 case X86ISD::MOVHLPS:
3603 case X86ISD::MOVLPS:
3604 case X86ISD::MOVLPD:
3605 case X86ISD::MOVSHDUP:
3606 case X86ISD::MOVSLDUP:
3607 case X86ISD::MOVDDUP:
3608 case X86ISD::MOVSS:
3609 case X86ISD::MOVSD:
3610 case X86ISD::UNPCKL:
3611 case X86ISD::UNPCKH:
3612 case X86ISD::VPERMILPI:
3613 case X86ISD::VPERM2X128:
3614 case X86ISD::VPERMI:
3615 return true;
3616 }
3617 }
3619 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3620 SDValue V1, SelectionDAG &DAG) {
3621 switch(Opc) {
3622 default: llvm_unreachable("Unknown x86 shuffle node");
3623 case X86ISD::MOVSHDUP:
3624 case X86ISD::MOVSLDUP:
3625 case X86ISD::MOVDDUP:
3626 return DAG.getNode(Opc, dl, VT, V1);
3627 }
3628 }
3630 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3631 SDValue V1, unsigned TargetMask,
3632 SelectionDAG &DAG) {
3633 switch(Opc) {
3634 default: llvm_unreachable("Unknown x86 shuffle node");
3635 case X86ISD::PSHUFD:
3636 case X86ISD::PSHUFHW:
3637 case X86ISD::PSHUFLW:
3638 case X86ISD::VPERMILPI:
3639 case X86ISD::VPERMI:
3640 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
3641 }
3642 }
3644 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3645 SDValue V1, SDValue V2, unsigned TargetMask,
3646 SelectionDAG &DAG) {
3647 switch(Opc) {
3648 default: llvm_unreachable("Unknown x86 shuffle node");
3649 case X86ISD::PALIGNR:
3650 case X86ISD::VALIGN:
3651 case X86ISD::SHUFP:
3652 case X86ISD::VPERM2X128:
3653 return DAG.getNode(Opc, dl, VT, V1, V2,
3654 DAG.getConstant(TargetMask, MVT::i8));
3655 }
3656 }
3658 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3659 SDValue V1, SDValue V2, SelectionDAG &DAG) {
3660 switch(Opc) {
3661 default: llvm_unreachable("Unknown x86 shuffle node");
3662 case X86ISD::MOVLHPS:
3663 case X86ISD::MOVLHPD:
3664 case X86ISD::MOVHLPS:
3665 case X86ISD::MOVLPS:
3666 case X86ISD::MOVLPD:
3667 case X86ISD::MOVSS:
3668 case X86ISD::MOVSD:
3669 case X86ISD::UNPCKL:
3670 case X86ISD::UNPCKH:
3671 return DAG.getNode(Opc, dl, VT, V1, V2);
3672 }
3673 }
3675 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
3676 MachineFunction &MF = DAG.getMachineFunction();
3677 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3678 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3679 int ReturnAddrIndex = FuncInfo->getRAIndex();
3681 if (ReturnAddrIndex == 0) {
3682 // Set up a frame object for the return address.
3683 unsigned SlotSize = RegInfo->getSlotSize();
3684 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize,
3685 -(int64_t)SlotSize,
3686 false);
3687 FuncInfo->setRAIndex(ReturnAddrIndex);
3688 }
3690 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
3691 }
3693 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
3694 bool hasSymbolicDisplacement) {
3695 // Offset should fit into 32 bit immediate field.
3696 if (!isInt<32>(Offset))
3697 return false;
3699 // If we don't have a symbolic displacement - we don't have any extra
3700 // restrictions.
3701 if (!hasSymbolicDisplacement)
3702 return true;
3704 // FIXME: Some tweaks might be needed for medium code model.
3705 if (M != CodeModel::Small && M != CodeModel::Kernel)
3706 return false;
3708 // For the small code model, we assume that the latest object is 16MB before
3709 // the end of the 31-bit address boundary, and we may accept pretty large
3710 // negative constants since all objects lie in the positive half of the address space.
3711 if (M == CodeModel::Small && Offset < 16*1024*1024)
3712 return true;
3714 // For the kernel code model we know that all objects reside in the negative
3715 // half of the 32-bit address space. We must not accept negative offsets, since
3716 // they may be just off, but we may accept pretty large positive ones.
3717 if (M == CodeModel::Kernel && Offset >= 0)
3718 return true;
3720 return false;
3721 }
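// Illustrative values: with a symbolic displacement in the small code model,
// Offset = 15 << 20 (15MB) is accepted while Offset = 64 << 20 is not; in the
// kernel code model any non-negative 32-bit offset is accepted.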
3723 /// isCalleePop - Determines whether the callee is required to pop its
3724 /// own arguments. Callee pop is necessary to support tail calls.
3725 bool X86::isCalleePop(CallingConv::ID CallingConv,
3726 bool is64Bit, bool IsVarArg, bool TailCallOpt) {
3727 switch (CallingConv) {
3728 default:
3729 return false;
3730 case CallingConv::X86_StdCall:
3731 case CallingConv::X86_FastCall:
3732 case CallingConv::X86_ThisCall:
3733 return !is64Bit;
3734 case CallingConv::Fast:
3735 case CallingConv::GHC:
3736 case CallingConv::HiPE:
3743 /// \brief Return true if the condition is an unsigned comparison operation.
3744 static bool isX86CCUnsigned(unsigned X86CC) {
3745 switch (X86CC) {
3746 default: llvm_unreachable("Invalid integer condition!");
3747 case X86::COND_E: return true;
3748 case X86::COND_G: return false;
3749 case X86::COND_GE: return false;
3750 case X86::COND_L: return false;
3751 case X86::COND_LE: return false;
3752 case X86::COND_NE: return true;
3753 case X86::COND_B: return true;
3754 case X86::COND_A: return true;
3755 case X86::COND_BE: return true;
3756 case X86::COND_AE: return true;
3757 }
3758 llvm_unreachable("covered switch fell through?!");
3759 }
3761 /// TranslateX86CC - do a one to one translation of a ISD::CondCode to the X86
3762 /// specific condition code, returning the condition code and the LHS/RHS of the
3763 /// comparison to make.
3764 static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
3765 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
3766 if (!isFP) {
3767 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
3768 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
3769 // X > -1 -> X == 0, jump !sign.
3770 RHS = DAG.getConstant(0, RHS.getValueType());
3771 return X86::COND_NS;
3772 }
3773 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
3774 // X < 0 -> X == 0, jump on sign.
3775 return X86::COND_S;
3776 }
3777 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
3778 // X < 1 -> X <= 0
3779 RHS = DAG.getConstant(0, RHS.getValueType());
3780 return X86::COND_LE;
3781 }
3782 }
3784 switch (SetCCOpcode) {
3785 default: llvm_unreachable("Invalid integer condition!");
3786 case ISD::SETEQ: return X86::COND_E;
3787 case ISD::SETGT: return X86::COND_G;
3788 case ISD::SETGE: return X86::COND_GE;
3789 case ISD::SETLT: return X86::COND_L;
3790 case ISD::SETLE: return X86::COND_LE;
3791 case ISD::SETNE: return X86::COND_NE;
3792 case ISD::SETULT: return X86::COND_B;
3793 case ISD::SETUGT: return X86::COND_A;
3794 case ISD::SETULE: return X86::COND_BE;
3795 case ISD::SETUGE: return X86::COND_AE;
3796 }
3797 }
3799 // First determine if it is required or is profitable to flip the operands.
3801 // If LHS is a foldable load, but RHS is not, flip the condition.
3802 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
3803 !ISD::isNON_EXTLoad(RHS.getNode())) {
3804 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
3805 std::swap(LHS, RHS);
3808 switch (SetCCOpcode) {
3809 default: break;
3810 case ISD::SETOLT:
3811 case ISD::SETOLE:
3812 case ISD::SETUGT:
3813 case ISD::SETUGE:
3814 std::swap(LHS, RHS);
3815 break;
3816 }
3818 // On a floating point condition, the flags are set as follows:
3819 // ZF PF CF op
3820 // 0 | 0 | 0 | X > Y
3821 // 0 | 0 | 1 | X < Y
3822 // 1 | 0 | 0 | X == Y
3823 // 1 | 1 | 1 | unordered
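// For instance, X > Y is tested with COND_A (CF == 0 and ZF == 0), and
// unordered operands are detected via PF, hence the COND_P/COND_NP mappings
// for SETUO/SETO in the switch below.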
3824 switch (SetCCOpcode) {
3825 default: llvm_unreachable("Condcode should be pre-legalized away");
3826 case ISD::SETUEQ:
3827 case ISD::SETEQ: return X86::COND_E;
3828 case ISD::SETOLT: // flipped
3829 case ISD::SETOGT:
3830 case ISD::SETGT: return X86::COND_A;
3831 case ISD::SETOLE: // flipped
3832 case ISD::SETOGE:
3833 case ISD::SETGE: return X86::COND_AE;
3834 case ISD::SETUGT: // flipped
3835 case ISD::SETULT:
3836 case ISD::SETLT: return X86::COND_B;
3837 case ISD::SETUGE: // flipped
3838 case ISD::SETULE:
3839 case ISD::SETLE: return X86::COND_BE;
3840 case ISD::SETONE:
3841 case ISD::SETNE: return X86::COND_NE;
3842 case ISD::SETUO: return X86::COND_P;
3843 case ISD::SETO: return X86::COND_NP;
3844 case ISD::SETOEQ:
3845 case ISD::SETUNE: return X86::COND_INVALID;
3846 }
3847 }
3849 /// hasFPCMov - is there a floating point cmov for the specific X86 condition
3850 /// code. Current x86 isa includes the following FP cmov instructions:
3851 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
3852 static bool hasFPCMov(unsigned X86CC) {
3853 switch (X86CC) {
3854 default:
3855 return false;
3856 case X86::COND_B:
3857 case X86::COND_BE:
3858 case X86::COND_E:
3859 case X86::COND_P:
3860 case X86::COND_A:
3861 case X86::COND_AE:
3862 case X86::COND_NE:
3863 case X86::COND_NP:
3864 return true;
3865 }
3866 }
3868 /// isFPImmLegal - Returns true if the target can instruction select the
3869 /// specified FP immediate natively. If false, the legalizer will
3870 /// materialize the FP immediate as a load from a constant pool.
3871 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
3872 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
3873 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
3874 return true;
3875 }
3876 return false;
3877 }
3879 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
3880 ISD::LoadExtType ExtTy,
3881 EVT NewVT) const {
3882 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
3883 // relocation target a movq or addq instruction: don't let the load shrink.
3884 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
3885 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
3886 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
3887 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
3888 return true;
3889 }
3891 /// \brief Returns true if it is beneficial to convert a load of a constant
3892 /// to just the constant itself.
3893 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
3894 Type *Ty) const {
3895 assert(Ty->isIntegerTy());
3897 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3898 if (BitSize == 0 || BitSize > 64)
3899 return false;
3900 return true;
3901 }
3903 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
3904 unsigned Index) const {
3905 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
3906 return false;
3908 return (Index == 0 || Index == ResVT.getVectorNumElements());
3909 }
3911 bool X86TargetLowering::isCheapToSpeculateCttz() const {
3912 // Speculate cttz only if we can directly use TZCNT.
3913 return Subtarget->hasBMI();
3914 }
3916 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
3917 // Speculate ctlz only if we can directly use LZCNT.
3918 return Subtarget->hasLZCNT();
3919 }
3921 /// isUndefOrInRange - Return true if Val is undef or if its value falls within
3922 /// the specified range [Low, Hi).
3923 static bool isUndefOrInRange(int Val, int Low, int Hi) {
3924 return (Val < 0) || (Val >= Low && Val < Hi);
3925 }
3927 /// isUndefOrEqual - Val is either less than zero (undef) or equal to the
3928 /// specified value.
3929 static bool isUndefOrEqual(int Val, int CmpVal) {
3930 return (Val < 0 || Val == CmpVal);
3931 }
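// Examples for isUndefOrEqual (illustrative): isUndefOrEqual(-1, 3) and
// isUndefOrEqual(3, 3) return true; isUndefOrEqual(2, 3) returns false.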
3933 /// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
3934 /// from position Pos and ending in Pos+Size, falls within the specified
3935 /// sequential range [Low, Low+Size), or is undef.
3936 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
3937 unsigned Pos, unsigned Size, int Low) {
3938 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
3939 if (!isUndefOrEqual(Mask[i], Low))
3940 return false;
3941 return true;
3942 }
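// Example for isSequentialOrUndefInRange (illustrative): with
// Mask = <4, -1, 6, 7>, the call (Mask, Pos=0, Size=4, Low=4) returns true,
// since element 1 is undef and the others match the expected values 4, 6, 7.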
3944 /// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
3945 /// is suitable for input to PSHUFD. That is, it doesn't reference the other
3946 /// operand; by default it matches against the first operand.
3947 static bool isPSHUFDMask(ArrayRef<int> Mask, MVT VT,
3948 bool TestSecondOperand = false) {
3949 if (VT != MVT::v4f32 && VT != MVT::v4i32 &&
3950 VT != MVT::v2f64 && VT != MVT::v2i64)
3953 unsigned NumElems = VT.getVectorNumElements();
3954 unsigned Lo = TestSecondOperand ? NumElems : 0;
3955 unsigned Hi = Lo + NumElems;
3957 for (unsigned i = 0; i < NumElems; ++i)
3958 if (!isUndefOrInRange(Mask[i], (int)Lo, (int)Hi))
3959 return false;
3961 return true;
3962 }
3964 /// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
3965 /// is suitable for input to PSHUFHW.
3966 static bool isPSHUFHWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3967 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3968 return false;
3970 // Lower quadword copied in order or undef.
3971 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0))
3972 return false;
3974 // Upper quadword shuffled.
3975 for (unsigned i = 4; i != 8; ++i)
3976 if (!isUndefOrInRange(Mask[i], 4, 8))
3977 return false;
3979 if (VT == MVT::v16i16) {
3980 // Lower quadword copied in order or undef.
3981 if (!isSequentialOrUndefInRange(Mask, 8, 4, 8))
3982 return false;
3984 // Upper quadword shuffled.
3985 for (unsigned i = 12; i != 16; ++i)
3986 if (!isUndefOrInRange(Mask[i], 12, 16))
3987 return false;
3988 }
3990 return true;
3991 }
3993 /// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
3994 /// is suitable for input to PSHUFLW.
3995 static bool isPSHUFLWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3996 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3997 return false;
3999 // Upper quadword copied in order.
4000 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4))
4001 return false;
4003 // Lower quadword shuffled.
4004 for (unsigned i = 0; i != 4; ++i)
4005 if (!isUndefOrInRange(Mask[i], 0, 4))
4006 return false;
4008 if (VT == MVT::v16i16) {
4009 // Upper quadword copied in order.
4010 if (!isSequentialOrUndefInRange(Mask, 12, 4, 12))
4011 return false;
4013 // Lower quadword shuffled.
4014 for (unsigned i = 8; i != 12; ++i)
4015 if (!isUndefOrInRange(Mask[i], 8, 12))
4016 return false;
4017 }
4019 return true;
4020 }
4022 /// \brief Return true if the mask specifies a shuffle of elements that is
4023 /// suitable for input to intralane (palignr) or interlane (valign) vector
4024 /// shuffle.
4025 static bool isAlignrMask(ArrayRef<int> Mask, MVT VT, bool InterLane) {
4026 unsigned NumElts = VT.getVectorNumElements();
4027 unsigned NumLanes = InterLane ? 1: VT.getSizeInBits()/128;
4028 unsigned NumLaneElts = NumElts/NumLanes;
4030 // Do not handle 64-bit element shuffles with palignr.
4031 if (NumLaneElts == 2)
4032 return false;
4034 for (unsigned l = 0; l != NumElts; l+=NumLaneElts) {
4035 unsigned i;
4036 for (i = 0; i != NumLaneElts; ++i) {
4037 if (Mask[i+l] >= 0)
4038 break;
4039 }
4041 // Lane is all undef, go to next lane
4042 if (i == NumLaneElts)
4043 continue;
4045 int Start = Mask[i+l];
4047 // Make sure it's in this lane in one of the sources
4048 if (!isUndefOrInRange(Start, l, l+NumLaneElts) &&
4049 !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts))
4050 return false;
4052 // If not lane 0, then we must match lane 0
4053 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l))
4054 return false;
4056 // Correct second source to be contiguous with first source
4057 if (Start >= (int)NumElts)
4058 Start -= NumElts - NumLaneElts;
4060 // Make sure we're shifting in the right direction.
4061 if (Start <= (int)(i+l))
4062 return false;
4064 Start -= i;
4066 // Check the rest of the elements to see if they are consecutive.
4067 for (++i; i != NumLaneElts; ++i) {
4068 int Idx = Mask[i+l];
4070 // Make sure it's in this lane
4071 if (!isUndefOrInRange(Idx, l, l+NumLaneElts) &&
4072 !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts))
4073 return false;
4075 // If not lane 0, then we must match lane 0
4076 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l))
4077 return false;
4079 if (Idx >= (int)NumElts)
4080 Idx -= NumElts - NumLaneElts;
4082 if (!isUndefOrEqual(Idx, Start+i))
4083 return false;
4084 }
4085 }
4087 return true;
4088 }
4091 /// \brief Return true if the node specifies a shuffle of elements that is
4092 /// suitable for input to PALIGNR.
4093 static bool isPALIGNRMask(ArrayRef<int> Mask, MVT VT,
4094 const X86Subtarget *Subtarget) {
4095 if ((VT.is128BitVector() && !Subtarget->hasSSSE3()) ||
4096 (VT.is256BitVector() && !Subtarget->hasInt256()) ||
4097 VT.is512BitVector())
4098 // FIXME: Add AVX512BW.
4099 return false;
4101 return isAlignrMask(Mask, VT, false);
4102 }
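// Example (illustrative): for v8i16, the shuffle mask <1, 2, 3, 4, 5, 6, 7, 8>
// is accepted here and can be lowered to a PALIGNR that shifts the
// concatenation of the two sources right by one element (two bytes).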
4104 /// \brief Return true if the node specifies a shuffle of elements that is
4105 /// suitable for input to VALIGN.
4106 static bool isVALIGNMask(ArrayRef<int> Mask, MVT VT,
4107 const X86Subtarget *Subtarget) {
4108 // FIXME: Add AVX512VL.
4109 if (!VT.is512BitVector() || !Subtarget->hasAVX512())
4110 return false;
4111 return isAlignrMask(Mask, VT, true);
4112 }
4114 /// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
4115 /// the two vector operands have swapped position.
4116 static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
4117 unsigned NumElems) {
4118 for (unsigned i = 0; i != NumElems; ++i) {
4119 int idx = Mask[i];
4120 if (idx < 0)
4121 continue;
4122 else if (idx < (int)NumElems)
4123 Mask[i] = idx + NumElems;
4124 else
4125 Mask[i] = idx - NumElems;
4126 }
4127 }
4129 /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
4130 /// specifies a shuffle of elements that is suitable for input to 128/256-bit
4131 /// SHUFPS and SHUFPD. If Commuted is true, then it checks for sources to be
4132 /// reverse of what x86 shuffles want.
4133 static bool isSHUFPMask(ArrayRef<int> Mask, MVT VT, bool Commuted = false) {
4135 unsigned NumElems = VT.getVectorNumElements();
4136 unsigned NumLanes = VT.getSizeInBits()/128;
4137 unsigned NumLaneElems = NumElems/NumLanes;
4139 if (NumLaneElems != 2 && NumLaneElems != 4)
4140 return false;
4142 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4143 bool symetricMaskRequired =
4144 (VT.getSizeInBits() >= 256) && (EltSize == 32);
4146 // VSHUFPSY divides the resulting vector into 4 chunks.
4147 // The sources are also split into 4 chunks, and each destination
4148 // chunk must come from a different source chunk.
4150 // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0
4151 // SRC2 => Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
4153 // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4,
4154 // Y3..Y0, Y3..Y0, X3..X0, X3..X0
4156 // VSHUFPDY divides the resulting vector into 4 chunks.
4157 // The sources are also split into 4 chunks, and each destination
4158 // chunk must come from a different source chunk.
4160 // SRC1 => X3 X2 X1 X0
4161 // SRC2 => Y3 Y2 Y1 Y0
4163 // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0
4165 SmallVector<int, 4> MaskVal(NumLaneElems, -1);
4166 unsigned HalfLaneElems = NumLaneElems/2;
4167 for (unsigned l = 0; l != NumElems; l += NumLaneElems) {
4168 for (unsigned i = 0; i != NumLaneElems; ++i) {
4169 int Idx = Mask[i+l];
4170 unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0);
4171 if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems))
4172 return false;
4173 // For VSHUFPSY, the mask of the second half must be the same as the
4174 // first but with the appropriate offsets. This works in the same way as
4175 // VPERMILPS works with masks.
4176 if (!symetricMaskRequired || Idx < 0)
4177 continue;
4178 if (MaskVal[i] < 0) {
4179 MaskVal[i] = Idx - l;
4180 continue;
4181 }
4182 if ((signed)(Idx - l) != MaskVal[i])
4183 return false;
4184 }
4185 }
4187 return true;
4188 }
4190 /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
4191 /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
4192 static bool isMOVHLPSMask(ArrayRef<int> Mask, MVT VT) {
4193 if (!VT.is128BitVector())
4194 return false;
4196 unsigned NumElems = VT.getVectorNumElements();
4198 if (NumElems != 4)
4199 return false;
4201 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
4202 return isUndefOrEqual(Mask[0], 6) &&
4203 isUndefOrEqual(Mask[1], 7) &&
4204 isUndefOrEqual(Mask[2], 2) &&
4205 isUndefOrEqual(Mask[3], 3);
4206 }
4208 /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
4209 /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
4210 /// <2, 3, 2, 3>
4211 static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, MVT VT) {
4212 if (!VT.is128BitVector())
4213 return false;
4215 unsigned NumElems = VT.getVectorNumElements();
4217 if (NumElems != 4)
4218 return false;
4220 return isUndefOrEqual(Mask[0], 2) &&
4221 isUndefOrEqual(Mask[1], 3) &&
4222 isUndefOrEqual(Mask[2], 2) &&
4223 isUndefOrEqual(Mask[3], 3);
4224 }
4226 /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
4227 /// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
4228 static bool isMOVLPMask(ArrayRef<int> Mask, MVT VT) {
4229 if (!VT.is128BitVector())
4230 return false;
4232 unsigned NumElems = VT.getVectorNumElements();
4234 if (NumElems != 2 && NumElems != 4)
4235 return false;
4237 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4238 if (!isUndefOrEqual(Mask[i], i + NumElems))
4239 return false;
4241 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
4242 if (!isUndefOrEqual(Mask[i], i))
4243 return false;
4245 return true;
4246 }
4248 /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
4249 /// specifies a shuffle of elements that is suitable for input to MOVLHPS.
4250 static bool isMOVLHPSMask(ArrayRef<int> Mask, MVT VT) {
4251 if (!VT.is128BitVector())
4252 return false;
4254 unsigned NumElems = VT.getVectorNumElements();
4256 if (NumElems != 2 && NumElems != 4)
4257 return false;
4259 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4260 if (!isUndefOrEqual(Mask[i], i))
4261 return false;
4263 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4264 if (!isUndefOrEqual(Mask[i + e], i + NumElems))
4265 return false;
4267 return true;
4268 }
4270 /// isINSERTPSMask - Return true if the specified VECTOR_SHUFFLE operand
4271 /// specifies a shuffle of elements that is suitable for input to INSERTPS.
4272 /// i.e., if all but one element come from the same vector.
4273 static bool isINSERTPSMask(ArrayRef<int> Mask, MVT VT) {
4274 // TODO: Deal with AVX's VINSERTPS
4275 if (!VT.is128BitVector() || (VT != MVT::v4f32 && VT != MVT::v4i32))
4276 return false;
4278 unsigned CorrectPosV1 = 0;
4279 unsigned CorrectPosV2 = 0;
4280 for (int i = 0, e = (int)VT.getVectorNumElements(); i != e; ++i) {
4281 if (Mask[i] == -1) {
4282 ++CorrectPosV1;
4283 ++CorrectPosV2;
4284 continue;
4285 }
4287 if (Mask[i] == i)
4288 ++CorrectPosV1;
4289 else if (Mask[i] == i + 4)
4290 ++CorrectPosV2;
4291 }
4293 if (CorrectPosV1 == 3 || CorrectPosV2 == 3)
4294 // We have 3 elements (undefs count as elements from any vector) from one
4295 // vector, and one from another.
4296 return true;
4298 return false;
4299 }
4302 // Some special combinations that can be optimized.
4304 static
4305 SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp,
4306 SelectionDAG &DAG) {
4307 MVT VT = SVOp->getSimpleValueType(0);
4308 SDLoc dl(SVOp);
4310 if (VT != MVT::v8i32 && VT != MVT::v8f32)
4311 return SDValue();
4313 ArrayRef<int> Mask = SVOp->getMask();
4315 // These are the special masks that may be optimized.
4316 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14};
4317 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15};
4318 bool MatchEvenMask = true;
4319 bool MatchOddMask = true;
4320 for (int i=0; i<8; ++i) {
4321 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i]))
4322 MatchEvenMask = false;
4323 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i]))
4324 MatchOddMask = false;
4325 }
4327 if (!MatchEvenMask && !MatchOddMask)
4328 return SDValue();
4330 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT);
4332 SDValue Op0 = SVOp->getOperand(0);
4333 SDValue Op1 = SVOp->getOperand(1);
4335 if (MatchEvenMask) {
4336 // Shift the second operand right to 32 bits.
4337 static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 };
4338 Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask);
4339 } else {
4340 // Shift the first operand left to 32 bits.
4341 static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 };
4342 Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask);
4343 }
4344 static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15};
4345 return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask);
4346 }
4348 /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
4349 /// specifies a shuffle of elements that is suitable for input to UNPCKL.
4350 static bool isUNPCKLMask(ArrayRef<int> Mask, MVT VT,
4351 bool HasInt256, bool V2IsSplat = false) {
4353 assert(VT.getSizeInBits() >= 128 &&
4354 "Unsupported vector type for unpckl");
4356 unsigned NumElts = VT.getVectorNumElements();
4357 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4358 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4359 return false;
4361 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4362 "Unsupported vector type for unpckl");
4364 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4365 unsigned NumLanes = VT.getSizeInBits()/128;
4366 unsigned NumLaneElts = NumElts/NumLanes;
4368 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4369 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4370 int BitI = Mask[l+i];
4371 int BitI1 = Mask[l+i+1];
4372 if (!isUndefOrEqual(BitI, j))
4373 return false;
4374 if (V2IsSplat) {
4375 if (!isUndefOrEqual(BitI1, NumElts))
4376 return false;
4377 } else {
4378 if (!isUndefOrEqual(BitI1, j + NumElts))
4379 return false;
4380 }
4381 }
4382 }
4384 return true;
4385 }
4387 /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
4388 /// specifies a shuffle of elements that is suitable for input to UNPCKH.
4389 static bool isUNPCKHMask(ArrayRef<int> Mask, MVT VT,
4390 bool HasInt256, bool V2IsSplat = false) {
4391 assert(VT.getSizeInBits() >= 128 &&
4392 "Unsupported vector type for unpckh");
4394 unsigned NumElts = VT.getVectorNumElements();
4395 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4396 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4397 return false;
4399 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4400 "Unsupported vector type for unpckh");
4402 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4403 unsigned NumLanes = VT.getSizeInBits()/128;
4404 unsigned NumLaneElts = NumElts/NumLanes;
4406 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4407 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4408 int BitI = Mask[l+i];
4409 int BitI1 = Mask[l+i+1];
4410 if (!isUndefOrEqual(BitI, j))
4411 return false;
4412 if (V2IsSplat) {
4413 if (!isUndefOrEqual(BitI1, NumElts))
4414 return false;
4415 } else {
4416 if (!isUndefOrEqual(BitI1, j+NumElts))
4417 return false;
4418 }
4419 }
4420 }
4422 return true;
4423 }
4424 /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
4425 /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
4427 static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4428 unsigned NumElts = VT.getVectorNumElements();
4429 bool Is256BitVec = VT.is256BitVector();
4431 if (VT.is512BitVector())
4432 return false;
4433 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4434 "Unsupported vector type for unpckh");
4436 if (Is256BitVec && NumElts != 4 && NumElts != 8 &&
4437 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4438 return false;
4440 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern
4441 // FIXME: Need a better way to get rid of this, there's no latency difference
4442 // between UNPCKLPD and MOVDDUP, the latter should always be checked first and
4443 // the former later. We should also remove the "_undef" special mask.
4444 if (NumElts == 4 && Is256BitVec)
4445 return false;
4447 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4448 // independently on 128-bit lanes.
4449 unsigned NumLanes = VT.getSizeInBits()/128;
4450 unsigned NumLaneElts = NumElts/NumLanes;
4452 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4453 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4454 int BitI = Mask[l+i];
4455 int BitI1 = Mask[l+i+1];
4457 if (!isUndefOrEqual(BitI, j))
4458 return false;
4459 if (!isUndefOrEqual(BitI1, j))
4460 return false;
4461 }
4462 }
4464 return true;
4465 }
4467 /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
4468 /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
4469 /// <2, 6, 3, 7>
4470 static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4471 unsigned NumElts = VT.getVectorNumElements();
4473 if (VT.is512BitVector())
4474 return false;
4476 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4477 "Unsupported vector type for unpckh");
4479 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4480 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4481 return false;
4483 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4484 // independently on 128-bit lanes.
4485 unsigned NumLanes = VT.getSizeInBits()/128;
4486 unsigned NumLaneElts = NumElts/NumLanes;
4488 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4489 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4490 int BitI = Mask[l+i];
4491 int BitI1 = Mask[l+i+1];
4492 if (!isUndefOrEqual(BitI, j))
4493 return false;
4494 if (!isUndefOrEqual(BitI1, j))
4495 return false;
4496 }
4497 }
4499 return true;
4500 }
4501 // Match for INSERTI64x4/INSERTF64x4 instructions (src0[0], src1[0]) or
4502 // (src1[0], src0[1]); these manipulate 256-bit sub-vectors.
4503 static bool isINSERT64x4Mask(ArrayRef<int> Mask, MVT VT, unsigned int *Imm) {
4504 if (!VT.is512BitVector())
4505 return false;
4507 unsigned NumElts = VT.getVectorNumElements();
4508 unsigned HalfSize = NumElts/2;
4509 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, 0)) {
4510 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, NumElts)) {
4511 *Imm = 1;
4512 return true;
4513 }
4514 }
4515 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, NumElts)) {
4516 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, HalfSize)) {
4517 *Imm = 0;
4518 return true;
4519 }
4520 }
4522 return false;
4523 }
4524 /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
4525 /// specifies a shuffle of elements that is suitable for input to MOVSS,
4526 /// MOVSD, and MOVD, i.e. setting the lowest element.
4527 static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
4528 if (VT.getVectorElementType().getSizeInBits() < 32)
4529 return false;
4530 if (!VT.is128BitVector())
4531 return false;
4533 unsigned NumElts = VT.getVectorNumElements();
4535 if (!isUndefOrEqual(Mask[0], NumElts))
4536 return false;
4538 for (unsigned i = 1; i != NumElts; ++i)
4539 if (!isUndefOrEqual(Mask[i], i))
4540 return false;
4542 return true;
4543 }
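// Example (illustrative): for v4i32, Mask = <4, 1, 2, 3> is a MOVL mask:
// element 0 comes from V2 (index NumElts) and the remaining elements come
// from V1 unchanged, which is exactly what MOVSS produces.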
4545 /// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered
4546 /// as permutations between 128-bit chunks or halves. As an example, in the
4547 /// shuffle below:
4548 /// vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
4549 /// the first half comes from the second half of V1 and the second half from
4550 /// the second half of V2.
4551 static bool isVPERM2X128Mask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4552 if (!HasFp256 || !VT.is256BitVector())
4553 return false;
4555 // The shuffle result is divided into half A and half B. In total the two
4556 // sources have 4 halves, namely: C, D, E, F. The final values of A and
4557 // B must come from C, D, E or F.
4558 unsigned HalfSize = VT.getVectorNumElements()/2;
4559 bool MatchA = false, MatchB = false;
4561 // Check if A comes from one of C, D, E, F.
4562 for (unsigned Half = 0; Half != 4; ++Half) {
4563 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
4564 MatchA = true;
4565 break;
4566 }
4567 }
4569 // Check if B comes from one of C, D, E, F.
4570 for (unsigned Half = 0; Half != 4; ++Half) {
4571 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
4572 MatchB = true;
4573 break;
4574 }
4575 }
4577 return MatchA && MatchB;
4578 }
4580 /// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
4581 /// the specified VECTOR_MASK mask with VPERM2F128/VPERM2I128 instructions.
4582 static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
4583 MVT VT = SVOp->getSimpleValueType(0);
4585 unsigned HalfSize = VT.getVectorNumElements()/2;
4587 unsigned FstHalf = 0, SndHalf = 0;
4588 for (unsigned i = 0; i < HalfSize; ++i) {
4589 if (SVOp->getMaskElt(i) > 0) {
4590 FstHalf = SVOp->getMaskElt(i)/HalfSize;
4591 break;
4592 }
4593 }
4594 for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
4595 if (SVOp->getMaskElt(i) > 0) {
4596 SndHalf = SVOp->getMaskElt(i)/HalfSize;
4597 break;
4598 }
4599 }
4601 return (FstHalf | (SndHalf << 4));
4602 }
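// Worked example (illustrative): for a v8i32 mask <4, 5, 6, 7, 12, 13, 14, 15>
// (HalfSize == 4), FstHalf = 4/4 = 1 and SndHalf = 12/4 = 3, giving the
// immediate 1 | (3 << 4) = 0x31: low half from V1's high half, high half from
// V2's high half.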
4604 // Symmetric in-lane mask. Each lane has 4 elements (for imm8)
4605 static bool isPermImmMask(ArrayRef<int> Mask, MVT VT, unsigned& Imm8) {
4606 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4607 if (EltSize < 32)
4608 return false;
4610 unsigned NumElts = VT.getVectorNumElements();
4611 Imm8 = 0;
4612 if (VT.is128BitVector() || (VT.is256BitVector() && EltSize == 64)) {
4613 for (unsigned i = 0; i != NumElts; ++i) {
4614 if (Mask[i] < 0)
4615 continue;
4616 Imm8 |= Mask[i] << (i*2);
4617 }
4618 return true;
4619 }
4621 unsigned LaneSize = 4;
4622 SmallVector<int, 4> MaskVal(LaneSize, -1);
4624 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4625 for (unsigned i = 0; i != LaneSize; ++i) {
4626       if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4627         return false;
4630 if (MaskVal[i] < 0) {
4631 MaskVal[i] = Mask[i+l] - l;
4632 Imm8 |= MaskVal[i] << (i*2);
4635       if (Mask[i+l] != (signed)(MaskVal[i]+l))
4636         return false;
4642 /// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
4643 /// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
4644 /// Note that VPERMIL mask matching is different depending on whether the
4645 /// underlying type is 32 or 64 bits. For VPERMILPS the high half of the mask
4646 /// should point to the same element positions as the low half, but within the
4647 /// higher half of the source. For VPERMILPD the two lanes can be shuffled
4648 /// independently, with the same restriction that lanes can't be crossed. Also handles PSHUFDY.
4649 static bool isVPERMILPMask(ArrayRef<int> Mask, MVT VT) {
4650 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4651   if (VT.getSizeInBits() < 256 || EltSize < 32)
4652     return false;
4653   bool symmetricMaskRequired = (EltSize == 32);
4654 unsigned NumElts = VT.getVectorNumElements();
4656 unsigned NumLanes = VT.getSizeInBits()/128;
4657 unsigned LaneSize = NumElts/NumLanes;
4658 // 2 or 4 elements in one lane
4660 SmallVector<int, 4> ExpectedMaskVal(LaneSize, -1);
4661 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4662 for (unsigned i = 0; i != LaneSize; ++i) {
4663       if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4664         return false;
4665       if (symmetricMaskRequired) {
4666 if (ExpectedMaskVal[i] < 0 && Mask[i+l] >= 0) {
4667 ExpectedMaskVal[i] = Mask[i+l] - l;
4670         if (!isUndefOrEqual(Mask[i+l], ExpectedMaskVal[i]+l))
4671           return false;
4678 /// isCommutedMOVLMask - Returns true if the shuffle mask is the reverse of the
4679 /// form x86 movss wants: the lowest element must come from the lowest element
4680 /// of vector 2, and the remaining elements must come from vector 1 in order.
4681 static bool isCommutedMOVLMask(ArrayRef<int> Mask, MVT VT,
4682 bool V2IsSplat = false, bool V2IsUndef = false) {
4683   if (!VT.is128BitVector())
4684     return false;
4686 unsigned NumOps = VT.getVectorNumElements();
4687   if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
4688     return false;
4690   if (!isUndefOrEqual(Mask[0], 0))
4691     return false;
4693 for (unsigned i = 1; i != NumOps; ++i)
4694 if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
4695 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
4696           (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
4697       return false;
4699   return true;
4700 }
4702 /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4703 /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
4704 /// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
4705 static bool isMOVSHDUPMask(ArrayRef<int> Mask, MVT VT,
4706 const X86Subtarget *Subtarget) {
4707   if (!Subtarget->hasSSE3())
4708     return false;
4710 unsigned NumElems = VT.getVectorNumElements();
4712 if ((VT.is128BitVector() && NumElems != 4) ||
4713 (VT.is256BitVector() && NumElems != 8) ||
4714       (VT.is512BitVector() && NumElems != 16))
4715     return false;
4717 // "i+1" is the value the indexed mask element must have
4718 for (unsigned i = 0; i != NumElems; i += 2)
4719 if (!isUndefOrEqual(Mask[i], i+1) ||
4720         !isUndefOrEqual(Mask[i+1], i+1))
4721       return false;
4723   return true;
4724 }
4726 /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4727 /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
4728 /// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
4729 static bool isMOVSLDUPMask(ArrayRef<int> Mask, MVT VT,
4730 const X86Subtarget *Subtarget) {
4731   if (!Subtarget->hasSSE3())
4732     return false;
4734 unsigned NumElems = VT.getVectorNumElements();
4736 if ((VT.is128BitVector() && NumElems != 4) ||
4737 (VT.is256BitVector() && NumElems != 8) ||
4738       (VT.is512BitVector() && NumElems != 16))
4739     return false;
4741 // "i" is the value the indexed mask element must have
4742 for (unsigned i = 0; i != NumElems; i += 2)
4743 if (!isUndefOrEqual(Mask[i], i) ||
4744         !isUndefOrEqual(Mask[i+1], i))
4745       return false;
4747   return true;
4748 }
4750 /// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand
4751 /// specifies a shuffle of elements that is suitable for input to 256-bit
4752 /// version of MOVDDUP.
4753 static bool isMOVDDUPYMask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4754   if (!HasFp256 || !VT.is256BitVector())
4755     return false;
4757   unsigned NumElts = VT.getVectorNumElements();
4758   if (NumElts != 4)
4759     return false;
4761 for (unsigned i = 0; i != NumElts/2; ++i)
4762     if (!isUndefOrEqual(Mask[i], 0))
4763       return false;
4764 for (unsigned i = NumElts/2; i != NumElts; ++i)
4765     if (!isUndefOrEqual(Mask[i], NumElts/2))
4766       return false;
4768   return true;
4769 }
4770 /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4771 /// specifies a shuffle of elements that is suitable for input to 128-bit
4772 /// version of MOVDDUP.
4773 static bool isMOVDDUPMask(ArrayRef<int> Mask, MVT VT) {
4774   if (!VT.is128BitVector())
4775     return false;
4777 unsigned e = VT.getVectorNumElements() / 2;
4778 for (unsigned i = 0; i != e; ++i)
4779     if (!isUndefOrEqual(Mask[i], i))
4780       return false;
4781 for (unsigned i = 0; i != e; ++i)
4782     if (!isUndefOrEqual(Mask[e+i], i))
4783       return false;
4785   return true;
4786 }
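// [Editor's note: illustrative examples, not part of the original file.]
// The 128-bit MOVDDUP duplicates the low 64 bits, so the masks accepted
// above look like (undef entries are also allowed):
//   v2f64: <0, 0>         (e = 1)
//   v4f32: <0, 1, 0, 1>   (e = 2, the low 64 bits repeated)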
4787 /// isVEXTRACTIndex - Return true if the specified
4788 /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
4789 /// suitable for instructions that extract 128- or 256-bit subvectors.
4790 static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
4791 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4792   if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4793     return false;
4795 // The index should be aligned on a vecWidth-bit boundary.
4796   uint64_t Index =
4797     cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4799 MVT VT = N->getSimpleValueType(0);
4800 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4801   bool Result = (Index * ElSize) % vecWidth == 0;
4802   return Result;
4803 }
4806 /// isVINSERTIndex - Return true if the specified INSERT_SUBVECTOR
4807 /// operand specifies a subvector insert that is suitable for
4808 /// 128- or 256-bit subvector insertion.
4809 static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) {
4810 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4811   if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4812     return false;
4813 // The index should be aligned on a vecWidth-bit boundary.
4814   uint64_t Index =
4815     cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4817 MVT VT = N->getSimpleValueType(0);
4818 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4819   bool Result = (Index * ElSize) % vecWidth == 0;
4820   return Result;
4821 }
4824 bool X86::isVINSERT128Index(SDNode *N) {
4825 return isVINSERTIndex(N, 128);
4828 bool X86::isVINSERT256Index(SDNode *N) {
4829 return isVINSERTIndex(N, 256);
4832 bool X86::isVEXTRACT128Index(SDNode *N) {
4833 return isVEXTRACTIndex(N, 128);
4836 bool X86::isVEXTRACT256Index(SDNode *N) {
4837 return isVEXTRACTIndex(N, 256);
4840 /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
4841 /// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
4842 /// Handles 128-bit and 256-bit.
4843 static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
4844 MVT VT = N->getSimpleValueType(0);
4846 assert((VT.getSizeInBits() >= 128) &&
4847 "Unsupported vector type for PSHUF/SHUFP");
4849 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate
4850 // independently on 128-bit lanes.
4851 unsigned NumElts = VT.getVectorNumElements();
4852 unsigned NumLanes = VT.getSizeInBits()/128;
4853 unsigned NumLaneElts = NumElts/NumLanes;
4855 assert((NumLaneElts == 2 || NumLaneElts == 4 || NumLaneElts == 8) &&
4856 "Only supports 2, 4 or 8 elements per lane");
4858 unsigned Shift = (NumLaneElts >= 4) ? 1 : 0;
4859   unsigned Mask = 0;
4860   for (unsigned i = 0; i != NumElts; ++i) {
4861 int Elt = N->getMaskElt(i);
4862 if (Elt < 0) continue;
4863 Elt &= NumLaneElts - 1;
4864 unsigned ShAmt = (i << Shift) % 8;
4865     Mask |= Elt << ShAmt;
4866   }
4868   return Mask;
4869 }
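// [Editor's note: illustrative worked example, not part of the original file.]
// For a v4i32 PSHUFD with mask <3,2,1,0>: NumLaneElts = 4, so Shift = 1 and
// each element contributes two bits: 3<<0 | 2<<2 | 1<<4 | 0<<6 = 0x1B, the
// familiar "reverse" immediate. For a v2f64 SHUFPD (NumLaneElts = 2,
// Shift = 0) each element contributes one bit, e.g. mask <1,0> encodes as 1.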
4871 /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
4872 /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
4873 static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) {
4874 MVT VT = N->getSimpleValueType(0);
4876 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4877 "Unsupported vector type for PSHUFHW");
4879 unsigned NumElts = VT.getVectorNumElements();
4881   unsigned Mask = 0;
4882   for (unsigned l = 0; l != NumElts; l += 8) {
4883 // 8 nodes per lane, but we only care about the last 4.
4884 for (unsigned i = 0; i < 4; ++i) {
4885 int Elt = N->getMaskElt(l+i+4);
4886 if (Elt < 0) continue;
4887 Elt &= 0x3; // only 2-bits.
4888       Mask |= Elt << (i * 2);
4889     }
4890   }
4892   return Mask;
4893 }
4895 /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
4896 /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
4897 static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) {
4898 MVT VT = N->getSimpleValueType(0);
4900 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4901 "Unsupported vector type for PSHUFHW");
4903 unsigned NumElts = VT.getVectorNumElements();
4905   unsigned Mask = 0;
4906   for (unsigned l = 0; l != NumElts; l += 8) {
4907 // 8 nodes per lane, but we only care about the first 4.
4908 for (unsigned i = 0; i < 4; ++i) {
4909 int Elt = N->getMaskElt(l+i);
4910 if (Elt < 0) continue;
4911 Elt &= 0x3; // only 2-bits
4912       Mask |= Elt << (i * 2);
4913     }
4914   }
4916   return Mask;
4917 }
4919 /// \brief Return the appropriate immediate to shuffle the specified
4920 /// VECTOR_SHUFFLE mask with the PALIGNR (if InterLane is false) or with
4921 /// VALIGN (if InterLane is true) instructions.
4922 static unsigned getShuffleAlignrImmediate(ShuffleVectorSDNode *SVOp,
4923                                           bool InterLane) {
4924   MVT VT = SVOp->getSimpleValueType(0);
4925 unsigned EltSize = InterLane ? 1 :
4926 VT.getVectorElementType().getSizeInBits() >> 3;
4928 unsigned NumElts = VT.getVectorNumElements();
4929 unsigned NumLanes = VT.is512BitVector() ? 1 : VT.getSizeInBits()/128;
4930 unsigned NumLaneElts = NumElts/NumLanes;
4934 for (i = 0; i != NumElts; ++i) {
4935 Val = SVOp->getMaskElt(i);
4939 if (Val >= (int)NumElts)
4940 Val -= NumElts - NumLaneElts;
4942 assert(Val - i > 0 && "PALIGNR imm should be positive");
4943 return (Val - i) * EltSize;
4946 /// \brief Return the appropriate immediate to shuffle the specified
4947 /// VECTOR_SHUFFLE mask with the PALIGNR instruction.
4948 static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) {
4949 return getShuffleAlignrImmediate(SVOp, false);
4952 /// \brief Return the appropriate immediate to shuffle the specified
4953 /// VECTOR_SHUFFLE mask with the VALIGN instruction.
4954 static unsigned getShuffleVALIGNImmediate(ShuffleVectorSDNode *SVOp) {
4955 return getShuffleAlignrImmediate(SVOp, true);
4959 static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) {
4960 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4961 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4962 llvm_unreachable("Illegal extract subvector for VEXTRACT");
4964   uint64_t Index =
4965     cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4967 MVT VecVT = N->getOperand(0).getSimpleValueType();
4968 MVT ElVT = VecVT.getVectorElementType();
4970 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4971 return Index / NumElemsPerChunk;
4974 static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) {
4975 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4976 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4977 llvm_unreachable("Illegal insert subvector for VINSERT");
4979   uint64_t Index =
4980     cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4982 MVT VecVT = N->getSimpleValueType(0);
4983 MVT ElVT = VecVT.getVectorElementType();
4985 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4986 return Index / NumElemsPerChunk;
4989 /// getExtractVEXTRACT128Immediate - Return the appropriate immediate
4990 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128
4991 /// and VEXTRACTI128 instructions.
4992 unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) {
4993 return getExtractVEXTRACTImmediate(N, 128);
4996 /// getExtractVEXTRACT256Immediate - Return the appropriate immediate
4997 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF64x4
4998 /// and VEXTRACTI64x4 instructions.
4999 unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) {
5000 return getExtractVEXTRACTImmediate(N, 256);
5003 /// getInsertVINSERT128Immediate - Return the appropriate immediate
5004 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
5005 /// and VINSERTI128 instructions.
5006 unsigned X86::getInsertVINSERT128Immediate(SDNode *N) {
5007 return getInsertVINSERTImmediate(N, 128);
5010 /// getInsertVINSERT256Immediate - Return the appropriate immediate
5011 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF64x4
5012 /// and VINSERTI64x4 instructions.
5013 unsigned X86::getInsertVINSERT256Immediate(SDNode *N) {
5014   return getInsertVINSERTImmediate(N, 256);
5015 }
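// [Editor's note: the sketch below is an illustrative addition, not part of
// the original file; modelSubvectorImm is a hypothetical name.]
// The immediate for these instructions is just the index of the 128- or
// 256-bit chunk being accessed:
#if 0
// Extracting elements 4..7 of a v8f32 as a v4f32 (vecWidth = 128):
//   NumElemsPerChunk = 128 / 32 = 4, Index = 4 -> immediate = 4 / 4 = 1,
//   i.e. "vextractf128 $1, %ymm0, %xmm1".
static unsigned modelSubvectorImm(unsigned Index, unsigned EltBits,
                                  unsigned VecWidth) {
  unsigned NumElemsPerChunk = VecWidth / EltBits;
  return Index / NumElemsPerChunk;
}
#endif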
5017 /// isZero - Returns true if Elt is a constant integer zero
5018 static bool isZero(SDValue V) {
5019 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
5020 return C && C->isNullValue();
5023 /// isZeroNode - Returns true if Elt is a constant zero or a floating point
5024 /// constant +0.0.
5025 bool X86::isZeroNode(SDValue Elt) {
5026   if (isZero(Elt))
5027     return true;
5028   if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Elt))
5029     return CFP->getValueAPF().isPosZero();
5030   return false;
5031 }
5033 /// ShouldXformToMOVHLPS - Return true if the node should be transformed to
5034 /// match movhlps. The lower half elements should come from the upper half of
5035 /// V1 (and in order), and the upper half elements should come from the upper
5036 /// half of V2 (and in order).
5037 static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, MVT VT) {
5038 if (!VT.is128BitVector())
5040 if (VT.getVectorNumElements() != 4)
5042 for (unsigned i = 0, e = 2; i != e; ++i)
5043 if (!isUndefOrEqual(Mask[i], i+2))
5045 for (unsigned i = 2; i != 4; ++i)
5046     if (!isUndefOrEqual(Mask[i], i+4))
5047       return false;
5049   return true;
5050 }
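// [Editor's note: illustrative example, not part of the original file.]
// For v4f32 the accepted mask is <2,3,6,7> (undefs allowed): the low result
// half is V1's upper half and the high result half is V2's upper half,
// which is exactly what MOVHLPS produces.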
5051 /// isScalarLoadToVector - Returns true if the node is a scalar load that
5052 /// is promoted to a vector. It also returns the LoadSDNode by reference if
5053 /// requested.
5054 static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = nullptr) {
5055 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
5057 N = N->getOperand(0).getNode();
5058 if (!ISD::isNON_EXTLoad(N))
5060   if (LD)
5061     *LD = cast<LoadSDNode>(N);
5062   return true;
5063 }
5065 // Test whether the given value is a vector value which will be legalized
5066 // into a load.
5067 static bool WillBeConstantPoolLoad(SDNode *N) {
5068 if (N->getOpcode() != ISD::BUILD_VECTOR)
5071 // Check for any non-constant elements.
5072 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
5073 switch (N->getOperand(i).getNode()->getOpcode()) {
5074     case ISD::Constant:
5075     case ISD::ConstantFP:
5076     case ISD::UNDEF:
5077       break;
5078     default:
5079       return false;
5080     }
5082 // Vectors of all-zeros and all-ones are materialized with special
5083 // instructions rather than being loaded.
5084 return !ISD::isBuildVectorAllZeros(N) &&
5085 !ISD::isBuildVectorAllOnes(N);
5088 /// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
5089 /// match movlp{s|d}. The lower half elements should come from lower half of
5090 /// V1 (and in order), and the upper half elements should come from the upper
5091 /// half of V2 (and in order). And since V1 will become the source of the
5092 /// MOVLP, it must be either a vector load or a scalar load to vector.
5093 static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
5094 ArrayRef<int> Mask, MVT VT) {
5095 if (!VT.is128BitVector())
5098 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
5100   // If V2 is a vector load, don't do this transformation. We will try to use
5101   // load folding with the shufps op instead.
5102 if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2))
5105 unsigned NumElems = VT.getVectorNumElements();
5107 if (NumElems != 2 && NumElems != 4)
5109 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
5110 if (!isUndefOrEqual(Mask[i], i))
5112 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
5113 if (!isUndefOrEqual(Mask[i], i+NumElems))
5118 /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
5119 /// to a zero vector.
5120 /// FIXME: move to dag combiner / method on ShuffleVectorSDNode
5121 static bool isZeroShuffle(ShuffleVectorSDNode *N) {
5122 SDValue V1 = N->getOperand(0);
5123 SDValue V2 = N->getOperand(1);
5124 unsigned NumElems = N->getValueType(0).getVectorNumElements();
5125 for (unsigned i = 0; i != NumElems; ++i) {
5126 int Idx = N->getMaskElt(i);
5127 if (Idx >= (int)NumElems) {
5128 unsigned Opc = V2.getOpcode();
5129 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
5131 if (Opc != ISD::BUILD_VECTOR ||
5132 !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
5134 } else if (Idx >= 0) {
5135 unsigned Opc = V1.getOpcode();
5136 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
5138 if (Opc != ISD::BUILD_VECTOR ||
5139 !X86::isZeroNode(V1.getOperand(Idx)))
5146 /// getZeroVector - Returns a vector of specified type with all zero elements.
5148 static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
5149 SelectionDAG &DAG, SDLoc dl) {
5150 assert(VT.isVector() && "Expected a vector type");
5152 // Always build SSE zero vectors as <4 x i32> bitcasted
5153   // to their dest type. This ensures they get CSE'd.
5154   SDValue Vec;
5155 if (VT.is128BitVector()) { // SSE
5156 if (Subtarget->hasSSE2()) { // SSE2
5157 SDValue Cst = DAG.getConstant(0, MVT::i32);
5158       Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5159     } else { // SSE1
5160 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5161 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
5163 } else if (VT.is256BitVector()) { // AVX
5164 if (Subtarget->hasInt256()) { // AVX2
5165 SDValue Cst = DAG.getConstant(0, MVT::i32);
5166 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5167       Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5168     } else { // AVX
5169 // 256-bit logic and arithmetic instructions in AVX are all
5170 // floating-point, no support for integer ops. Emit fp zeroed vectors.
5171 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5172 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5173 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops);
5175 } else if (VT.is512BitVector()) { // AVX-512
5176 SDValue Cst = DAG.getConstant(0, MVT::i32);
5177 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
5178 Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5179 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
5180 } else if (VT.getScalarType() == MVT::i1) {
5181 assert(VT.getVectorNumElements() <= 16 && "Unexpected vector type");
5182 SDValue Cst = DAG.getConstant(0, MVT::i1);
5183 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
5184 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
5186 llvm_unreachable("Unexpected vector type");
5188 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5191 /// getOnesVector - Returns a vector of specified type with all bits set.
5192 /// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
5193 /// no AVX2 support, use two <4 x i32> vectors inserted in an <8 x i32> appropriately.
5194 /// Then bitcast to their original type, ensuring they get CSE'd.
5195 static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG,
5196                              SDLoc dl) {
5197 assert(VT.isVector() && "Expected a vector type");
5199 SDValue Cst = DAG.getConstant(~0U, MVT::i32);
5201 if (VT.is256BitVector()) {
5202 if (HasInt256) { // AVX2
5203 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5204 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5205     } else { // AVX
5206       Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5207 Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
5209 } else if (VT.is128BitVector()) {
5210 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5212 llvm_unreachable("Unexpected vector type");
5214 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5217 /// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
5218 /// that point to V2 point to its first element.
5219 static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) {
5220 for (unsigned i = 0; i != NumElems; ++i) {
5221 if (Mask[i] > (int)NumElems) {
5227 /// getMOVL - Returns a vector_shuffle node for a movs{s|d}, movd
5228 /// operation of specified width.
5229 static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
5231 unsigned NumElems = VT.getVectorNumElements();
5232 SmallVector<int, 8> Mask;
5233 Mask.push_back(NumElems);
5234 for (unsigned i = 1; i != NumElems; ++i)
5235     Mask.push_back(i);
5236   return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5239 /// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
5240 static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5242 unsigned NumElems = VT.getVectorNumElements();
5243 SmallVector<int, 8> Mask;
5244 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
5245     Mask.push_back(i);
5246     Mask.push_back(i + NumElems);
5248 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5251 /// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
5252 static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5254 unsigned NumElems = VT.getVectorNumElements();
5255 SmallVector<int, 8> Mask;
5256 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
5257 Mask.push_back(i + Half);
5258 Mask.push_back(i + NumElems + Half);
5260   return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5261 }
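// [Editor's note: illustrative examples, not part of the original file.]
// For v4i32 the two helpers above build the canonical interleave masks:
//   getUnpackl -> <0, 4, 1, 5>   (PUNPCKLDQ, interleave the low halves)
//   getUnpackh -> <2, 6, 3, 7>   (PUNPCKHDQ, interleave the high halves)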
5263 // PromoteSplati8i16 - i16 and i8 vector types can't be used directly by
5264 // a generic shuffle instruction because the target has no such instructions.
5265 // Generate shuffles which repeat i16 and i8 several times until they can be
5266 // represented by v4f32 and then be manipulated by target supported shuffles.
5267 static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
5268 MVT VT = V.getSimpleValueType();
5269 int NumElems = VT.getVectorNumElements();
5272 while (NumElems > 4) {
5273 if (EltNo < NumElems/2) {
5274       V = getUnpackl(DAG, dl, VT, V, V);
5275     } else {
5276       V = getUnpackh(DAG, dl, VT, V, V);
5277       EltNo -= NumElems/2;
5278     }
5279     NumElems >>= 1;
5280   }
5282   return V;
5283 }
5284 /// getLegalSplat - Generate a legal splat with supported x86 shuffles
5285 static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
5286 MVT VT = V.getSimpleValueType();
5289 if (VT.is128BitVector()) {
5290 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
5291 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
5292     V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32),
5293                              &SplatMask[0]);
5294 } else if (VT.is256BitVector()) {
5295     // To use VPERMILPS to splat scalars, the second half of the indices must
5296 // refer to the higher part, which is a duplication of the lower one,
5297 // because VPERMILPS can only handle in-lane permutations.
5298 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo,
5299 EltNo+4, EltNo+4, EltNo+4, EltNo+4 };
5301 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V);
5302     V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32),
5303                              &SplatMask[0]);
5304   } else {
5305     llvm_unreachable("Vector size not supported");
5307 return DAG.getNode(ISD::BITCAST, dl, VT, V);
5310 /// PromoteSplat - Splat is promoted to target supported vector shuffles.
5311 static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
5312 MVT SrcVT = SV->getSimpleValueType(0);
5313 SDValue V1 = SV->getOperand(0);
5316 int EltNo = SV->getSplatIndex();
5317 int NumElems = SrcVT.getVectorNumElements();
5318 bool Is256BitVec = SrcVT.is256BitVector();
5320 assert(((SrcVT.is128BitVector() && NumElems > 4) || Is256BitVec) &&
5321 "Unknown how to promote splat for type");
5323 // Extract the 128-bit part containing the splat element and update
5324 // the splat element index when it refers to the higher register.
5325   if (Is256BitVec) {
5326     V1 = Extract128BitVector(V1, EltNo, DAG, dl);
5327 if (EltNo >= NumElems/2)
5328       EltNo -= NumElems/2;
5329   }
5331 // All i16 and i8 vector types can't be used directly by a generic shuffle
5332 // instruction because the target has no such instruction. Generate shuffles
5333 // which repeat i16 and i8 several times until they fit in i32, and then can
5334   // be manipulated by target supported shuffles.
5335 MVT EltVT = SrcVT.getVectorElementType();
5336 if (EltVT == MVT::i8 || EltVT == MVT::i16)
5337 V1 = PromoteSplati8i16(V1, DAG, EltNo);
5339 // Recreate the 256-bit vector and place the same 128-bit vector
5340 // into the low and high part. This is necessary because we want
5341 // to use VPERM* to shuffle the vectors
5342   if (Is256BitVec)
5343     V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1);
5346   return getLegalSplat(DAG, V1, EltNo);
5347 }
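// [Editor's note: illustrative walk-through, not part of the original file;
// it assumes the code above behaves as written.]
// Splatting element 5 of a v8i16: EltNo = 5 >= NumElems/2, so one unpackh
// step turns V into <4,4,5,5,6,6,7,7> and EltNo becomes 1. Viewed as v4f32,
// 32-bit element 1 now holds the pair <5,5>, so getLegalSplat's shuffle with
// mask <1,1,1,1> replicates the original i16 value across the whole vector.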
5349 /// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
5350 /// vector and a zero or undef vector. This produces a shuffle where the low
5351 /// element of V2 is swizzled into the zero/undef vector, landing at element
5352 /// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
5353 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
5354                                            bool IsZero,
5355                                            const X86Subtarget *Subtarget,
5356                                            SelectionDAG &DAG) {
5357   MVT VT = V2.getSimpleValueType();
5358   SDValue V1 = IsZero
5359     ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
5360 unsigned NumElems = VT.getVectorNumElements();
5361 SmallVector<int, 16> MaskVec;
5362 for (unsigned i = 0; i != NumElems; ++i)
5363 // If this is the insertion idx, put the low elt of V2 here.
5364 MaskVec.push_back(i == Idx ? NumElems : i);
5365 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]);
5368 /// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
5369 /// target specific opcode. Returns true if the Mask could be calculated. Sets
5370 /// IsUnary to true if it only uses one source. Note that this will set IsUnary for
5371 /// shuffles which use a single input multiple times, and in those cases it will
5372 /// adjust the mask to only have indices within that single input.
5373 static bool getTargetShuffleMask(SDNode *N, MVT VT,
5374 SmallVectorImpl<int> &Mask, bool &IsUnary) {
5375 unsigned NumElems = VT.getVectorNumElements();
5376   SDValue ImmN;
5377   IsUnary = false;
5379   bool IsFakeUnary = false;
5380 switch(N->getOpcode()) {
5381 case X86ISD::BLENDI:
5382 ImmN = N->getOperand(N->getNumOperands()-1);
5383 DecodeBLENDMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5384     break;
5385   case X86ISD::SHUFP:
5386     ImmN = N->getOperand(N->getNumOperands()-1);
5387 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5388 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5390 case X86ISD::UNPCKH:
5391 DecodeUNPCKHMask(VT, Mask);
5392 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5394 case X86ISD::UNPCKL:
5395 DecodeUNPCKLMask(VT, Mask);
5396 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5398 case X86ISD::MOVHLPS:
5399 DecodeMOVHLPSMask(NumElems, Mask);
5400 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5402 case X86ISD::MOVLHPS:
5403 DecodeMOVLHPSMask(NumElems, Mask);
5404 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5406 case X86ISD::PALIGNR:
5407 ImmN = N->getOperand(N->getNumOperands()-1);
5408 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5410 case X86ISD::PSHUFD:
5411 case X86ISD::VPERMILPI:
5412 ImmN = N->getOperand(N->getNumOperands()-1);
5413 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5416 case X86ISD::PSHUFHW:
5417 ImmN = N->getOperand(N->getNumOperands()-1);
5418 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5421 case X86ISD::PSHUFLW:
5422 ImmN = N->getOperand(N->getNumOperands()-1);
5423 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5426 case X86ISD::PSHUFB: {
5428 SDValue MaskNode = N->getOperand(1);
5429 while (MaskNode->getOpcode() == ISD::BITCAST)
5430 MaskNode = MaskNode->getOperand(0);
5432 if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
5433 // If we have a build-vector, then things are easy.
5434 EVT VT = MaskNode.getValueType();
5435 assert(VT.isVector() &&
5436 "Can't produce a non-vector with a build_vector!");
5437 if (!VT.isInteger())
5440 int NumBytesPerElement = VT.getVectorElementType().getSizeInBits() / 8;
5442 SmallVector<uint64_t, 32> RawMask;
5443 for (int i = 0, e = MaskNode->getNumOperands(); i < e; ++i) {
5444 SDValue Op = MaskNode->getOperand(i);
5445 if (Op->getOpcode() == ISD::UNDEF) {
5446 RawMask.push_back((uint64_t)SM_SentinelUndef);
5449 auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
5452 APInt MaskElement = CN->getAPIntValue();
5454 // We now have to decode the element which could be any integer size and
5455 // extract each byte of it.
5456 for (int j = 0; j < NumBytesPerElement; ++j) {
5457 // Note that this is x86 and so always little endian: the low byte is
5458 // the first byte of the mask.
5459 RawMask.push_back(MaskElement.getLoBits(8).getZExtValue());
5460 MaskElement = MaskElement.lshr(8);
5463 DecodePSHUFBMask(RawMask, Mask);
5467 auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
5471 SDValue Ptr = MaskLoad->getBasePtr();
5472 if (Ptr->getOpcode() == X86ISD::Wrapper)
5473 Ptr = Ptr->getOperand(0);
5475 auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
5476 if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
5479 if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
5480 DecodePSHUFBMask(C, Mask);
5486 case X86ISD::VPERMI:
5487 ImmN = N->getOperand(N->getNumOperands()-1);
5488 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5489     break;
5490   case X86ISD::MOVSS:
5491   case X86ISD::MOVSD:
5493     DecodeScalarMoveMask(VT, /* IsLoad */ false, Mask);
5495 case X86ISD::VPERM2X128:
5496 ImmN = N->getOperand(N->getNumOperands()-1);
5497 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5498 if (Mask.empty()) return false;
5500 case X86ISD::MOVSLDUP:
5501 DecodeMOVSLDUPMask(VT, Mask);
5504 case X86ISD::MOVSHDUP:
5505 DecodeMOVSHDUPMask(VT, Mask);
5508 case X86ISD::MOVDDUP:
5509 DecodeMOVDDUPMask(VT, Mask);
5512 case X86ISD::MOVLHPD:
5513 case X86ISD::MOVLPD:
5514 case X86ISD::MOVLPS:
5515 // Not yet implemented
5517 default: llvm_unreachable("unknown target shuffle node");
5520 // If we have a fake unary shuffle, the shuffle mask is spread across two
5521 // inputs that are actually the same node. Re-map the mask to always point
5522 // into the first input.
5523   if (IsFakeUnary)
5524     for (int &M : Mask)
5525       if (M >= (int)Mask.size())
5526         M -= Mask.size();
5528   return true;
5529 }
5531 /// getShuffleScalarElt - Returns the scalar element that will make up the ith
5532 /// element of the result of the vector shuffle.
5533 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
5534                                    unsigned Depth) {
5535   if (Depth == 6)
5536     return SDValue(); // Limit search depth.
5538 SDValue V = SDValue(N, 0);
5539 EVT VT = V.getValueType();
5540 unsigned Opcode = V.getOpcode();
5542 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
5543 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
5544 int Elt = SV->getMaskElt(Index);
5546     if (Elt < 0)
5547       return DAG.getUNDEF(VT.getVectorElementType());
5549 unsigned NumElems = VT.getVectorNumElements();
5550 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
5551 : SV->getOperand(1);
5552 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
5555 // Recurse into target specific vector shuffles to find scalars.
5556 if (isTargetShuffle(Opcode)) {
5557 MVT ShufVT = V.getSimpleValueType();
5558 unsigned NumElems = ShufVT.getVectorNumElements();
5559 SmallVector<int, 16> ShuffleMask;
5560     bool IsUnary;
5562     if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
5563       return SDValue();
5565 int Elt = ShuffleMask[Index];
5566     if (Elt < 0)
5567       return DAG.getUNDEF(ShufVT.getVectorElementType());
5569 SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
5570                                           : N->getOperand(1);
5571     return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
5572                                Depth+1);
5573   }
5575 // Actual nodes that may contain scalar elements
5576 if (Opcode == ISD::BITCAST) {
5577 V = V.getOperand(0);
5578 EVT SrcVT = V.getValueType();
5579 unsigned NumElems = VT.getVectorNumElements();
5581     if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
5582       return SDValue();
5583   }
5585 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
5586 return (Index == 0) ? V.getOperand(0)
5587 : DAG.getUNDEF(VT.getVectorElementType());
5589 if (V.getOpcode() == ISD::BUILD_VECTOR)
5590     return V.getOperand(Index);
5592   return SDValue();
5593 }
5595 /// getNumOfConsecutiveZeros - Return the number of elements of a vector
5596 /// shuffle operation that consecutively come from a zero. The
5597 /// search can start in two different directions, from left or right.
5598 /// We count undefs as zeros until PreferredNum is reached.
5599 static unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp,
5600                                          unsigned NumElems, bool ZerosFromLeft,
5601                                          SelectionDAG &DAG,
5602 unsigned PreferredNum = -1U) {
5603 unsigned NumZeros = 0;
5604 for (unsigned i = 0; i != NumElems; ++i) {
5605 unsigned Index = ZerosFromLeft ? i : NumElems - i - 1;
5606 SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
5610     if (X86::isZeroNode(Elt))
5611       ++NumZeros;
5612     else if (Elt.getOpcode() == ISD::UNDEF) // Undef as zero up to PreferredNum.
5613       NumZeros = std::min(NumZeros + 1, PreferredNum);
5614     else
5615       break;
5616   }
5618   return NumZeros;
5619 }
5621 /// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
5622 /// correspond consecutively to elements from one of the vector operands,
5623 /// starting from its index OpIdx. Also sets OpNum to the source operand index.
5624 static
5625 bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
5626 unsigned MaskI, unsigned MaskE, unsigned OpIdx,
5627 unsigned NumElems, unsigned &OpNum) {
5628 bool SeenV1 = false;
5629 bool SeenV2 = false;
5631 for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
5632 int Idx = SVOp->getMaskElt(i);
5633     // Ignore undef indices
5634     if (Idx < 0)
5635       continue;
5637     if (Idx < (int)NumElems)
5638       SeenV1 = true;
5639     else
5640       SeenV2 = true;
5642 // Only accept consecutive elements from the same vector
5643     if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
5644       return false;
5645   }
5647   OpNum = SeenV1 ? 0 : 1;
5648   return true;
5649 }
5651 /// isVectorShiftRight - Returns true if the shuffle can be implemented as a
5652 /// logical right shift of a vector.
5653 static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5654 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5655   unsigned NumElems =
5656       SVOp->getSimpleValueType(0).getVectorNumElements();
5657 unsigned NumZeros = getNumOfConsecutiveZeros(
5658 SVOp, NumElems, false /* check zeros from right */, DAG,
5659       SVOp->getMaskElt(0));
5660   unsigned OpSrc;
5662   if (!NumZeros)
5663     return false;
5665 // Considering the elements in the mask that are not consecutive zeros,
5666 // check if they consecutively come from only one of the source vectors.
5668 // V1 = {X, A, B, C} 0
5670 // vector_shuffle V1, V2 <1, 2, 3, X>
5672 if (!isShuffleMaskConsecutive(SVOp,
5673 0, // Mask Start Index
5674 NumElems-NumZeros, // Mask End Index(exclusive)
5675 NumZeros, // Where to start looking in the src vector
5676 NumElems, // Number of elements in vector
5677                             OpSrc))         // Which source operand ?
5678     return false;
5680   isLeft = false;
5681   ShVal = SVOp->getOperand(OpSrc);
5682   ShAmt = NumZeros;
5683   return true;
5684 }
5686 /// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
5687 /// logical left shift of a vector.
5688 static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5689 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5690   unsigned NumElems =
5691       SVOp->getSimpleValueType(0).getVectorNumElements();
5692 unsigned NumZeros = getNumOfConsecutiveZeros(
5693 SVOp, NumElems, true /* check zeros from left */, DAG,
5694       NumElems - SVOp->getMaskElt(NumElems - 1) - 1);
5695   unsigned OpSrc;
5697   if (!NumZeros)
5698     return false;
5700 // Considering the elements in the mask that are not consecutive zeros,
5701 // check if they consecutively come from only one of the source vectors.
5703 // 0 { A, B, X, X } = V2
5705 // vector_shuffle V1, V2 <X, X, 4, 5>
5707 if (!isShuffleMaskConsecutive(SVOp,
5708 NumZeros, // Mask Start Index
5709 NumElems, // Mask End Index(exclusive)
5710 0, // Where to start looking in the src vector
5711 NumElems, // Number of elements in vector
5712                             OpSrc))         // Which source operand ?
5713     return false;
5715   isLeft = true;
5716   ShVal = SVOp->getOperand(OpSrc);
5717   ShAmt = NumZeros;
5718   return true;
5719 }
5721 /// isVectorShift - Returns true if the shuffle can be implemented as a
5722 /// logical left or right shift of a vector.
5723 static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5724 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5725   // Although the logic below supports any bitwidth size, there are no
5726 // shift instructions which handle more than 128-bit vectors.
5727 if (!SVOp->getSimpleValueType(0).is128BitVector())
5730 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
5731       isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt))
5732     return true;
5734   return false;
5735 }
5737 /// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
5739 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
5740 unsigned NumNonZero, unsigned NumZero,
5741                                      SelectionDAG &DAG,
5742                                      const X86Subtarget* Subtarget,
5743                                      const TargetLowering &TLI) {
5744   if (NumNonZero > 8)
5745     return SDValue();
5747   SDLoc dl(Op);
5748   SDValue V;
5749   bool First = true;
5750 for (unsigned i = 0; i < 16; ++i) {
5751 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
5752     if (ThisIsNonZero && First) {
5753       if (NumZero)
5754         V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5755       else
5756         V = DAG.getUNDEF(MVT::v8i16);
5757       First = false;
5758     }
5760     if ((i & 1) != 0) {
5761       SDValue ThisElt, LastElt;
5762 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
5763 if (LastIsNonZero) {
5764 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
5765 MVT::i16, Op.getOperand(i-1));
5767 if (ThisIsNonZero) {
5768 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
5769 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
5770 ThisElt, DAG.getConstant(8, MVT::i8));
5771         if (LastIsNonZero)
5772           ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
5773       } else
5774         ThisElt = LastElt;
5776 if (ThisElt.getNode())
5777 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
5778 DAG.getIntPtrConstant(i/2));
5779     }
5780   }
5782   return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
5783 }
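// [Editor's note: the sketch below is an illustrative addition, not part of
// the original file; modelPackBytePair is a hypothetical name.]
// The loop above packs pairs of i8 elements into i16 lanes: for an odd index
// i, element i becomes the high byte and element i-1 the low byte of v8i16
// lane i/2 (x86 is little endian, so the even-indexed byte is the low one):
#if 0
static unsigned short modelPackBytePair(unsigned char Lo, unsigned char Hi) {
  // e.g. Lo = 0x34 (element 2k), Hi = 0x12 (element 2k+1) -> lane value 0x1234.
  return (unsigned short)((Hi << 8) | Lo);
}
#endif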
5785 /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
5787 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
5788 unsigned NumNonZero, unsigned NumZero,
5789                                      SelectionDAG &DAG,
5790                                      const X86Subtarget* Subtarget,
5791                                      const TargetLowering &TLI) {
5792   if (NumNonZero > 4)
5793     return SDValue();
5795   SDLoc dl(Op);
5796   SDValue V;
5797   bool First = true;
5798 for (unsigned i = 0; i < 8; ++i) {
5799     bool isNonZero = (NonZeros & (1 << i)) != 0;
5800     if (isNonZero) {
5801       if (First) {
5802         if (NumZero)
5803           V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5804         else
5805           V = DAG.getUNDEF(MVT::v8i16);
5806         First = false;
5807       }
5808 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
5809 MVT::v8i16, V, Op.getOperand(i),
5810                       DAG.getIntPtrConstant(i));
5811     }
5812   }
5814   return V;
5815 }
5817 /// LowerBuildVectorv4x32 - Custom lower build_vector of v4i32 or v4f32.
5818 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
5819 const X86Subtarget *Subtarget,
5820 const TargetLowering &TLI) {
5821 // Find all zeroable elements.
5822   bool Zeroable[4];
5823   for (int i=0; i < 4; ++i) {
5824 SDValue Elt = Op->getOperand(i);
5825 Zeroable[i] = (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt));
5827 assert(std::count_if(&Zeroable[0], &Zeroable[4],
5828 [](bool M) { return !M; }) > 1 &&
5829 "We expect at least two non-zero elements!");
5831 // We only know how to deal with build_vector nodes where elements are either
5832 // zeroable or extract_vector_elt with constant index.
5833 SDValue FirstNonZero;
5834 unsigned FirstNonZeroIdx;
5835 for (unsigned i=0; i < 4; ++i) {
5836     if (Zeroable[i])
5837       continue;
5838     SDValue Elt = Op->getOperand(i);
5839 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5840         !isa<ConstantSDNode>(Elt.getOperand(1)))
5841       return SDValue();
5842 // Make sure that this node is extracting from a 128-bit vector.
5843 MVT VT = Elt.getOperand(0).getSimpleValueType();
5844     if (!VT.is128BitVector())
5845       return SDValue();
5846 if (!FirstNonZero.getNode()) {
5847       FirstNonZero = Elt;
5848       FirstNonZeroIdx = i;
5852 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
5853 SDValue V1 = FirstNonZero.getOperand(0);
5854 MVT VT = V1.getSimpleValueType();
5856 // See if this build_vector can be lowered as a blend with zero.
5857   SDValue Elt;
5858   unsigned EltMaskIdx, EltIdx;
5859   int Mask[4];
5860 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
5861 if (Zeroable[EltIdx]) {
5862 // The zero vector will be on the right hand side.
5863 Mask[EltIdx] = EltIdx+4;
5864       continue;
5865     }
5867     Elt = Op->getOperand(EltIdx);
5868 // By construction, Elt is a EXTRACT_VECTOR_ELT with constant index.
5869 EltMaskIdx = cast<ConstantSDNode>(Elt.getOperand(1))->getZExtValue();
5870 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
5871       break;
5872     Mask[EltIdx] = EltIdx;
5873   }
5875   if (EltIdx == 4) {
5876     // Let the shuffle legalizer deal with blend operations.
5877 SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
5878 if (V1.getSimpleValueType() != VT)
5879 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), VT, V1);
5880 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, &Mask[0]);
5881   }
5883   // See if we can lower this build_vector to an INSERTPS.
5884   if (!Subtarget->hasSSE41())
5885     return SDValue();
5887 SDValue V2 = Elt.getOperand(0);
5888   if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
5889     V1 = SDValue();
5891 bool CanFold = true;
5892 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
5893     if (Zeroable[i])
5894       continue;
5896     SDValue Current = Op->getOperand(i);
5897     SDValue SrcVector = Current->getOperand(0);
5898     if (!V1.getNode())
5899       V1 = SrcVector;
5900 CanFold = SrcVector == V1 &&
5901 cast<ConstantSDNode>(Current.getOperand(1))->getZExtValue() == i;
5907 assert(V1.getNode() && "Expected at least two non-zero elements!");
5908 if (V1.getSimpleValueType() != MVT::v4f32)
5909 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), MVT::v4f32, V1);
5910 if (V2.getSimpleValueType() != MVT::v4f32)
5911 V2 = DAG.getNode(ISD::BITCAST, SDLoc(V2), MVT::v4f32, V2);
5913 // Ok, we can emit an INSERTPS instruction.
5914   unsigned ZMask = 0;
5915   for (int i = 0; i < 4; ++i)
5916     if (Zeroable[i])
5917       ZMask |= 1 << i;
5919 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
5920 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
5921 SDValue Result = DAG.getNode(X86ISD::INSERTPS, SDLoc(Op), MVT::v4f32, V1, V2,
5922 DAG.getIntPtrConstant(InsertPSMask));
5923   return DAG.getNode(ISD::BITCAST, SDLoc(Op), VT, Result);
5924 }
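// [Editor's note: the sketch below is an illustrative addition, not part of
// the original file; modelInsertPSImm is a hypothetical name.]
// INSERTPS immediate layout: bits 7:6 select the source element (CountS),
// bits 5:4 the destination slot (CountD), bits 3:0 are the zero mask:
#if 0
// Taking element 2 of V2 into slot 1 of V1 while zeroing slot 3:
//   modelInsertPSImm(2, 1, 1 << 3) == (2 << 6) | (1 << 4) | 0x8 == 0x98.
static unsigned modelInsertPSImm(unsigned SrcElt, unsigned DstElt,
                                 unsigned ZeroMask) {
  return (SrcElt << 6) | (DstElt << 4) | (ZeroMask & 0xF);
}
#endif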
5926 /// Return a vector logical shift node.
5927 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
5928 unsigned NumBits, SelectionDAG &DAG,
5929 const TargetLowering &TLI, SDLoc dl) {
5930 assert(VT.is128BitVector() && "Unknown type for VShift");
5931 MVT ShVT = MVT::v2i64;
5932 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
5933 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
5934 MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(SrcOp.getValueType());
5935 SDValue ShiftVal = DAG.getConstant(NumBits, ScalarShiftTy);
5936 return DAG.getNode(ISD::BITCAST, dl, VT,
5937 DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
5940 static SDValue
5941 LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
5943 // Check if the scalar load can be widened into a vector load. And if
5944 // the address is "base + cst" see if the cst can be "absorbed" into
5945 // the shuffle mask.
5946 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
5947 SDValue Ptr = LD->getBasePtr();
5948 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
5949       return SDValue();
5950     EVT PVT = LD->getValueType(0);
5951 if (PVT != MVT::i32 && PVT != MVT::f32)
5952       return SDValue();
5954     int FI = -1;
5955     int64_t Offset = 0;
5956     if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
5957 FI = FINode->getIndex();
5959 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
5960 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
5961 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5962 Offset = Ptr.getConstantOperandVal(1);
5963 Ptr = Ptr.getOperand(0);
5964     } else
5965       return SDValue();
5968     // FIXME: 256-bit vector instructions don't require a strict alignment,
5969 // improve this code to support it better.
5970 unsigned RequiredAlign = VT.getSizeInBits()/8;
5971 SDValue Chain = LD->getChain();
5972 // Make sure the stack object alignment is at least 16 or 32.
5973 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
5974 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
5975 if (MFI->isFixedObjectIndex(FI)) {
5976 // Can't change the alignment. FIXME: It's possible to compute
5977 // the exact stack offset and reference FI + adjust offset instead.
5978 // If someone *really* cares about this. That's the way to implement it.
5979         return SDValue();
5980       }
5981       MFI->setObjectAlignment(FI, RequiredAlign);
5982     }
5985     // (Offset % 16 or 32) must be a multiple of 4. The address is then
5986     // Ptr + (Offset & ~15).
5989     if ((Offset % RequiredAlign) & 3)
5990       return SDValue();
5991 int64_t StartOffset = Offset & ~(RequiredAlign-1);
5993 Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(),
5994 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType()));
5996 int EltNo = (Offset - StartOffset) >> 2;
5997 unsigned NumElems = VT.getVectorNumElements();
5999 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
6000 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
6001 LD->getPointerInfo().getWithOffset(StartOffset),
6002 false, false, false, 0);
6004 SmallVector<int, 8> Mask;
6005 for (unsigned i = 0; i != NumElems; ++i)
6006 Mask.push_back(EltNo);
6008     return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
6009   }
6011   return SDValue();
6012 }
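// [Editor's note: illustrative example, not part of the original file.]
// Offset absorption above in concrete terms: an f32 load from "slot + 8"
// with RequiredAlign = 16 gives StartOffset = 8 & ~15 = 0 and
// EltNo = (8 - 0) >> 2 = 2, so the scalar load is widened into an aligned
// v4f32 load of the whole slot and then splatted with mask <2,2,2,2>.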
6014 /// EltsFromConsecutiveLoads - Given the initializing elements 'Elts' of a
6015 /// vector of type 'VT', see if the elements can be replaced by a single large
6016 /// load which has the same value as a build_vector whose operands are 'elts'.
6018 /// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
6020 /// FIXME: we'd also like to handle the case where the last elements are zero
6021 /// rather than undef via VZEXT_LOAD, but we do not detect that case today.
6022 /// There's even a handy isZeroNode for that purpose.
6023 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
6024 SDLoc &DL, SelectionDAG &DAG,
6025 bool isAfterLegalize) {
6026 EVT EltVT = VT.getVectorElementType();
6027 unsigned NumElems = Elts.size();
6029 LoadSDNode *LDBase = nullptr;
6030 unsigned LastLoadedElt = -1U;
6032 // For each element in the initializer, see if we've found a load or an undef.
6033 // If we don't find an initial load element, or later load elements are
6034 // non-consecutive, bail out.
6035 for (unsigned i = 0; i < NumElems; ++i) {
6036 SDValue Elt = Elts[i];
6038 if (!Elt.getNode() ||
6039         (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
6040       return SDValue();
6041     if (!LDBase) {
6042       if (Elt.getNode()->getOpcode() == ISD::UNDEF)
6043         return SDValue();
6044       LDBase = cast<LoadSDNode>(Elt.getNode());
6045       LastLoadedElt = i;
6046       continue;
6047     }
6048 if (Elt.getOpcode() == ISD::UNDEF)
6051 LoadSDNode *LD = cast<LoadSDNode>(Elt);
6052     if (!DAG.isConsecutiveLoad(LD, LDBase, EltVT.getSizeInBits()/8, i))
6053       return SDValue();
6054     LastLoadedElt = i;
6055   }
6057 // If we have found an entire vector of loads and undefs, then return a large
6058 // load of the entire vector width starting at the base pointer. If we found
6059 // consecutive loads for the low half, generate a vzext_load node.
6060 if (LastLoadedElt == NumElems - 1) {
6062 if (isAfterLegalize &&
6063         !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
6064       return SDValue();
6066 SDValue NewLd = SDValue();
6068 NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
6069 LDBase->getPointerInfo(), LDBase->isVolatile(),
6070 LDBase->isNonTemporal(), LDBase->isInvariant(),
6071 LDBase->getAlignment());
6073 if (LDBase->hasAnyUseOfValue(1)) {
6074 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6075                                        SDValue(LDBase, 1),
6076                                        SDValue(NewLd.getNode(), 1));
6077 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6078 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6079                            SDValue(NewLd.getNode(), 1));
6080     }
6082     return NewLd;
6083   }
6085   //TODO: The code below fires only for loading the low v2i32 / v2f32
6086   //of a v4i32 / v4f32. It's probably worth generalizing.
6087 if (NumElems == 4 && LastLoadedElt == 1 && (EltVT.getSizeInBits() == 32) &&
6088 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
6089 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
6090 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
6091     SDValue ResNode =
6092         DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::i64,
6093 LDBase->getPointerInfo(),
6094 LDBase->getAlignment(),
6095                                 false/*isVolatile*/, true/*ReadMem*/,
6096                                 false/*WriteMem*/);
6098 // Make sure the newly-created LOAD is in the same position as LDBase in
6099 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and
6100 // update uses of LDBase's output chain to use the TokenFactor.
6101 if (LDBase->hasAnyUseOfValue(1)) {
6102 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6103 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1));
6104 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6105 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6106 SDValue(ResNode.getNode(), 1));
6107     }
6109     return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
6110   }
6112   return SDValue();
6113 }
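// [Editor's note: illustrative example, not part of the original file.]
// In build_vector terms: <(load a), (load a+4), (load a+8), (load a+12)>
// with i32 elements collapses into one v4i32 load of "a", while
// <(load a), (load a+4), undef, undef> takes the VZEXT_LOAD path above,
// loading only the low 64 bits and zeroing the upper half.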
6114 /// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
6115 /// to generate a splat value for the following cases:
6116 /// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
6117 /// 2. A splat shuffle which uses a scalar_to_vector node which comes from
6118 /// a scalar load, or a constant.
6119 /// The VBROADCAST node is returned when a pattern is found,
6120 /// or SDValue() otherwise.
6121 static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
6122 SelectionDAG &DAG) {
6123 // VBROADCAST requires AVX.
6124 // TODO: Splats could be generated for non-AVX CPUs using SSE
6125 // instructions, but there's less potential gain for only 128-bit vectors.
6126   if (!Subtarget->hasAVX())
6127     return SDValue();
6129   MVT VT = Op.getSimpleValueType();
6130   SDLoc dl(Op);
6132 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
6133 "Unsupported vector type for broadcast.");
6135   SDValue Ld;
6136   bool ConstSplatVal;
6138   switch (Op.getOpcode()) {
6139   default:
6140     // Unknown pattern found.
6141     return SDValue();
6143 case ISD::BUILD_VECTOR: {
6144 auto *BVOp = cast<BuildVectorSDNode>(Op.getNode());
6145 BitVector UndefElements;
6146 SDValue Splat = BVOp->getSplatValue(&UndefElements);
6148 // We need a splat of a single value to use broadcast, and it doesn't
6149 // make any sense if the value is only in one element of the vector.
6150 if (!Splat || (VT.getVectorNumElements() - UndefElements.count()) <= 1)
6151       return SDValue();
6153     Ld = Splat;
6154     ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6155 Ld.getOpcode() == ISD::ConstantFP);
6157 // Make sure that all of the users of a non-constant load are from the
6158 // BUILD_VECTOR node.
6159 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
6160       return SDValue();
6161     break;
6162   }
6164   case ISD::VECTOR_SHUFFLE: {
6165 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
6167     // Shuffles must have a splat mask where the first element is
6168     // splatted.
6169     if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0)
6170       return SDValue();
6172 SDValue Sc = Op.getOperand(0);
6173 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
6174 Sc.getOpcode() != ISD::BUILD_VECTOR) {
6176       if (!Subtarget->hasInt256())
6177         return SDValue();
6179 // Use the register form of the broadcast instruction available on AVX2.
6180 if (VT.getSizeInBits() >= 256)
6181 Sc = Extract128BitVector(Sc, 0, DAG, dl);
6182 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
6184     }
6185     Ld = Sc.getOperand(0);
6186 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6187 Ld.getOpcode() == ISD::ConstantFP);
6189 // The scalar_to_vector node and the suspected
6190 // load node must have exactly one user.
6191 // Constants may have multiple users.
6193     // AVX-512 has a register version of the broadcast
6194 bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() &&
6195 Ld.getValueType().getSizeInBits() >= 32;
6196     if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
6197                            !hasRegVer))
6198       return SDValue();
6199     break;
6200   }
6201   }
6203 unsigned ScalarSize = Ld.getValueType().getSizeInBits();
6204 bool IsGE256 = (VT.getSizeInBits() >= 256);
6206 // When optimizing for size, generate up to 5 extra bytes for a broadcast
6207 // instruction to save 8 or more bytes of constant pool data.
6208 // TODO: If multiple splats are generated to load the same constant,
6209 // it may be detrimental to overall size. There needs to be a way to detect
6210 // that condition to know if this is truly a size win.
6211 const Function *F = DAG.getMachineFunction().getFunction();
6212 bool OptForSize = F->getAttributes().
6213 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
6215   // Handle broadcasting a single constant scalar from the constant pool
6216   // into a vector.
6217 // On Sandybridge (no AVX2), it is still better to load a constant vector
6218 // from the constant pool and not to broadcast it from a scalar.
6219 // But override that restriction when optimizing for size.
6220 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
6221 if (ConstSplatVal && (Subtarget->hasAVX2() || OptForSize)) {
6222 EVT CVT = Ld.getValueType();
6223 assert(!CVT.isVector() && "Must not broadcast a vector type");
6225 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
6226 // For size optimization, also splat v2f64 and v2i64, and for size opt
6227 // with AVX2, also splat i8 and i16.
6228 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
6229 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6230 (OptForSize && (ScalarSize == 64 || Subtarget->hasAVX2()))) {
6231 const Constant *C = nullptr;
6232 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
6233 C = CI->getConstantIntValue();
6234 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
6235 C = CF->getConstantFPValue();
6237 assert(C && "Invalid constant type");
6239 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6240 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
6241 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
6242 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP,
6243 MachinePointerInfo::getConstantPool(),
6244 false, false, false, Alignment);
6246 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6247       }
6248     }
6250     bool IsLoad = ISD::isNormalLoad(Ld.getNode());
6252 // Handle AVX2 in-register broadcasts.
6253 if (!IsLoad && Subtarget->hasInt256() &&
6254 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
6255 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6257 // The scalar source must be a normal load.
6258   if (!IsLoad)
6259     return SDValue();
6261   if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6262 (Subtarget->hasVLX() && ScalarSize == 64))
6263 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6265   // The integer check is needed for the 64-bit into 128-bit case, so it
6266   // doesn't match double, since there is no vbroadcastsd for xmm registers.
6267 if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) {
6268 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
6269 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6270   }
6272   // Unsupported broadcast.
6273   return SDValue();
6274 }
6276 /// \brief For an EXTRACT_VECTOR_ELT with a constant index return the real
6277 /// underlying vector and index.
6279 /// Modifies \p ExtractedFromVec to the real vector and returns the real
6280 /// index.
6281 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
6282                                          SDValue ExtIdx) {
6283 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
6284   if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
6285     return Idx;
6287   // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
6288   // lowered this:
6289   //   (extract_vector_elt (v8f32 %vreg1), Constant<6>)
6290   // to:
6291   //   (extract_vector_elt (vector_shuffle<2,u,u,u>
6292   //                            (extract_subvector (v8f32 %vreg0), Constant<4>),
6293   //                            undef)
6294   //                       Constant<2>)
6295 // In this case the vector is the extract_subvector expression and the index
6296 // is 2, as specified by the shuffle.
6297 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
6298 SDValue ShuffleVec = SVOp->getOperand(0);
6299 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
6300 assert(ShuffleVecVT.getVectorElementType() ==
6301 ExtractedFromVec.getSimpleValueType().getVectorElementType());
6303 int ShuffleIdx = SVOp->getMaskElt(Idx);
6304 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
6305     ExtractedFromVec = ShuffleVec;
6306     return ShuffleIdx;
6307   }
6308   return Idx;
6309 }
6311 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
6312 MVT VT = Op.getSimpleValueType();
6314 // Skip if insert_vec_elt is not supported.
6315 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6316 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
6317     return SDValue();
6319   SDLoc DL(Op);
6320   unsigned NumElems = Op.getNumOperands();
6322   SDValue VecIn1;
6323   SDValue VecIn2;
6324 SmallVector<unsigned, 4> InsertIndices;
6325 SmallVector<int, 8> Mask(NumElems, -1);
6327 for (unsigned i = 0; i != NumElems; ++i) {
6328 unsigned Opc = Op.getOperand(i).getOpcode();
6330 if (Opc == ISD::UNDEF)
6333 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
6334       // Quit if more than 1 element needs inserting.
6335 if (InsertIndices.size() > 1)
6338 InsertIndices.push_back(i);
6339       continue;
6340     }
6342     SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
6343 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
6344 // Quit if non-constant index.
6345 if (!isa<ConstantSDNode>(ExtIdx))
6346       return SDValue();
6347     int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
6349 // Quit if extracted from vector of different type.
6350 if (ExtractedFromVec.getValueType() != VT)
6351       return SDValue();
6353     if (!VecIn1.getNode())
6354 VecIn1 = ExtractedFromVec;
6355 else if (VecIn1 != ExtractedFromVec) {
6356 if (!VecIn2.getNode())
6357 VecIn2 = ExtractedFromVec;
6358 else if (VecIn2 != ExtractedFromVec)
6359         // Quit if more than 2 vectors to shuffle
6360         return SDValue();
6361     }
6363     if (ExtractedFromVec == VecIn1)
6364       Mask[i] = Idx;
6365     else if (ExtractedFromVec == VecIn2)
6366 Mask[i] = Idx + NumElems;
6368   }
6369   if (!VecIn1.getNode())
6370     return SDValue();
6372 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
6373 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]);
6374 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
6375 unsigned Idx = InsertIndices[i];
6376 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
6377                     DAG.getIntPtrConstant(Idx));
6378   }
6380   return NV;
6381 }
6383 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
6384 SDValue
6385 X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
6386   SDLoc dl(Op);
6387   MVT VT = Op.getSimpleValueType();
6388 assert((VT.getVectorElementType() == MVT::i1) && (VT.getSizeInBits() <= 16) &&
6389 "Unexpected type in LowerBUILD_VECTORvXi1!");
6392 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6393 SDValue Cst = DAG.getTargetConstant(0, MVT::i1);
6394 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6395 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6396 }
6398 if (ISD::isBuildVectorAllOnes(Op.getNode())) {
6399 SDValue Cst = DAG.getTargetConstant(1, MVT::i1);
6400 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6401 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6402 }
6404 bool AllConstants = true;
6405 uint64_t Immediate = 0;
6406 int NonConstIdx = -1;
6407 bool IsSplat = true;
6408 unsigned NumNonConsts = 0;
6409 unsigned NumConsts = 0;
6410 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
6411 SDValue In = Op.getOperand(idx);
6412 if (In.getOpcode() == ISD::UNDEF)
6413 continue;
6414 if (!isa<ConstantSDNode>(In)) {
6415 AllConstants = false;
6416 NonConstIdx = idx;
6417 NumNonConsts++;
6418 } else {
6419 NumConsts++;
6420 if (cast<ConstantSDNode>(In)->getZExtValue())
6421 Immediate |= (1ULL << idx);
6422 }
6423 if (In != Op.getOperand(0))
6424 IsSplat = false;
6425 }
6427 if (AllConstants) {
6428 SDValue FullMask = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1,
6429 DAG.getConstant(Immediate, MVT::i16));
6430 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, FullMask,
6431 DAG.getIntPtrConstant(0));
6434 if (NumNonConsts == 1 && NonConstIdx != 0) {
6437 SDValue VecAsImm = DAG.getConstant(Immediate,
6438 MVT::getIntegerVT(VT.getSizeInBits()));
6439 DstVec = DAG.getNode(ISD::BITCAST, dl, VT, VecAsImm);
6440 }
6441 else
6442 DstVec = DAG.getUNDEF(VT);
6443 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
6444 Op.getOperand(NonConstIdx),
6445 DAG.getIntPtrConstant(NonConstIdx));
6446 }
6447 if (!IsSplat && (NonConstIdx != 0))
6448 llvm_unreachable("Unsupported BUILD_VECTOR operation");
6449 MVT SelectVT = (VT == MVT::v16i1)? MVT::i16 : MVT::i8;
6450 SDValue Select;
6451 if (IsSplat)
6452 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6453 DAG.getConstant(-1, SelectVT),
6454 DAG.getConstant(0, SelectVT));
6455 else
6456 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6457 DAG.getConstant((Immediate | 1), SelectVT),
6458 DAG.getConstant(Immediate, SelectVT));
6459 return DAG.getNode(ISD::BITCAST, dl, VT, Select);
6460 }
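// For example, an all-constant (v8i1 build_vector 1,0,1,1,0,0,0,0) takes
// the AllConstants path above: the set bits are gathered into
// Immediate = 0xD, materialized as an i16, bitcast to v16i1, and the low
// v8i1 subvector is extracted as the result.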
6462 /// \brief Return true if \p N implements a horizontal binop and return the
6463 /// operands for the horizontal binop into V0 and V1.
6465 /// This is a helper function of PerformBUILD_VECTORCombine.
6466 /// This function checks that the build_vector \p N in input implements a
6467 /// horizontal operation. Parameter \p Opcode defines the kind of horizontal
6468 /// operation to match.
6469 /// For example, if \p Opcode is equal to ISD::ADD, then this function
6470 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
6471 /// is equal to ISD::SUB, then this function checks if this is a horizontal
6472 /// arithmetic sub.
6474 /// This function only analyzes elements of \p N whose indices are
6475 /// in range [BaseIdx, LastIdx).
6476 static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
6477 SelectionDAG &DAG,
6478 unsigned BaseIdx, unsigned LastIdx,
6479 SDValue &V0, SDValue &V1) {
6480 EVT VT = N->getValueType(0);
6482 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
6483 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
6484 "Invalid Vector in input!");
6486 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
6487 bool CanFold = true;
6488 unsigned ExpectedVExtractIdx = BaseIdx;
6489 unsigned NumElts = LastIdx - BaseIdx;
6490 V0 = DAG.getUNDEF(VT);
6491 V1 = DAG.getUNDEF(VT);
6493 // Check if N implements a horizontal binop.
6494 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
6495 SDValue Op = N->getOperand(i + BaseIdx);
6498 if (Op->getOpcode() == ISD::UNDEF) {
6499 // Update the expected vector extract index.
6500 if (i * 2 == NumElts)
6501 ExpectedVExtractIdx = BaseIdx;
6502 ExpectedVExtractIdx += 2;
6503 continue;
6504 }
6506 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
6508 if (!CanFold)
6509 break;
6511 SDValue Op0 = Op.getOperand(0);
6512 SDValue Op1 = Op.getOperand(1);
6514 // Try to match the following pattern:
6515 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
6516 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6517 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6518 Op0.getOperand(0) == Op1.getOperand(0) &&
6519 isa<ConstantSDNode>(Op0.getOperand(1)) &&
6520 isa<ConstantSDNode>(Op1.getOperand(1)));
6521 if (!CanFold)
6522 break;
6524 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6525 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
6527 if (i * 2 < NumElts) {
6528 if (V0.getOpcode() == ISD::UNDEF)
6529 V0 = Op0.getOperand(0);
6530 } else {
6531 if (V1.getOpcode() == ISD::UNDEF)
6532 V1 = Op0.getOperand(0);
6533 if (i * 2 == NumElts)
6534 ExpectedVExtractIdx = BaseIdx;
6535 }
6537 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
6538 if (I0 == ExpectedVExtractIdx)
6539 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
6540 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
6541 // Try to match the following dag sequence:
6542 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
6543 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
6544 } else
6545 CanFold = false;
6547 ExpectedVExtractIdx += 2;
6548 }
6550 return CanFold;
6551 }
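// For example, the build_vector
//   (fadd (extract A,0),(extract A,1)), (fadd (extract A,2),(extract A,3)),
//   (fadd (extract B,0),(extract B,1)), (fadd (extract B,2),(extract B,3))
// matches with V0 = A and V1 = B, which is exactly the lane arithmetic
// performed by the HADDPS instruction.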
6553 /// \brief Emit a sequence of two 128-bit horizontal add/sub followed by
6554 /// a concat_vector.
6556 /// This is a helper function of PerformBUILD_VECTORCombine.
6557 /// This function expects two 256-bit vectors called V0 and V1.
6558 /// At first, each vector is split into two separate 128-bit vectors.
6559 /// Then, the resulting 128-bit vectors are used to implement two
6560 /// horizontal binary operations.
6562 /// The kind of horizontal binary operation is defined by \p X86Opcode.
6564 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed in input to
6565 /// the two new horizontal binops.
6566 /// When Mode is set, the first horizontal binop dag node would take as input
6567 /// the lower 128-bit of V0 and the upper 128-bit of V0. The second
6568 /// horizontal binop dag node would take as input the lower 128-bit of V1
6569 /// and the upper 128-bit of V1.
6570 /// Example:
6571 /// HADD V0_LO, V0_HI
6572 /// HADD V1_LO, V1_HI
6574 /// Otherwise, the first horizontal binop dag node takes as input the lower
6575 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
6576 /// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
6577 /// Example:
6578 /// HADD V0_LO, V1_LO
6579 /// HADD V0_HI, V1_HI
6581 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
6582 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
6583 /// the upper 128-bits of the result.
6584 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
6585 SDLoc DL, SelectionDAG &DAG,
6586 unsigned X86Opcode, bool Mode,
6587 bool isUndefLO, bool isUndefHI) {
6588 EVT VT = V0.getValueType();
6589 assert(VT.is256BitVector() && VT == V1.getValueType() &&
6590 "Invalid nodes in input!");
6592 unsigned NumElts = VT.getVectorNumElements();
6593 SDValue V0_LO = Extract128BitVector(V0, 0, DAG, DL);
6594 SDValue V0_HI = Extract128BitVector(V0, NumElts/2, DAG, DL);
6595 SDValue V1_LO = Extract128BitVector(V1, 0, DAG, DL);
6596 SDValue V1_HI = Extract128BitVector(V1, NumElts/2, DAG, DL);
6597 EVT NewVT = V0_LO.getValueType();
6599 SDValue LO = DAG.getUNDEF(NewVT);
6600 SDValue HI = DAG.getUNDEF(NewVT);
6602 if (Mode) {
6603 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6604 if (!isUndefLO && V0->getOpcode() != ISD::UNDEF)
6605 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
6606 if (!isUndefHI && V1->getOpcode() != ISD::UNDEF)
6607 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
6608 } else {
6609 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6610 if (!isUndefLO && (V0_LO->getOpcode() != ISD::UNDEF ||
6611 V1_LO->getOpcode() != ISD::UNDEF))
6612 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
6614 if (!isUndefHI && (V0_HI->getOpcode() != ISD::UNDEF ||
6615 V1_HI->getOpcode() != ISD::UNDEF))
6616 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
6617 }
6619 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
6620 }
6622 /// \brief Try to fold a build_vector that performs an 'addsub' into the
6623 /// sequence of 'vadd + vsub + blendi'.
6624 static SDValue matchAddSub(const BuildVectorSDNode *BV, SelectionDAG &DAG,
6625 const X86Subtarget *Subtarget) {
6626 SDLoc DL(BV);
6627 EVT VT = BV->getValueType(0);
6628 unsigned NumElts = VT.getVectorNumElements();
6629 SDValue InVec0 = DAG.getUNDEF(VT);
6630 SDValue InVec1 = DAG.getUNDEF(VT);
6632 assert((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
6633 VT == MVT::v2f64) && "build_vector with an invalid type found!");
6635 // Odd-numbered elements in the input build vector are obtained from
6636 // adding two integer/float elements.
6637 // Even-numbered elements in the input build vector are obtained from
6638 // subtracting two integer/float elements.
6639 unsigned ExpectedOpcode = ISD::FSUB;
6640 unsigned NextExpectedOpcode = ISD::FADD;
6641 bool AddFound = false;
6642 bool SubFound = false;
6644 for (unsigned i = 0, e = NumElts; i != e; i++) {
6645 SDValue Op = BV->getOperand(i);
6647 // Skip 'undef' values.
6648 unsigned Opcode = Op.getOpcode();
6649 if (Opcode == ISD::UNDEF) {
6650 std::swap(ExpectedOpcode, NextExpectedOpcode);
6651 continue;
6652 }
6654 // Early exit if we found an unexpected opcode.
6655 if (Opcode != ExpectedOpcode)
6656 return SDValue();
6658 SDValue Op0 = Op.getOperand(0);
6659 SDValue Op1 = Op.getOperand(1);
6661 // Try to match the following pattern:
6662 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
6663 // Early exit if we cannot match that sequence.
6664 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6665 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6666 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
6667 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
6668 Op0.getOperand(1) != Op1.getOperand(1))
6669 return SDValue();
6671 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6672 if (I0 != i)
6673 return SDValue();
6675 // We found a valid add/sub node. Update the information accordingly.
6676 if (i & 1)
6677 AddFound = true;
6678 else
6679 SubFound = true;
6681 // Update InVec0 and InVec1.
6682 if (InVec0.getOpcode() == ISD::UNDEF)
6683 InVec0 = Op0.getOperand(0);
6684 if (InVec1.getOpcode() == ISD::UNDEF)
6685 InVec1 = Op1.getOperand(0);
6687 // Make sure that operands in input to each add/sub node always
6688 // come from a same pair of vectors.
6689 if (InVec0 != Op0.getOperand(0)) {
6690 if (ExpectedOpcode == ISD::FSUB)
6691 return SDValue();
6693 // FADD is commutable. Try to commute the operands
6694 // and then test again.
6695 std::swap(Op0, Op1);
6696 if (InVec0 != Op0.getOperand(0))
6697 return SDValue();
6698 }
6700 if (InVec1 != Op1.getOperand(0))
6701 return SDValue();
6703 // Update the pair of expected opcodes.
6704 std::swap(ExpectedOpcode, NextExpectedOpcode);
6705 }
6707 // Don't try to fold this build_vector into an ADDSUB if the inputs are undef.
6708 if (AddFound && SubFound && InVec0.getOpcode() != ISD::UNDEF &&
6709 InVec1.getOpcode() != ISD::UNDEF)
6710 return DAG.getNode(X86ISD::ADDSUB, DL, VT, InVec0, InVec1);
6712 return SDValue();
6713 }
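// For example, the v4f32 build_vector
//   (fsub (extract A,0),(extract B,0)), (fadd (extract A,1),(extract B,1)),
//   (fsub (extract A,2),(extract B,2)), (fadd (extract A,3),(extract B,3))
// alternates sub/add across lanes and folds to (X86ISD::ADDSUB A, B),
// which selects to addsubps on SSE3.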
6715 static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
6716 const X86Subtarget *Subtarget) {
6717 SDLoc DL(N);
6718 EVT VT = N->getValueType(0);
6719 unsigned NumElts = VT.getVectorNumElements();
6720 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);
6721 SDValue InVec0, InVec1;
6723 // Try to match an ADDSUB.
6724 if ((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
6725 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
6726 SDValue Value = matchAddSub(BV, DAG, Subtarget);
6727 if (Value.getNode())
6728 return Value;
6729 }
6731 // Try to match horizontal ADD/SUB.
6732 unsigned NumUndefsLO = 0;
6733 unsigned NumUndefsHI = 0;
6734 unsigned Half = NumElts/2;
6736 // Count the number of UNDEF operands in the build_vector in input.
6737 for (unsigned i = 0, e = Half; i != e; ++i)
6738 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6739 NumUndefsLO++;
6741 for (unsigned i = Half, e = NumElts; i != e; ++i)
6742 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6743 NumUndefsHI++;
6745 // Early exit if this is either a build_vector of all UNDEFs or all the
6746 // operands but one are UNDEF.
6747 if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
6748 return SDValue();
6750 if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget->hasSSE3()) {
6751 // Try to match an SSE3 float HADD/HSUB.
6752 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6753 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6755 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6756 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6757 } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) {
6758 // Try to match an SSSE3 integer HADD/HSUB.
6759 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6760 return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
6762 if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6763 return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
6764 }
6766 if (!Subtarget->hasAVX())
6767 return SDValue();
6769 if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
6770 // Try to match an AVX horizontal add/sub of packed single/double
6771 // precision floating point values from 256-bit vectors.
6772 SDValue InVec2, InVec3;
6773 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
6774 isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
6775 ((InVec0.getOpcode() == ISD::UNDEF ||
6776 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6777 ((InVec1.getOpcode() == ISD::UNDEF ||
6778 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6779 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6781 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
6782 isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
6783 ((InVec0.getOpcode() == ISD::UNDEF ||
6784 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6785 ((InVec1.getOpcode() == ISD::UNDEF ||
6786 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6787 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6788 } else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
6789 // Try to match an AVX2 horizontal add/sub of signed integers.
6790 SDValue InVec2, InVec3;
6791 unsigned X86Opcode;
6792 bool CanFold = true;
6794 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
6795 isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
6796 ((InVec0.getOpcode() == ISD::UNDEF ||
6797 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6798 ((InVec1.getOpcode() == ISD::UNDEF ||
6799 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6800 X86Opcode = X86ISD::HADD;
6801 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
6802 isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
6803 ((InVec0.getOpcode() == ISD::UNDEF ||
6804 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6805 ((InVec1.getOpcode() == ISD::UNDEF ||
6806 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6807 X86Opcode = X86ISD::HSUB;
6808 else
6809 CanFold = false;
6811 if (CanFold) {
6812 // Fold this build_vector into a single horizontal add/sub.
6813 // Do this only if the target has AVX2.
6814 if (Subtarget->hasAVX2())
6815 return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
6817 // Do not try to expand this build_vector into a pair of horizontal
6818 // add/sub if we can emit a pair of scalar add/sub.
6819 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6820 return SDValue();
6822 // Convert this build_vector into a pair of horizontal binop followed by
6823 // a concat vector.
6824 bool isUndefLO = NumUndefsLO == Half;
6825 bool isUndefHI = NumUndefsHI == Half;
6826 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
6827 isUndefLO, isUndefHI);
6828 }
6829 }
6831 if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
6832 VT == MVT::v16i16) && Subtarget->hasAVX()) {
6833 unsigned X86Opcode;
6834 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6835 X86Opcode = X86ISD::HADD;
6836 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6837 X86Opcode = X86ISD::HSUB;
6838 else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6839 X86Opcode = X86ISD::FHADD;
6840 else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6841 X86Opcode = X86ISD::FHSUB;
6842 else
6843 return SDValue();
6845 // Don't try to expand this build_vector into a pair of horizontal add/sub
6846 // if we can simply emit a pair of scalar add/sub.
6847 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6848 return SDValue();
6850 // Convert this build_vector into two horizontal add/sub followed by
6851 // a concat vector.
6852 bool isUndefLO = NumUndefsLO == Half;
6853 bool isUndefHI = NumUndefsHI == Half;
6854 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
6855 isUndefLO, isUndefHI);
6856 }
6858 return SDValue();
6859 }
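// For example, a v8i32 build_vector matching a horizontal add becomes a
// single (X86ISD::HADD V0, V1) when AVX2 is available (one vphaddd);
// otherwise it is split by ExpandHorizontalBinOp into two 128-bit HADD
// nodes that are rejoined with a concat_vectors.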
6861 SDValue
6862 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
6863 SDLoc dl(Op);
6865 MVT VT = Op.getSimpleValueType();
6866 MVT ExtVT = VT.getVectorElementType();
6867 unsigned NumElems = Op.getNumOperands();
6869 // Generate vectors for predicate vectors.
6870 if (VT.getScalarType() == MVT::i1 && Subtarget->hasAVX512())
6871 return LowerBUILD_VECTORvXi1(Op, DAG);
6873 // Vectors containing all zeros can be matched by pxor and xorps later
6874 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6875 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
6876 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
6877 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
6878 return Op;
6880 return getZeroVector(VT, Subtarget, DAG, dl);
6881 }
6883 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
6884 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
6885 // vpcmpeqd on 256-bit vectors.
6886 if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
6887 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256()))
6888 return Op;
6890 if (!VT.is512BitVector())
6891 return getOnesVector(VT, Subtarget->hasInt256(), DAG, dl);
6892 }
6894 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
6895 if (Broadcast.getNode())
6896 return Broadcast;
6898 unsigned EVTBits = ExtVT.getSizeInBits();
6900 unsigned NumZero = 0;
6901 unsigned NumNonZero = 0;
6902 unsigned NonZeros = 0;
6903 bool IsAllConstants = true;
6904 SmallSet<SDValue, 8> Values;
6905 for (unsigned i = 0; i < NumElems; ++i) {
6906 SDValue Elt = Op.getOperand(i);
6907 if (Elt.getOpcode() == ISD::UNDEF)
6908 continue;
6909 Values.insert(Elt);
6910 if (Elt.getOpcode() != ISD::Constant &&
6911 Elt.getOpcode() != ISD::ConstantFP)
6912 IsAllConstants = false;
6913 if (X86::isZeroNode(Elt))
6914 NumZero++;
6915 else {
6916 NonZeros |= (1 << i);
6917 NumNonZero++;
6918 }
6919 }
6921 // All undef vector. Return an UNDEF. All zero vectors were handled above.
6922 if (NumNonZero == 0)
6923 return DAG.getUNDEF(VT);
6925 // Special case for single non-zero, non-undef, element.
6926 if (NumNonZero == 1) {
6927 unsigned Idx = countTrailingZeros(NonZeros);
6928 SDValue Item = Op.getOperand(Idx);
6930 // If this is an insertion of an i64 value on x86-32, and if the top bits of
6931 // the value are obviously zero, truncate the value to i32 and do the
6932 // insertion that way. Only do this if the value is non-constant or if the
6933 // value is a constant being inserted into element 0. It is cheaper to do
6934 // a constant pool load than it is to do a movd + shuffle.
6935 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
6936 (!IsAllConstants || Idx == 0)) {
6937 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
6938 // Handle SSE only.
6939 assert(VT == MVT::v2i64 && "Expected an SSE value type!");
6940 EVT VecVT = MVT::v4i32;
6941 unsigned VecElts = 4;
6943 // Truncate the value (which may itself be a constant) to i32, and
6944 // convert it to a vector with movd (S2V+shuffle to zero extend).
6945 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
6946 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
6948 // If using the new shuffle lowering, just directly insert this.
6949 if (ExperimentalVectorShuffleLowering)
6950 return DAG.getNode(
6951 ISD::BITCAST, dl, VT,
6952 getShuffleVectorZeroOrUndef(Item, Idx * 2, true, Subtarget, DAG));
6954 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6956 // Now we have our 32-bit value zero extended in the low element of
6957 // a vector. If Idx != 0, swizzle it into place.
6958 if (Idx != 0) {
6959 SmallVector<int, 4> Mask;
6960 Mask.push_back(Idx);
6961 for (unsigned i = 1; i != VecElts; ++i)
6962 Mask.push_back(i);
6963 Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT),
6964 &Mask[0]);
6965 }
6966 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
6967 }
6968 }
6970 // If we have a constant or non-constant insertion into the low element of
6971 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
6972 // the rest of the elements. This will be matched as movd/movq/movss/movsd
6973 // depending on what the source datatype is.
6974 if (Idx == 0) {
6975 if (NumZero == 0)
6976 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6978 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
6979 (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
6980 if (VT.is256BitVector() || VT.is512BitVector()) {
6981 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
6982 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
6983 Item, DAG.getIntPtrConstant(0));
6984 }
6985 assert(VT.is128BitVector() && "Expected an SSE value type!");
6986 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6987 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
6988 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6989 }
6991 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
6992 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
6993 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
6994 if (VT.is256BitVector()) {
6995 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
6996 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
6997 } else {
6998 assert(VT.is128BitVector() && "Expected an SSE value type!");
6999 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7000 }
7001 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
7002 }
7003 }
7005 // Is it a vector logical left shift?
7006 if (NumElems == 2 && Idx == 1 &&
7007 X86::isZeroNode(Op.getOperand(0)) &&
7008 !X86::isZeroNode(Op.getOperand(1))) {
7009 unsigned NumBits = VT.getSizeInBits();
7010 return getVShift(true, VT,
7011 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
7012 VT, Op.getOperand(1)),
7013 NumBits/2, DAG, *this, dl);
7014 }
7016 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
7017 return SDValue();
7019 // Otherwise, if this is a vector with i32 or f32 elements, and the element
7020 // is a non-constant being inserted into an element other than the low one,
7021 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
7022 // movd/movss) to move this into the low element, then shuffle it into
7023 // place.
7024 if (EVTBits == 32) {
7025 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7027 // If using the new shuffle lowering, just directly insert this.
7028 if (ExperimentalVectorShuffleLowering)
7029 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
7031 // Turn it into a shuffle of zero and zero-extended scalar to vector.
7032 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG);
7033 SmallVector<int, 8> MaskVec;
7034 for (unsigned i = 0; i != NumElems; ++i)
7035 MaskVec.push_back(i == Idx ? 0 : 1);
7036 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
7037 }
7038 }
7040 // Splat is obviously ok. Let legalizer expand it to a shuffle.
7041 if (Values.size() == 1) {
7042 if (EVTBits == 32) {
7043 // Instead of a shuffle like this:
7044 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
7045 // Check if it's possible to issue this instead.
7046 // shuffle (vload ptr)), undef, <1, 1, 1, 1>
7047 unsigned Idx = countTrailingZeros(NonZeros);
7048 SDValue Item = Op.getOperand(Idx);
7049 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
7050 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
7051 }
7052 return SDValue();
7053 }
7055 // A vector full of immediates; various special cases are already
7056 // handled, so this is best done with a single constant-pool load.
7057 if (IsAllConstants)
7058 return SDValue();
7060 // For AVX-length vectors, see if we can use a vector load to get all of the
7061 // elements, otherwise build the individual 128-bit pieces and use
7062 // shuffles to put them in place.
7063 if (VT.is256BitVector() || VT.is512BitVector()) {
7064 SmallVector<SDValue, 64> V;
7065 for (unsigned i = 0; i != NumElems; ++i)
7066 V.push_back(Op.getOperand(i));
7068 // Check for a build vector of consecutive loads.
7069 if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
7070 return LD;
7072 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
7074 // Build both the lower and upper subvector.
7075 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7076 makeArrayRef(&V[0], NumElems/2));
7077 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7078 makeArrayRef(&V[NumElems / 2], NumElems/2));
7080 // Recreate the wider vector with the lower and upper part.
7081 if (VT.is256BitVector())
7082 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7083 return Concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7084 }
7086 // Let legalizer expand 2-wide build_vectors.
7087 if (EVTBits == 64) {
7088 if (NumNonZero == 1) {
7089 // One half is zero or undef.
7090 unsigned Idx = countTrailingZeros(NonZeros);
7091 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
7092 Op.getOperand(Idx));
7093 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
7094 }
7095 return SDValue();
7096 }
7098 // If element VT is < 32 bits, convert it to inserts into a zero vector.
7099 if (EVTBits == 8 && NumElems == 16) {
7100 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
7101 Subtarget, *this);
7102 if (V.getNode()) return V;
7103 }
7105 if (EVTBits == 16 && NumElems == 8) {
7106 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
7107 Subtarget, *this);
7108 if (V.getNode()) return V;
7109 }
7111 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
7112 if (EVTBits == 32 && NumElems == 4) {
7113 SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget, *this);
7114 if (V.getNode())
7115 return V;
7116 }
7118 // If element VT is == 32 bits, turn it into a number of shuffles.
7119 SmallVector<SDValue, 8> V(NumElems);
7120 if (NumElems == 4 && NumZero > 0) {
7121 for (unsigned i = 0; i < 4; ++i) {
7122 bool isZero = !(NonZeros & (1 << i));
7123 if (isZero)
7124 V[i] = getZeroVector(VT, Subtarget, DAG, dl);
7125 else
7126 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7127 }
7129 for (unsigned i = 0; i < 2; ++i) {
7130 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
7131 default: break;
7132 case 0:
7133 V[i] = V[i*2]; // Must be a zero vector.
7134 break;
7135 case 1:
7136 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
7137 break;
7138 case 2:
7139 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
7140 break;
7141 case 3:
7142 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
7143 break;
7144 }
7145 }
7147 bool Reverse1 = (NonZeros & 0x3) == 2;
7148 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
7149 int MaskVec[] = {
7150 Reverse1 ? 1 : 0,
7151 Reverse1 ? 0 : 1,
7152 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
7153 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
7154 };
7155 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
7156 }
7158 if (Values.size() > 1 && VT.is128BitVector()) {
7159 // Check for a build vector of consecutive loads.
7160 for (unsigned i = 0; i < NumElems; ++i)
7161 V[i] = Op.getOperand(i);
7163 // Check for elements which are consecutive loads.
7164 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false);
7165 if (LD.getNode())
7166 return LD;
7168 // Check for a build vector from mostly shuffle plus few inserting.
7169 SDValue Sh = buildFromShuffleMostly(Op, DAG);
7170 if (Sh.getNode())
7171 return Sh;
7173 // For SSE 4.1, use insertps to put the high elements into the low element.
7174 if (Subtarget->hasSSE41()) {
7175 SDValue Result;
7176 if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
7177 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
7178 else
7179 Result = DAG.getUNDEF(VT);
7181 for (unsigned i = 1; i < NumElems; ++i) {
7182 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
7183 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
7184 Op.getOperand(i), DAG.getIntPtrConstant(i));
7185 }
7186 return Result;
7187 }
7189 // Otherwise, expand into a number of unpckl*, start by extending each of
7190 // our (non-undef) elements to the full vector width with the element in the
7191 // bottom slot of the vector (which generates no code for SSE).
7192 for (unsigned i = 0; i < NumElems; ++i) {
7193 if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
7194 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7195 else
7196 V[i] = DAG.getUNDEF(VT);
7197 }
7199 // Next, we iteratively mix elements, e.g. for v4f32:
7200 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
7201 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
7202 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
7203 unsigned EltStride = NumElems >> 1;
7204 while (EltStride != 0) {
7205 for (unsigned i = 0; i < EltStride; ++i) {
7206 // If V[i+EltStride] is undef and this is the first round of mixing,
7207 // then it is safe to just drop this shuffle: V[i] is already in the
7208 // right place, the one element (since it's the first round) being
7209 // inserted as undef can be dropped. This isn't safe for successive
7210 // rounds because they will permute elements within both vectors.
7211 if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
7212 EltStride == NumElems/2)
7213 continue;
7215 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
7216 }
7217 EltStride >>= 1;
7218 }
7219 return V[0];
7220 }
7224 // LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction
7225 // to create 256-bit vectors from two other 128-bit ones.
7226 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7227 SDLoc dl(Op);
7228 MVT ResVT = Op.getSimpleValueType();
7230 assert((ResVT.is256BitVector() ||
7231 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
7233 SDValue V1 = Op.getOperand(0);
7234 SDValue V2 = Op.getOperand(1);
7235 unsigned NumElems = ResVT.getVectorNumElements();
7236 if(ResVT.is256BitVector())
7237 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7239 if (Op.getNumOperands() == 4) {
7240 MVT HalfVT = MVT::getVectorVT(ResVT.getScalarType(),
7241 ResVT.getVectorNumElements()/2);
7242 SDValue V3 = Op.getOperand(2);
7243 SDValue V4 = Op.getOperand(3);
7244 return Concat256BitVectors(Concat128BitVectors(V1, V2, HalfVT, NumElems/2, DAG, dl),
7245 Concat128BitVectors(V3, V4, HalfVT, NumElems/2, DAG, dl), ResVT, NumElems, DAG, dl);
7246 }
7247 return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7248 }
7250 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7251 MVT LLVM_ATTRIBUTE_UNUSED VT = Op.getSimpleValueType();
7252 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
7253 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
7254 Op.getNumOperands() == 4)));
7256 // AVX can use the vinsertf128 instruction to create 256-bit vectors
7257 // from two other 128-bit ones.
7259 // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
7260 return LowerAVXCONCAT_VECTORS(Op, DAG);
7261 }
7264 //===----------------------------------------------------------------------===//
7265 // Vector shuffle lowering
7267 // This is an experimental code path for lowering vector shuffles on x86. It is
7268 // designed to handle arbitrary vector shuffles and blends, gracefully
7269 // degrading performance as necessary. It works hard to recognize idiomatic
7270 // shuffles and lower them to optimal instruction patterns without leaving
7271 // a framework that allows reasonably efficient handling of all vector shuffle
7272 // patterns.
7273 //===----------------------------------------------------------------------===//
7275 /// \brief Tiny helper function to identify a no-op mask.
7277 /// This is a somewhat boring predicate function. It checks whether the mask
7278 /// array input, which is assumed to be a single-input shuffle mask of the kind
7279 /// used by the X86 shuffle instructions (not a fully general
7280 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
7281 /// in-place shuffle are 'no-op's.
7282 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
7283 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7284 if (Mask[i] != -1 && Mask[i] != i)
7285 return false;
7286 return true;
7287 }
7289 /// \brief Helper function to classify a mask as a single-input mask.
7291 /// This isn't a generic single-input test because in the vector shuffle
7292 /// lowering we canonicalize single inputs to be the first input operand. This
7293 /// means we can more quickly test for a single input by only checking whether
7294 /// an input from the second operand exists. We also assume that the size of
7295 /// mask corresponds to the size of the input vectors which isn't true in the
7296 /// fully general case.
7297 static bool isSingleInputShuffleMask(ArrayRef<int> Mask) {
7298 for (int M : Mask)
7299 if (M >= (int)Mask.size())
7300 return false;
7301 return true;
7302 }
7304 /// \brief Test whether there are elements crossing 128-bit lanes in this
7305 /// shuffle mask.
7307 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
7308 /// and we routinely test for these.
7309 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
7310 int LaneSize = 128 / VT.getScalarSizeInBits();
7311 int Size = Mask.size();
7312 for (int i = 0; i < Size; ++i)
7313 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
7314 return true;
7315 return false;
7316 }
7318 /// \brief Test whether a shuffle mask is equivalent within each 128-bit lane.
7320 /// This checks a shuffle mask to see if it is performing the same
7321 /// 128-bit lane-relative shuffle in each 128-bit lane. This trivially implies
7322 /// that it is also not lane-crossing. It may however involve a blend from the
7323 /// same lane of a second vector.
7325 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
7326 /// non-trivial to compute in the face of undef lanes. The representation is
7327 /// *not* suitable for use with existing 128-bit shuffles as it will contain
7328 /// entries from both V1 and V2 inputs to the wider mask.
7329 static bool
7330 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
7331 SmallVectorImpl<int> &RepeatedMask) {
7332 int LaneSize = 128 / VT.getScalarSizeInBits();
7333 RepeatedMask.resize(LaneSize, -1);
7334 int Size = Mask.size();
7335 for (int i = 0; i < Size; ++i) {
7336 if (Mask[i] < 0)
7337 continue;
7338 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
7339 // This entry crosses lanes, so there is no way to model this shuffle.
7340 return false;
7342 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
7343 if (RepeatedMask[i % LaneSize] == -1)
7344 // This is the first non-undef entry in this slot of a 128-bit lane.
7345 RepeatedMask[i % LaneSize] =
7346 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + Size;
7347 else if (RepeatedMask[i % LaneSize] + (i / LaneSize) * LaneSize != Mask[i])
7348 // Found a mismatch with the repeated mask.
7349 return false;
7350 }
7351 return true;
7352 }
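// For example, for v8i32 the mask <0, 8, 1, 9, 4, 12, 5, 13> performs the
// same relative shuffle in both 128-bit lanes, so this returns true with
// RepeatedMask = <0, 8, 1, 9> (entries >= 8 denote elements of V2).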
7354 // Hide this symbol with an anonymous namespace instead of 'static' so that MSVC
7355 // 2013 will allow us to use it as a non-type template parameter.
7356 namespace {
7358 /// \brief Implementation of the \c isShuffleEquivalent variadic functor.
7360 /// See its documentation for details.
7361 bool isShuffleEquivalentImpl(ArrayRef<int> Mask, ArrayRef<const int *> Args) {
7362 if (Mask.size() != Args.size())
7363 return false;
7364 for (int i = 0, e = Mask.size(); i < e; ++i) {
7365 assert(*Args[i] >= 0 && "Arguments must be positive integers!");
7366 if (Mask[i] != -1 && Mask[i] != *Args[i])
7367 return false;
7368 }
7369 return true;
7370 }
7372 } // namespace
7374 /// \brief Checks whether a shuffle mask is equivalent to an explicit list of
7375 /// arguments.
7377 /// This is a fast way to test a shuffle mask against a fixed pattern:
7379 /// if (isShuffleEquivalent(Mask, 3, 2, 1, 0)) { ... }
7381 /// It returns true if the mask is exactly as wide as the argument list, and
7382 /// each element of the mask is either -1 (signifying undef) or the value given
7383 /// in the argument.
7384 static const VariadicFunction1<
7385 bool, ArrayRef<int>, int, isShuffleEquivalentImpl> isShuffleEquivalent = {};
7387 /// \brief Get a 4-lane 8-bit shuffle immediate for a mask.
7389 /// This helper function produces an 8-bit shuffle immediate corresponding to
7390 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
7391 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
7392 /// example.
7394 /// NB: We rely heavily on "undef" masks preserving the input lane.
7395 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask,
7396 SelectionDAG &DAG) {
7397 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
7398 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
7399 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
7400 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
7401 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
7403 unsigned Imm = 0;
7404 Imm |= (Mask[0] == -1 ? 0 : Mask[0]) << 0;
7405 Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2;
7406 Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4;
7407 Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6;
7408 return DAG.getConstant(Imm, MVT::i8);
7409 }
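// For example, the mask <2, 1, 0, 3> packs into the immediate
// 0b11000110 (0xC6); used with PSHUFD it swaps elements 0 and 2 while
// leaving elements 1 and 3 in place.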
7411 /// \brief Try to emit a blend instruction for a shuffle.
7413 /// This doesn't do any checks for the availability of instructions for blending
7414 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
7415 /// be matched in the backend with the type given. What it does check for is
7416 /// that the shuffle mask is in fact a blend.
7417 static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
7418 SDValue V2, ArrayRef<int> Mask,
7419 const X86Subtarget *Subtarget,
7420 SelectionDAG &DAG) {
7422 unsigned BlendMask = 0;
7423 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7424 if (Mask[i] >= Size) {
7425 if (Mask[i] != i + Size)
7426 return SDValue(); // Shuffled V2 input!
7427 BlendMask |= 1u << i;
7428 continue;
7429 }
7430 if (Mask[i] >= 0 && Mask[i] != i)
7431 return SDValue(); // Shuffled V1 input!
7432 }
7433 switch (VT.SimpleTy) {
7434 case MVT::v2f64:
7435 case MVT::v4f32:
7436 case MVT::v4f64:
7437 case MVT::v8f32:
7438 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
7439 DAG.getConstant(BlendMask, MVT::i8));
7441 case MVT::v4i64:
7442 case MVT::v8i32:
7443 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7444 // FALLTHROUGH
7445 case MVT::v2i64:
7446 case MVT::v4i32:
7447 // If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
7448 // that instruction.
7449 if (Subtarget->hasAVX2()) {
7450 // Scale the blend by the number of 32-bit dwords per element.
7451 int Scale = VT.getScalarSizeInBits() / 32;
7452 BlendMask = 0;
7453 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7454 if (Mask[i] >= Size)
7455 for (int j = 0; j < Scale; ++j)
7456 BlendMask |= 1u << (i * Scale + j);
7458 MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
7459 V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
7460 V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
7461 return DAG.getNode(ISD::BITCAST, DL, VT,
7462 DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
7463 DAG.getConstant(BlendMask, MVT::i8)));
7464 }
7465 // FALLTHROUGH
7466 case MVT::v8i16: {
7467 // For integer shuffles we need to expand the mask and cast the inputs to
7468 // v8i16s prior to blending.
7469 int Scale = 8 / VT.getVectorNumElements();
7470 BlendMask = 0;
7471 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7472 if (Mask[i] >= Size)
7473 for (int j = 0; j < Scale; ++j)
7474 BlendMask |= 1u << (i * Scale + j);
7476 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
7477 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
7478 return DAG.getNode(ISD::BITCAST, DL, VT,
7479 DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
7480 DAG.getConstant(BlendMask, MVT::i8)));
7481 }
7483 case MVT::v16i16: {
7484 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7485 SmallVector<int, 8> RepeatedMask;
7486 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
7487 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
7488 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
7489 BlendMask = 0;
7490 for (int i = 0; i < 8; ++i)
7491 if (RepeatedMask[i] >= 16)
7492 BlendMask |= 1u << i;
7493 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
7494 DAG.getConstant(BlendMask, MVT::i8));
7495 }
7496 }
7497 // FALLTHROUGH
7498 case MVT::v32i8: {
7499 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7500 // Scale the blend by the number of bytes per element.
7501 int Scale = VT.getScalarSizeInBits() / 8;
7502 assert(Mask.size() * Scale == 32 && "Not a 256-bit vector!");
7504 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
7505 // mix of LLVM's code generator and the x86 backend. We tell the code
7506 // generator that boolean values in the elements of an x86 vector register
7507 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
7508 // mapping a select to operand #1, and 'false' mapping to operand #2. The
7509 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
7510 // of the element (the remaining are ignored) and 0 in that high bit would
7511 // mean operand #1 while 1 in the high bit would mean operand #2. So while
7512 // the LLVM model for boolean values in vector elements gets the relevant
7513 // bit set, it is set backwards and over constrained relative to x86's
7514 // actual model.
7515 SDValue VSELECTMask[32];
7516 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7517 for (int j = 0; j < Scale; ++j)
7518 VSELECTMask[Scale * i + j] =
7519 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
7520 : DAG.getConstant(Mask[i] < Size ? -1 : 0, MVT::i8);
7522 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1);
7523 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V2);
7524 return DAG.getNode(
7525 ISD::BITCAST, DL, VT,
7526 DAG.getNode(ISD::VSELECT, DL, MVT::v32i8,
7527 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, VSELECTMask),
7528 V1, V2));
7529 }
7531 default:
7532 llvm_unreachable("Not a supported integer vector type!");
7533 }
7534 }
7536 /// \brief Generic routine to lower a shuffle and blend as a decomposed set of
7537 /// unblended shuffles followed by an unshuffled blend.
7539 /// This matches the extremely common pattern for handling combined
7540 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
7541 /// operations.
7542 static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
7543 SDValue V1,
7544 SDValue V2,
7545 ArrayRef<int> Mask,
7546 SelectionDAG &DAG) {
7547 // Shuffle the input elements into the desired positions in V1 and V2 and
7548 // blend them together.
7549 SmallVector<int, 32> V1Mask(Mask.size(), -1);
7550 SmallVector<int, 32> V2Mask(Mask.size(), -1);
7551 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7552 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7553 if (Mask[i] >= 0 && Mask[i] < Size) {
7554 V1Mask[i] = Mask[i];
7555 BlendMask[i] = i;
7556 } else if (Mask[i] >= Size) {
7557 V2Mask[i] = Mask[i] - Size;
7558 BlendMask[i] = i + Size;
7559 }
7561 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
7562 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
7563 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7564 }
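// For example, Mask = <2, 5, 0, 7> decomposes into
//   V1Mask    = < 2, -1,  0, -1>   (shuffle of V1 only)
//   V2Mask    = <-1,  1, -1,  3>   (shuffle of V2 only)
//   BlendMask = < 0,  5,  2,  7>   (unshuffled final blend)
// so each input is permuted independently and merged with one cheap blend.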
7566 /// \brief Try to lower a vector shuffle as a byte rotation.
7568 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
7569 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
7570 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
7571 /// try to generically lower a vector shuffle through such a pattern. It
7572 /// does not check for the profitability of lowering either as PALIGNR or
7573 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
7574 /// This matches shuffle vectors that look like:
7576 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
7578 /// Essentially it concatenates V1 and V2, shifts right by some number of
7579 /// elements, and takes the low elements as the result. Note that while this is
7580 /// specified as a *right shift* because x86 is little-endian, it is a *left
7581 /// rotate* of the vector lanes.
7583 /// Note that this only handles 128-bit vector widths currently.
7584 static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
7585 SDValue V2,
7586 ArrayRef<int> Mask,
7587 const X86Subtarget *Subtarget,
7588 SelectionDAG &DAG) {
7589 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7591 // We need to detect various ways of spelling a rotation:
7592 // [11, 12, 13, 14, 15, 0, 1, 2]
7593 // [-1, 12, 13, 14, -1, -1, 1, -1]
7594 // [-1, -1, -1, -1, -1, -1, 1, 2]
7595 // [ 3, 4, 5, 6, 7, 8, 9, 10]
7596 // [-1, 4, 5, 6, -1, -1, 9, -1]
7597 // [-1, 4, 5, 6, -1, -1, -1, -1]
7598 int Rotation = 0;
7599 SDValue Lo, Hi;
7600 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7601 if (Mask[i] == -1)
7602 continue;
7603 assert(Mask[i] >= 0 && "Only -1 is a valid negative mask element!");
7605 // Based on the mod-Size value of this mask element determine where
7606 // a rotated vector would have started.
7607 int StartIdx = i - (Mask[i] % Size);
7608 if (StartIdx == 0)
7609 // The identity rotation isn't interesting, stop.
7610 return SDValue();
7612 // If we found the tail of a vector the rotation must be the missing
7613 // front. If we found the head of a vector, it must be how much of the head.
7614 int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;
7616 if (Rotation == 0)
7617 Rotation = CandidateRotation;
7618 else if (Rotation != CandidateRotation)
7619 // The rotations don't match, so we can't match this mask.
7620 return SDValue();
7622 // Compute which value this mask is pointing at.
7623 SDValue MaskV = Mask[i] < Size ? V1 : V2;
7625 // Compute which of the two target values this index should be assigned to.
7626 // This reflects whether the high elements are remaining or the low elements
7627 // are remaining.
7628 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
7630 // Either set up this value if we've not encountered it before, or check
7631 // that it remains consistent.
7632 if (!TargetV)
7633 TargetV = MaskV;
7634 else if (TargetV != MaskV)
7635 // This may be a rotation, but it pulls from the inputs in some
7636 // unsupported interleaving.
7637 return SDValue();
7638 }
7640 // Check that we successfully analyzed the mask, and normalize the results.
7641 assert(Rotation != 0 && "Failed to locate a viable rotation!");
7642 assert((Lo || Hi) && "Failed to find a rotated input vector!");
7643 if (!Lo)
7644 Lo = Hi;
7645 else if (!Hi)
7646 Hi = Lo;
7648 assert(VT.getSizeInBits() == 128 &&
7649 "Rotate-based lowering only supports 128-bit lowering!");
7650 assert(Mask.size() <= 16 &&
7651 "Can shuffle at most 16 bytes in a 128-bit vector!");
7653 // The actual rotate instruction rotates bytes, so we need to scale the
7654 // rotation based on how many bytes are in the vector.
7655 int Scale = 16 / Mask.size();
7657 // SSSE3 targets can use the palignr instruction
7658 if (Subtarget->hasSSSE3()) {
7659 // Cast the inputs to v16i8 to match PALIGNR.
7660 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Lo);
7661 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Hi);
7663 return DAG.getNode(ISD::BITCAST, DL, VT,
7664 DAG.getNode(X86ISD::PALIGNR, DL, MVT::v16i8, Hi, Lo,
7665 DAG.getConstant(Rotation * Scale, MVT::i8)));
7666 }
7668 // Default SSE2 implementation
7669 int LoByteShift = 16 - Rotation * Scale;
7670 int HiByteShift = Rotation * Scale;
7672 // Cast the inputs to v2i64 to match PSLLDQ/PSRLDQ.
7673 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Lo);
7674 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Hi);
7676 SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo,
7677 DAG.getConstant(8 * LoByteShift, MVT::i8));
7678 SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi,
7679 DAG.getConstant(8 * HiByteShift, MVT::i8));
7680 return DAG.getNode(ISD::BITCAST, DL, VT,
7681 DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift));
7682 }
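// For example, the v8i16 mask [3, 4, 5, 6, 7, 8, 9, 10] is a rotation by
// 3 elements; with Scale = 2 bytes per element it becomes a single
// palignr with immediate 6 on SSSE3, or (PSRLDQ Hi, 6) | (PSLLDQ Lo, 10)
// on plain SSE2.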
7684 /// \brief Compute whether each element of a shuffle is zeroable.
7686 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
7687 /// Either it is an undef element in the shuffle mask, the element of the input
7688 /// referenced is undef, or the element of the input referenced is known to be
7689 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
7690 /// as many lanes with this technique as possible to simplify the remaining
7691 /// shuffle.
7692 static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
7693 SDValue V1, SDValue V2) {
7694 SmallBitVector Zeroable(Mask.size(), false);
7696 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
7697 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
7699 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7700 int M = Mask[i];
7701 // Handle the easy cases.
7702 if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
7703 Zeroable[i] = true;
7704 continue;
7705 }
7707 // If this is an index into a build_vector node, dig out the input value and
7708 // use it.
7709 SDValue V = M < Size ? V1 : V2;
7710 if (V.getOpcode() != ISD::BUILD_VECTOR)
7711 continue;
7713 SDValue Input = V.getOperand(M % Size);
7714 // The UNDEF opcode check really should be dead code here, but not quite
7715 // worth asserting on (it isn't invalid, just unexpected).
7716 if (Input.getOpcode() == ISD::UNDEF || X86::isZeroNode(Input))
7717 Zeroable[i] = true;
7718 }
7720 return Zeroable;
7721 }
7723 /// \brief Try to lower a vector shuffle as a byte shift (shifts in zeros).
7725 /// Attempts to match a shuffle mask against the PSRLDQ and PSLLDQ SSE2
7726 /// byte-shift instructions. The mask must consist of a shifted sequential
7727 /// shuffle from one of the input vectors and zeroable elements for the
7728 /// remaining 'shifted in' elements.
7730 /// Note that this only handles 128-bit vector widths currently.
7731 static SDValue lowerVectorShuffleAsByteShift(SDLoc DL, MVT VT, SDValue V1,
7732 SDValue V2, ArrayRef<int> Mask,
7733 SelectionDAG &DAG) {
7734 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7736 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7738 int Size = Mask.size();
7739 int Scale = 16 / Size;
7741 for (int Shift = 1; Shift < Size; Shift++) {
7742 int ByteShift = Shift * Scale;
7744 // PSRLDQ : (little-endian) right byte shift
7745 // [ 5, 6, 7, zz, zz, zz, zz, zz]
7746 // [ -1, 5, 6, 7, zz, zz, zz, zz]
7747 // [ 1, 2, -1, -1, -1, -1, zz, zz]
7748 bool ZeroableRight = true;
7749 for (int i = Size - Shift; i < Size; i++) {
7750 ZeroableRight &= Zeroable[i];
7751 }
7753 if (ZeroableRight) {
7754 bool ValidShiftRight1 =
7755 isSequentialOrUndefInRange(Mask, 0, Size - Shift, Shift);
7756 bool ValidShiftRight2 =
7757 isSequentialOrUndefInRange(Mask, 0, Size - Shift, Size + Shift);
7759 if (ValidShiftRight1 || ValidShiftRight2) {
7760 // Cast the inputs to v2i64 to match PSRLDQ.
7761 SDValue &TargetV = ValidShiftRight1 ? V1 : V2;
7762 SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TargetV);
7763 SDValue Shifted = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, V,
7764 DAG.getConstant(ByteShift * 8, MVT::i8));
7765 return DAG.getNode(ISD::BITCAST, DL, VT, Shifted);
7766 }
7767 }
7769 // PSLLDQ : (little-endian) left byte shift
7770 // [ zz, 0, 1, 2, 3, 4, 5, 6]
7771 // [ zz, zz, -1, -1, 2, 3, 4, -1]
7772 // [ zz, zz, zz, zz, zz, zz, -1, 1]
7773 bool ZeroableLeft = true;
7774 for (int i = 0; i < Shift; i++) {
7775 ZeroableLeft &= Zeroable[i];
7776 }
7778 if (ZeroableLeft) {
7779 bool ValidShiftLeft1 =
7780 isSequentialOrUndefInRange(Mask, Shift, Size - Shift, 0);
7781 bool ValidShiftLeft2 =
7782 isSequentialOrUndefInRange(Mask, Shift, Size - Shift, Size);
7784 if (ValidShiftLeft1 || ValidShiftLeft2) {
7785 // Cast the inputs to v2i64 to match PSLLDQ.
7786 SDValue &TargetV = ValidShiftLeft1 ? V1 : V2;
7787 SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TargetV);
7788 SDValue Shifted = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, V,
7789 DAG.getConstant(ByteShift * 8, MVT::i8));
7790 return DAG.getNode(ISD::BITCAST, DL, VT, Shifted);
7791 }
7792 }
7793 }
7795 return SDValue();
7796 }
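// For example, the v8i16 mask [2, 3, 4, 5, 6, 7, zz, zz] (zz = zeroable)
// is a sequential run shifted down by two elements, so it lowers to a
// single PSRLDQ by 4 bytes of the v2i64-bitcast input.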
7798 /// \brief Lower a vector shuffle as a zero or any extension.
7800 /// Given a specific number of elements, element bit width, and extension
7801 /// stride, produce either a zero or any extension based on the available
7802 /// features of the subtarget.
7803 static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
7804 SDLoc DL, MVT VT, int NumElements, int Scale, bool AnyExt, SDValue InputV,
7805 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
7806 assert(Scale > 1 && "Need a scale to extend.");
7807 int EltBits = VT.getSizeInBits() / NumElements;
7808 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
7809 "Only 8, 16, and 32 bit elements can be extended.");
7810 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
7812 // Found a valid zext mask! Try various lowering strategies based on the
7813 // input type and available ISA extensions.
7814 if (Subtarget->hasSSE41()) {
7815 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
7816 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
7817 NumElements / Scale);
7818 InputV = DAG.getNode(ISD::BITCAST, DL, InputVT, InputV);
7819 return DAG.getNode(ISD::BITCAST, DL, VT,
7820 DAG.getNode(X86ISD::VZEXT, DL, ExtVT, InputV));
7821 }
7823 // For any extends we can cheat for larger element sizes and use shuffle
7824 // instructions that can fold with a load and/or copy.
7825 if (AnyExt && EltBits == 32) {
7826 int PSHUFDMask[4] = {0, -1, 1, -1};
7827 return DAG.getNode(
7828 ISD::BITCAST, DL, VT,
7829 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
7830 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
7831 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
7832 }
7833 if (AnyExt && EltBits == 16 && Scale > 2) {
7834 int PSHUFDMask[4] = {0, -1, 0, -1};
7835 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
7836 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
7837 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG));
7838 int PSHUFHWMask[4] = {1, -1, -1, -1};
7839 return DAG.getNode(
7840 ISD::BITCAST, DL, VT,
7841 DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16,
7842 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, InputV),
7843 getV4X86ShuffleImm8ForMask(PSHUFHWMask, DAG)));
7844 }
7846 // If this would require more than 2 unpack instructions to expand, use
7847 // pshufb when available. We can only use more than 2 unpack instructions
7848 // when zero extending i8 elements which also makes it easier to use pshufb.
7849 if (Scale > 4 && EltBits == 8 && Subtarget->hasSSSE3()) {
7850 assert(NumElements == 16 && "Unexpected byte vector width!");
7851 SDValue PSHUFBMask[16];
7852 for (int i = 0; i < 16; ++i)
7853 PSHUFBMask[i] =
7854 DAG.getConstant((i % Scale == 0) ? i / Scale : 0x80, MVT::i8);
7855 InputV = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, InputV);
7856 return DAG.getNode(ISD::BITCAST, DL, VT,
7857 DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
7858 DAG.getNode(ISD::BUILD_VECTOR, DL,
7859 MVT::v16i8, PSHUFBMask)));
7860 }
7862 // Otherwise emit a sequence of unpacks.
7863 do {
7864 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
7865 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
7866 : getZeroVector(InputVT, Subtarget, DAG, DL);
7867 InputV = DAG.getNode(ISD::BITCAST, DL, InputVT, InputV);
7868 InputV = DAG.getNode(X86ISD::UNPCKL, DL, InputVT, InputV, Ext);
7869 Scale /= 2;
7870 EltBits *= 2;
7871 NumElements /= 2;
7872 } while (Scale > 1);
7873 return DAG.getNode(ISD::BITCAST, DL, VT, InputV);
7874 }
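// For example, zero-extending the low four i8 elements of a v16i8 to i32
// (Scale == 4) emits a single X86ISD::VZEXT (pmovzxbd) on SSE4.1; without
// SSE4.1 the do/while loop instead unpacks against zero twice
// (punpcklbw then punpcklwd).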
7876 /// \brief Try to lower a vector shuffle as a zero extension on any microarch.
7878 /// This routine will try to do everything in its power to cleverly lower
7879 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
7880 /// check for the profitability of this lowering, it tries to aggressively
7881 /// match this pattern. It will use all of the micro-architectural details it
7882 /// can to emit an efficient lowering. It handles both blends with all-zero
7883 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
7884 /// masking out later).
7886 /// The reason we have dedicated lowering for zext-style shuffles is that they
7887 /// are both incredibly common and often quite performance sensitive.
7888 static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
7889 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
7890 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
7891 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7893 int Bits = VT.getSizeInBits();
7894 int NumElements = Mask.size();
7896 // Define a helper function to check a particular ext-scale and lower to it if
7897 // valid.
7898 auto Lower = [&](int Scale) -> SDValue {
7899 SDValue InputV;
7900 bool AnyExt = true;
7901 for (int i = 0; i < NumElements; ++i) {
7902 if (Mask[i] == -1)
7903 continue; // Valid anywhere but doesn't tell us anything.
7904 if (i % Scale != 0) {
7905 // Each of the extended elements need to be zeroable.
7906 if (!Zeroable[i])
7907 return SDValue();
7909 // We no longer are in the anyext case.
7910 AnyExt = false;
7911 continue;
7912 }
7914 // Each of the base elements needs to be consecutive indices into the
7915 // same input vector.
7916 SDValue V = Mask[i] < NumElements ? V1 : V2;
7917 if (!InputV)
7918 InputV = V;
7919 else if (InputV != V)
7920 return SDValue(); // Flip-flopping inputs.
7922 if (Mask[i] % NumElements != i / Scale)
7923 return SDValue(); // Non-consecutive strided elements.
7924 }
7926 // If we fail to find an input, we have a zero-shuffle which should always
7927 // have already been handled.
7928 // FIXME: Maybe handle this here in case during blending we end up with one?
7929 if (!InputV)
7930 return SDValue();
7932 return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
7933 DL, VT, NumElements, Scale, AnyExt, InputV, Subtarget, DAG);
7934 };
7936 // The widest scale possible for extending is to a 64-bit integer.
7937 assert(Bits % 64 == 0 &&
7938 "The number of bits in a vector must be divisible by 64 on x86!");
7939 int NumExtElements = Bits / 64;
7941 // Each iteration, try extending the elements half as much, but into twice as
7942 // many elements.
7943 for (; NumExtElements < NumElements; NumExtElements *= 2) {
7944 assert(NumElements % NumExtElements == 0 &&
7945 "The input vector size must be divisible by the extended size.");
7946 if (SDValue V = Lower(NumElements / NumExtElements))
7947 return V;
7948 }
7950 // No viable ext lowering found.
7951 return SDValue();
7952 }
/// \brief Try to get a scalar value for a specific element of a vector.
///
/// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
                                              SelectionDAG &DAG) {
  MVT VT = V.getSimpleValueType();
  MVT EltVT = VT.getVectorElementType();
  while (V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);
  // If the bitcasts shift the element size, we can't extract an equivalent
  // element from it.
  MVT NewVT = V.getSimpleValueType();
  if (!NewVT.isVector() ||
      NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
    return SDValue();

  if (V.getOpcode() == ISD::BUILD_VECTOR ||
      (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR))
    return DAG.getNode(ISD::BITCAST, SDLoc(V), EltVT, V.getOperand(Idx));

  return SDValue();
}
/// \brief Helper to test for a load that can be folded with x86 shuffles.
///
/// This is particularly important because the set of instructions varies
/// significantly based on whether the operand is a load or not.
static bool isShuffleFoldableLoad(SDValue V) {
  while (V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);

  return ISD::isNON_EXTLoad(V.getNode());
}
/// \brief Try to lower insertion of a single element into a zero vector.
///
/// This is a common pattern for which we have especially efficient patterns
/// to lower across all subtarget feature sets.
static SDValue lowerVectorShuffleAsElementInsertion(
    MVT VT, SDLoc DL, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget *Subtarget, SelectionDAG &DAG) {
  SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
  MVT ExtVT = VT;
  MVT EltVT = VT.getVectorElementType();

  int V2Index = std::find_if(Mask.begin(), Mask.end(),
                             [&Mask](int M) { return M >= (int)Mask.size(); }) -
                Mask.begin();
  bool IsV1Zeroable = true;
  for (int i = 0, Size = Mask.size(); i < Size; ++i)
    if (i != V2Index && !Zeroable[i]) {
      IsV1Zeroable = false;
      break;
    }

  // Check for a single input from a SCALAR_TO_VECTOR node.
  // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
  // all the smarts here sunk into that routine. However, the current
  // lowering of BUILD_VECTOR makes that nearly impossible until the old
  // vector shuffle lowering is dead.
  if (SDValue V2S = getScalarValueForVectorElement(
          V2, Mask[V2Index] - Mask.size(), DAG)) {
    // We need to zext the scalar if it is smaller than an i32.
    V2S = DAG.getNode(ISD::BITCAST, DL, EltVT, V2S);
    if (EltVT == MVT::i8 || EltVT == MVT::i16) {
      // Using zext to expand a narrow element won't work for non-zero
      // insertions.
      if (!IsV1Zeroable)
        return SDValue();

      // Zero-extend directly to i32.
      ExtVT = MVT::v4i32;
      V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
    }
    V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
  } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
             EltVT == MVT::i16) {
    // Either not inserting from the low element of the input or the input
    // element size is too small to use VZEXT_MOVL to clear the high bits.
    return SDValue();
  }

  if (!IsV1Zeroable) {
    // If V1 can't be treated as a zero vector we have fewer options to lower
    // this. We can't support integer vectors or non-zero targets cheaply, and
    // the V1 elements can't be permuted in any way.
    assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
    if (!VT.isFloatingPoint() || V2Index != 0)
      return SDValue();
    SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
    V1Mask[V2Index] = -1;
    if (!isNoopShuffleMask(V1Mask))
      return SDValue();
    // This is essentially a special case blend operation, but if we have
    // general purpose blend operations, they are always faster. Bail and let
    // the rest of the lowering handle these as blends.
    if (Subtarget->hasSSE41())
      return SDValue();

    // Otherwise, use MOVSD or MOVSS.
    assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
           "Only two types of floating point element types to handle!");
    return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
                       ExtVT, V1, V2);
  }

  V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
  if (ExtVT != VT)
    V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);

  if (V2Index != 0) {
    // If we have 4 or fewer lanes we can cheaply shuffle the element into
    // the desired position. Otherwise it is more efficient to do a vector
    // shift left. We know that we can do a vector shift left because all
    // the inputs are zero.
    if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
      SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
      V2Shuffle[V2Index] = 0;
      V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
    } else {
      V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V2);
      V2 = DAG.getNode(
          X86ISD::VSHLDQ, DL, MVT::v2i64, V2,
          DAG.getConstant(
              V2Index * EltVT.getSizeInBits(),
              DAG.getTargetLoweringInfo().getScalarShiftAmountTy(MVT::v2i64)));
      V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
    }
  }
  return V2;
}
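// Concrete example (comments only): a v4f32 shuffle with mask <4, 1, 2, 3>
// keeps V1's lanes 1-3 in place and inserts V2's low element into lane 0.
// When V1 is not zeroable, that is exactly MOVSS pre-SSE4.1 (with SSE4.1 we
// bail and let the generic blend lowering pick BLENDPS). When every V1 lane
// is zeroable, the VZEXT_MOVL path produces the "scalar into zero vector"
// form instead.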
/// \brief Try to lower broadcast of a single element.
///
/// For convenience, this code also bundles all of the subtarget feature set
/// filtering. While a little annoying to re-dispatch on type here, there isn't
/// a convenient way to factor it out.
static SDValue lowerVectorShuffleAsBroadcast(MVT VT, SDLoc DL, SDValue V,
                                             ArrayRef<int> Mask,
                                             const X86Subtarget *Subtarget,
                                             SelectionDAG &DAG) {
  if (!Subtarget->hasAVX())
    return SDValue();
  if (VT.isInteger() && !Subtarget->hasAVX2())
    return SDValue();

  // Check that the mask is a broadcast.
  int BroadcastIdx = -1;
  for (int M : Mask)
    if (M >= 0 && BroadcastIdx == -1)
      BroadcastIdx = M;
    else if (M >= 0 && M != BroadcastIdx)
      return SDValue();

  assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
                                            "a sorted mask where the broadcast "
                                            "comes from V1.");

  // Go up the chain of (vector) values to try and find a scalar load that
  // we can combine with the broadcast.
  for (;;) {
    switch (V.getOpcode()) {
    case ISD::CONCAT_VECTORS: {
      int OperandSize = Mask.size() / V.getNumOperands();
      V = V.getOperand(BroadcastIdx / OperandSize);
      BroadcastIdx %= OperandSize;
      continue;
    }

    case ISD::INSERT_SUBVECTOR: {
      SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
      auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
      if (!ConstantIdx)
        break;

      int BeginIdx = (int)ConstantIdx->getZExtValue();
      int EndIdx =
          BeginIdx + (int)VInner.getValueType().getVectorNumElements();
      if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
        BroadcastIdx -= BeginIdx;
        V = VInner;
      } else {
        V = VOuter;
      }
      continue;
    }
    }
    break;
  }

  // Check if this is a broadcast of a scalar. We special case lowering
  // for scalars so that we can more effectively fold with loads.
  if (V.getOpcode() == ISD::BUILD_VECTOR ||
      (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
    V = V.getOperand(BroadcastIdx);

    // If the scalar isn't a load we can't broadcast from it in AVX1, only with
    // AVX2.
    if (!Subtarget->hasAVX2() && !isShuffleFoldableLoad(V))
      return SDValue();
  } else if (BroadcastIdx != 0 || !Subtarget->hasAVX2()) {
    // We can't broadcast from a vector register w/o AVX2, and we can only
    // broadcast from the zero-element of a vector register.
    return SDValue();
  }

  return DAG.getNode(X86ISD::VBROADCAST, DL, VT, V);
}
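// Feature matrix sketch (comments only) of what the routine above accepts:
//
//   Pre-AVX:           no VBROADCAST at all; we bail out early.
//   AVX1, float types: broadcast of a foldable scalar load only.
//   AVX2, any type:    broadcast from a scalar (load or register), or from
//                      the zero element of a vector register.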
// Check for whether we can use INSERTPS to perform the shuffle. We only use
// INSERTPS when the V1 elements are already in the correct locations
// because otherwise we can just always use two SHUFPS instructions which
// are much smaller to encode than a SHUFPS and an INSERTPS. We can also
// perform INSERTPS if a single V1 element is out of place and all V2
// elements are zeroable.
static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
                                            ArrayRef<int> Mask,
                                            SelectionDAG &DAG) {
  assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
  assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);

  unsigned ZMask = 0;
  int V1DstIndex = -1;
  int V2DstIndex = -1;
  bool V1UsedInPlace = false;

  for (int i = 0; i < 4; i++) {
    // Synthesize a zero mask from the zeroable elements (includes undefs).
    if (Zeroable[i]) {
      ZMask |= 1 << i;
      continue;
    }

    // Flag if we use any V1 inputs in place.
    if (i == Mask[i]) {
      V1UsedInPlace = true;
      continue;
    }

    // We can only insert a single non-zeroable element.
    if (V1DstIndex != -1 || V2DstIndex != -1)
      return SDValue();

    if (Mask[i] < 4) {
      // V1 input out of place for insertion.
      V1DstIndex = i;
    } else {
      // V2 input for insertion.
      V2DstIndex = i;
    }
  }

  // Don't bother if we have no (non-zeroable) element for insertion.
  if (V1DstIndex == -1 && V2DstIndex == -1)
    return SDValue();

  // Determine element insertion src/dst indices. The src index is from the
  // start of the inserted vector, not the start of the concatenated vector.
  unsigned V2SrcIndex = 0;
  if (V1DstIndex != -1) {
    // If we have a V1 input out of place, we use V1 as the V2 element
    // insertion and don't use the original V2 at all.
    V2SrcIndex = Mask[V1DstIndex];
    V2DstIndex = V1DstIndex;
    V2 = V1;
  } else {
    V2SrcIndex = Mask[V2DstIndex] - 4;
  }

  // If no V1 inputs are used in place, then the result is created only from
  // the zero mask and the V2 insertion - so remove V1 dependency.
  if (!V1UsedInPlace)
    V1 = DAG.getUNDEF(MVT::v4f32);

  unsigned InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
  assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");

  // Insert the V2 element into the desired position.
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
                     DAG.getConstant(InsertPSMask, MVT::i8));
}
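// Worked immediate example (comments only): inserting V2's lane 1 into
// destination lane 2 while zeroing lanes 0 and 3 encodes as
//   InsertPSMask = (1 << 6) | (2 << 4) | 0b1001 = 0x69,
// matching the INSERTPS imm8 layout of [src:2][dst:2][zmask:4].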
/// \brief Handle lowering of 2-lane 64-bit floating point shuffles.
///
/// This is the basis function for the 2-lane 64-bit shuffles as we have full
/// support for floating point shuffles but not integer shuffles. These
/// instructions will incur a domain crossing penalty on some chips though so
/// it is better to avoid lowering through this for integer vectors where
/// possible.
static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(Op.getSimpleValueType() == MVT::v2f64 && "Bad shuffle type!");
  assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");

  if (isSingleInputShuffleMask(Mask)) {
    // Use low duplicate instructions for masks that match their pattern.
    if (Subtarget->hasSSE3())
      if (isShuffleEquivalent(Mask, 0, 0))
        return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, V1);

    // Straight shuffle of a single input vector. Simulate this by using the
    // single input as both of the "inputs" to this instruction.
    unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);

    if (Subtarget->hasAVX()) {
      // If we have AVX, we can use VPERMILPS which will allow folding a load
      // into the shuffle.
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
                         DAG.getConstant(SHUFPDMask, MVT::i8));
    }

    return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1,
                       DAG.getConstant(SHUFPDMask, MVT::i8));
  }
  assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
  assert(Mask[1] >= 2 && "Non-canonicalized blend!");

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(Mask, 0, 2))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2f64, V1, V2);
  if (isShuffleEquivalent(Mask, 1, 3))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2f64, V1, V2);

  // If we have a single input, insert that into V1 if we can do so cheaply.
  if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
    if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
            MVT::v2f64, DL, V1, V2, Mask, Subtarget, DAG))
      return Insertion;
    // Try inverting the insertion since for v2 masks it is easy to do and we
    // can't reliably sort the mask one way or the other.
    int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
                          Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
    if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
            MVT::v2f64, DL, V2, V1, InverseMask, Subtarget, DAG))
      return Insertion;
  }

  // Try to use one of the special instruction patterns to handle two common
  // blend patterns if a zero-blend above didn't work.
  if (isShuffleEquivalent(Mask, 0, 3) || isShuffleEquivalent(Mask, 1, 3))
    if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
      // We can either use a special instruction to load over the low double or
      // to move just the low double.
      return DAG.getNode(
          isShuffleFoldableLoad(V1S) ? X86ISD::MOVLPD : X86ISD::MOVSD,
          DL, MVT::v2f64, V2,
          DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));

  if (Subtarget->hasSSE41())
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;

  unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
  return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V2,
                     DAG.getConstant(SHUFPDMask, MVT::i8));
}
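// Worked immediate example (comments only): mask <0, 3> selects V1[0] and
// V2[1], so the fallthrough computes
//   SHUFPDMask = (0 == 1) | (((3 - 2) == 1) << 1) = 0b10,
// i.e. take the low element from V1 and the high element from V2.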
/// \brief Handle lowering of 2-lane 64-bit integer shuffles.
///
/// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
/// the integer unit to minimize domain crossing penalties. However, for blends
/// it falls back to the floating point shuffle operation with appropriate bit
/// casting.
static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(Op.getSimpleValueType() == MVT::v2i64 && "Bad shuffle type!");
  assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");

  if (isSingleInputShuffleMask(Mask)) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v2i64, DL, V1,
                                                          Mask, Subtarget, DAG))
      return Broadcast;

    // Straight shuffle of a single input vector. For everything from SSE2
    // onward this has a single fast instruction with no scary immediates.
    // We have to map the mask as it is actually a v4i32 shuffle instruction.
    V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V1);
    int WidenedMask[4] = {
        std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
        std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
    return DAG.getNode(
        ISD::BITCAST, DL, MVT::v2i64,
        DAG.getNode(X86ISD::PSHUFD, SDLoc(Op), MVT::v4i32, V1,
                    getV4X86ShuffleImm8ForMask(WidenedMask, DAG)));
  }

  // Try to use byte shift instructions.
  if (SDValue Shift = lowerVectorShuffleAsByteShift(
          DL, MVT::v2i64, V1, V2, Mask, DAG))
    return Shift;

  // If we have a single input from V2 insert that into V1 if we can do so
  // cheaply.
  if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
    if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
            MVT::v2i64, DL, V1, V2, Mask, Subtarget, DAG))
      return Insertion;
    // Try inverting the insertion since for v2 masks it is easy to do and we
    // can't reliably sort the mask one way or the other.
    int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
                          Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
    if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
            MVT::v2i64, DL, V2, V1, InverseMask, Subtarget, DAG))
      return Insertion;
  }

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(Mask, 0, 2))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, V1, V2);
  if (isShuffleEquivalent(Mask, 1, 3))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2i64, V1, V2);

  if (Subtarget->hasSSE41())
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;

  // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
  if (Subtarget->hasSSSE3())
    if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
            DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
      return Rotate;

  // We implement this with SHUFPD which is pretty lame because it will likely
  // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
  // However, all the alternatives are still more cycles and newer chips don't
  // have this problem. It would be really nice if x86 had better shuffles here.
  V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V1);
  V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V2);
  return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
                     DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
}
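// Worked mask example (comments only): a single-input v2i64 mask <1, 0>
// widens to the v4i32 mask {2, 3, 0, 1}, which encodes as the PSHUFD
// immediate 2 | (3 << 2) | (0 << 4) | (1 << 6) = 0x4E.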
/// \brief Lower a vector shuffle using the SHUFPS instruction.
///
/// This is a helper routine dedicated to lowering vector shuffles using
/// SHUFPS. It makes no assumptions about whether this is the *best* lowering,
/// it simply uses it.
static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
                                            ArrayRef<int> Mask, SDValue V1,
                                            SDValue V2, SelectionDAG &DAG) {
  SDValue LowV = V1, HighV = V2;
  int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};

  int NumV2Elements =
      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });

  if (NumV2Elements == 1) {
    int V2Index =
        std::find_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; }) -
        Mask.begin();

    // Compute the index adjacent to V2Index and in the same half by toggling
    // the low bit.
    int V2AdjIndex = V2Index ^ 1;

    if (Mask[V2AdjIndex] == -1) {
      // Handles all the cases where we have a single V2 element and an undef.
      // This will only ever happen in the high lanes because we commute the
      // vector otherwise.
      if (V2Index < 2)
        std::swap(LowV, HighV);
      NewMask[V2Index] -= 4;
    } else {
      // Handle the case where the V2 element ends up adjacent to a V1 element.
      // To make this work, blend them together as the first step.
      int V1Index = V2AdjIndex;
      int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
      V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
                       getV4X86ShuffleImm8ForMask(BlendMask, DAG));

      // Now proceed to reconstruct the final blend as we have the necessary
      // high or low half formed.
      if (V2Index < 2) {
        LowV = V2;
        HighV = V1;
      } else {
        HighV = V2;
      }
      NewMask[V1Index] = 2; // We put the V1 element in V2[2].
      NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
    }
  } else if (NumV2Elements == 2) {
    if (Mask[0] < 4 && Mask[1] < 4) {
      // Handle the easy case where we have V1 in the low lanes and V2 in the
      // high lanes.
      NewMask[2] -= 4;
      NewMask[3] -= 4;
    } else if (Mask[2] < 4 && Mask[3] < 4) {
      // We also handle the reversed case because this utility may get called
      // when we detect a SHUFPS pattern but can't easily commute the shuffle to
      // arrange things in the right direction.
      NewMask[0] -= 4;
      NewMask[1] -= 4;
      HighV = V1;
      LowV = V2;
    } else {
      // We have a mixture of V1 and V2 in both low and high lanes. Rather than
      // trying to place elements directly, just blend them and set up the final
      // shuffle to place them.

      // The first two blend mask elements are for V1, the second two are for
      // V2.
      int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
                          Mask[2] < 4 ? Mask[2] : Mask[3],
                          (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
                          (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
      V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
                       getV4X86ShuffleImm8ForMask(BlendMask, DAG));

      // Now we do a normal shuffle of V1 by giving V1 as both operands to
      // the shuffle.
      LowV = HighV = V1;
      NewMask[0] = Mask[0] < 4 ? 0 : 2;
      NewMask[1] = Mask[0] < 4 ? 2 : 0;
      NewMask[2] = Mask[2] < 4 ? 1 : 3;
      NewMask[3] = Mask[2] < 4 ? 3 : 1;
    }
  }
  return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
                     getV4X86ShuffleImm8ForMask(NewMask, DAG));
}
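// Worked example (comments only) for the mixed two-element case above: with
// Mask = <0, 5, 2, 7>, BlendMask becomes {0, 2, 1, 3}, so the first SHUFPS
// builds [V1[0], V1[2], V2[1], V2[3]]. The final SHUFPS of that value with
// itself uses NewMask = {0, 2, 1, 3} and yields [V1[0], V2[1], V1[2], V2[3]],
// exactly the requested shuffle.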
/// \brief Lower 4-lane 32-bit floating point shuffles.
///
/// Uses instructions exclusively from the floating point unit to minimize
/// domain crossing penalties, as these are sufficient to implement all v4f32
/// shuffles.
static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
  assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  int NumV2Elements =
      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });

  if (NumV2Elements == 0) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f32, DL, V1,
                                                          Mask, Subtarget, DAG))
      return Broadcast;

    // Use even/odd duplicate instructions for masks that match their pattern.
    if (Subtarget->hasSSE3()) {
      if (isShuffleEquivalent(Mask, 0, 0, 2, 2))
        return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
      if (isShuffleEquivalent(Mask, 1, 1, 3, 3))
        return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
    }

    if (Subtarget->hasAVX()) {
      // If we have AVX, we can use VPERMILPS which will allow folding a load
      // into the shuffle.
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
                         getV4X86ShuffleImm8ForMask(Mask, DAG));
    }

    // Otherwise, use a straight shuffle of a single input vector. We pass the
    // input vector to both operands to simulate this with a SHUFPS.
    return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DAG));
  }

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(Mask, 0, 4, 1, 5))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V1, V2);
  if (isShuffleEquivalent(Mask, 2, 6, 3, 7))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V1, V2);

  // There are special ways we can lower some single-element blends. However,
  // we have custom ways we can lower more complex single-element blends below
  // that we defer to if both this and BLENDPS fail to match, so restrict this
  // to when the V2 input is targeting element 0 of the mask -- that is the
  // fast case here.
  if (NumV2Elements == 1 && Mask[0] >= 4)
    if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4f32, DL, V1, V2,
                                                         Mask, Subtarget, DAG))
      return V;

  if (Subtarget->hasSSE41()) {
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;

    // Use INSERTPS if we can complete the shuffle efficiently.
    if (SDValue V = lowerVectorShuffleAsInsertPS(Op, V1, V2, Mask, DAG))
      return V;
  }

  // Otherwise fall back to a SHUFPS lowering strategy.
  return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
}
/// \brief Lower 4-lane i32 vector shuffles.
///
/// We try to handle these with integer-domain shuffles where we can, but for
/// blends we use the floating point domain blend instructions.
static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(Op.getSimpleValueType() == MVT::v4i32 && "Bad shuffle type!");
  assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2,
                                                         Mask, Subtarget, DAG))
    return ZExt;

  int NumV2Elements =
      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });

  if (NumV2Elements == 0) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i32, DL, V1,
                                                          Mask, Subtarget, DAG))
      return Broadcast;

    // Straight shuffle of a single input vector. For everything from SSE2
    // onward this has a single fast instruction with no scary immediates.
    // We coerce the shuffle pattern to be compatible with UNPCK instructions
    // but we aren't actually going to use the UNPCK instruction because doing
    // so prevents folding a load into this instruction or making a copy.
    const int UnpackLoMask[] = {0, 0, 1, 1};
    const int UnpackHiMask[] = {2, 2, 3, 3};
    if (isShuffleEquivalent(Mask, 0, 0, 1, 1))
      Mask = UnpackLoMask;
    else if (isShuffleEquivalent(Mask, 2, 2, 3, 3))
      Mask = UnpackHiMask;

    return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DAG));
  }

  // Try to use byte shift instructions.
  if (SDValue Shift = lowerVectorShuffleAsByteShift(
          DL, MVT::v4i32, V1, V2, Mask, DAG))
    return Shift;

  // There are special ways we can lower some single-element blends.
  if (NumV2Elements == 1)
    if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4i32, DL, V1, V2,
                                                         Mask, Subtarget, DAG))
      return V;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(Mask, 0, 4, 1, 5))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V1, V2);
  if (isShuffleEquivalent(Mask, 2, 6, 3, 7))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V1, V2);

  if (Subtarget->hasSSE41())
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;

  // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
  if (Subtarget->hasSSSE3())
    if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
            DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
      return Rotate;

  // We implement this with SHUFPS because it can blend from two vectors.
  // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
  // up the inputs, bypassing domain shift penalties that we would incur if we
  // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
  // relevant.
  return DAG.getNode(ISD::BITCAST, DL, MVT::v4i32,
                     DAG.getVectorShuffle(
                         MVT::v4f32, DL,
                         DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V1),
                         DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V2), Mask));
}
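// Worked immediate example (comments only): the single-input mask <0,0,1,1>
// above is kept in PSHUFD form rather than using UNPCKLDQ, and encodes as
//   imm8 = 0 | (0 << 2) | (1 << 4) | (1 << 6) = 0x50,
// which preserves the ability to fold a load of V1 into the PSHUFD.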
/// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
/// shuffle lowering, and the most complex part.
///
/// The lowering strategy is to try to form pairs of input lanes which are
/// targeted at the same half of the final vector, and then use a dword shuffle
/// to place them onto the right half, and finally unpack the paired lanes into
/// their final position.
///
/// The exact breakdown of how to form these dword pairs and align them on the
/// correct sides is really tricky. See the comments within the function for
/// more of the details.
static SDValue lowerV8I16SingleInputVectorShuffle(
    SDLoc DL, SDValue V, MutableArrayRef<int> Mask,
    const X86Subtarget *Subtarget, SelectionDAG &DAG) {
  assert(V.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
  MutableArrayRef<int> LoMask = Mask.slice(0, 4);
  MutableArrayRef<int> HiMask = Mask.slice(4, 4);

  SmallVector<int, 4> LoInputs;
  std::copy_if(LoMask.begin(), LoMask.end(), std::back_inserter(LoInputs),
               [](int M) { return M >= 0; });
  std::sort(LoInputs.begin(), LoInputs.end());
  LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
  SmallVector<int, 4> HiInputs;
  std::copy_if(HiMask.begin(), HiMask.end(), std::back_inserter(HiInputs),
               [](int M) { return M >= 0; });
  std::sort(HiInputs.begin(), HiInputs.end());
  HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
  int NumLToL =
      std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
  int NumHToL = LoInputs.size() - NumLToL;
  int NumLToH =
      std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
  int NumHToH = HiInputs.size() - NumLToH;
  MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
  MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
  MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
  MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i16, DL, V,
                                                        Mask, Subtarget, DAG))
    return Broadcast;

  // Try to use byte shift instructions.
  if (SDValue Shift = lowerVectorShuffleAsByteShift(
          DL, MVT::v8i16, V, V, Mask, DAG))
    return Shift;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(Mask, 0, 0, 1, 1, 2, 2, 3, 3))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V, V);
  if (isShuffleEquivalent(Mask, 4, 4, 5, 5, 6, 6, 7, 7))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V, V);

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
          DL, MVT::v8i16, V, V, Mask, Subtarget, DAG))
    return Rotate;

  // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
  // such inputs we can swap two of the dwords across the half mark and end up
  // with <=2 inputs to each half in each half. Once there, we can fall through
  // to the generic code below. For example:
  //
  // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
  // Mask:  [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
  //
  // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
  // and an existing 2-into-2 on the other half. In this case we may have to
  // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
  // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
  // Fortunately, we don't have to handle anything but a 2-into-2 pattern
  // because any other situation (including a 3-into-1 or 1-into-3 in the other
  // half than the one we target for fixing) will be fixed when we re-enter this
  // path. We will also combine away any sequence of PSHUFD instructions that
  // result into a single instruction. Here is an example of the tricky case:
  //
  // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
  // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
  //
  // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
  //
  // Input: [a, b, c, d, e, f, g, h] -PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
  // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] ------------------> [3, 7, 1, 0, 2, 7, 3, 6]
  //
  // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]--> [a, b, e, g, c, d, f, h]
  // Mask:  [3, 7, 1, 0, 2, 7, 3, 6] ------------------> [5, 7, 1, 0, 4, 7, 5, 6]
  //
  // The result is fine to be handled by the generic logic.
  auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
                          ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
                          int AOffset, int BOffset) {
    assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
           "Must call this with A having 3 or 1 inputs from the A half.");
    assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
           "Must call this with B having 1 or 3 inputs from the B half.");
    assert(AToAInputs.size() + BToAInputs.size() == 4 &&
           "Must call this with either 3:1 or 1:3 inputs (summing to 4).");

    // Compute the index of dword with only one word among the three inputs in
    // a half by taking the sum of the half with three inputs and subtracting
    // the sum of the actual three inputs. The difference is the remaining
    // slot.
    int ADWord, BDWord;
    int &TripleDWord = AToAInputs.size() == 3 ? ADWord : BDWord;
    int &OneInputDWord = AToAInputs.size() == 3 ? BDWord : ADWord;
    int TripleInputOffset = AToAInputs.size() == 3 ? AOffset : BOffset;
    ArrayRef<int> TripleInputs =
        AToAInputs.size() == 3 ? AToAInputs : BToAInputs;
    int OneInput = AToAInputs.size() == 3 ? BToAInputs[0] : AToAInputs[0];
    int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
    int TripleNonInputIdx =
        TripleInputSum -
        std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
    TripleDWord = TripleNonInputIdx / 2;

    // We use xor with one to compute the adjacent DWord to whichever one the
    // OneInput is in.
    OneInputDWord = (OneInput / 2) ^ 1;

    // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
    // and BToA inputs. If there is also such a problem with the BToB and AToB
    // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
    // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
    // is essential that we don't *create* a 3<-1 as then we might oscillate.
    if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
      // Compute how many inputs will be flipped by swapping these DWords. We
      // need to balance this to ensure we don't form a 3-1 shuffle in the
      // other half.
      int NumFlippedAToBInputs =
          std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
          std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
      int NumFlippedBToBInputs =
          std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
          std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
      if ((NumFlippedAToBInputs == 1 &&
           (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
          (NumFlippedBToBInputs == 1 &&
           (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
        // We choose whether to fix the A half or B half based on whether that
        // half has zero flipped inputs. At zero, we may not be able to fix it
        // with that half. We also bias towards fixing the B half because that
        // will more commonly be the high half, and we have to bias one way.
        auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
                                                       ArrayRef<int> Inputs) {
          int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
          bool IsFixIdxInput = std::find(Inputs.begin(), Inputs.end(),
                                         PinnedIdx ^ 1) != Inputs.end();
          // Determine whether the free index is in the flipped dword or the
          // unflipped dword based on where the pinned index is. We use this bit
          // in an xor to conditionally select the adjacent dword.
          int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
          bool IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
                                             FixFreeIdx) != Inputs.end();
          if (IsFixIdxInput == IsFixFreeIdxInput)
            FixFreeIdx += 1;
          IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
                                        FixFreeIdx) != Inputs.end();
          assert(IsFixIdxInput != IsFixFreeIdxInput &&
                 "We need to be changing the number of flipped inputs!");
          int PSHUFHalfMask[] = {0, 1, 2, 3};
          std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
          V = DAG.getNode(FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
                          MVT::v8i16, V,
                          getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DAG));

          for (int &M : Mask)
            if (M != -1 && M == FixIdx)
              M = FixFreeIdx;
            else if (M != -1 && M == FixFreeIdx)
              M = FixIdx;
        };
        if (NumFlippedBToBInputs != 0) {
          int BPinnedIdx =
              BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
          FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
        } else {
          assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
          int APinnedIdx =
              AToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
          FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
        }
      }
    }

    int PSHUFDMask[] = {0, 1, 2, 3};
    PSHUFDMask[ADWord] = BDWord;
    PSHUFDMask[BDWord] = ADWord;
    V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
                    DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
                                DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
                                getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));

    // Adjust the mask to match the new locations of A and B.
    for (int &M : Mask)
      if (M != -1 && M / 2 == ADWord)
        M = 2 * BDWord + M % 2;
      else if (M != -1 && M / 2 == BDWord)
        M = 2 * ADWord + M % 2;

    // Recurse back into this routine to re-compute state now that this isn't
    // a 3 and 1 problem.
    return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
                                Mask);
  };
  if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
    return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
  else if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
    return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
  // At this point there are at most two inputs to the low and high halves from
  // each half. That means the inputs can always be grouped into dwords and
  // those dwords can then be moved to the correct half with a dword shuffle.
  // We use at most one low and one high word shuffle to collect these paired
  // inputs into dwords, and finally a dword shuffle to place them.
  int PSHUFLMask[4] = {-1, -1, -1, -1};
  int PSHUFHMask[4] = {-1, -1, -1, -1};
  int PSHUFDMask[4] = {-1, -1, -1, -1};

  // First fix the masks for all the inputs that are staying in their
  // original halves. This will then dictate the targets of the cross-half
  // shuffles.
  auto fixInPlaceInputs =
      [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
                    MutableArrayRef<int> SourceHalfMask,
                    MutableArrayRef<int> HalfMask, int HalfOffset) {
    if (InPlaceInputs.empty())
      return;
    if (InPlaceInputs.size() == 1) {
      SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
          InPlaceInputs[0] - HalfOffset;
      PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
      return;
    }
    if (IncomingInputs.empty()) {
      // Just fix all of the in place inputs.
      for (int Input : InPlaceInputs) {
        SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
        PSHUFDMask[Input / 2] = Input / 2;
      }
      return;
    }

    assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
    SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
        InPlaceInputs[0] - HalfOffset;
    // Put the second input next to the first so that they are packed into
    // a dword. We find the adjacent index by toggling the low bit.
    int AdjIndex = InPlaceInputs[0] ^ 1;
    SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
    std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
    PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
  };
  fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
  fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
  // Now gather the cross-half inputs and place them into a free dword of
  // their target half.
  // FIXME: This operation could almost certainly be simplified dramatically to
  // look more like the 3-1 fixing operation.
  auto moveInputsToRightHalf = [&PSHUFDMask](
      MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
      MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
      MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
      int DestOffset) {
    auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
      return SourceHalfMask[Word] != -1 && SourceHalfMask[Word] != Word;
    };
    auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
                                               int Word) {
      int LowWord = Word & ~1;
      int HighWord = Word | 1;
      return isWordClobbered(SourceHalfMask, LowWord) ||
             isWordClobbered(SourceHalfMask, HighWord);
    };

    if (IncomingInputs.empty())
      return;

    if (ExistingInputs.empty()) {
      // Map any dwords with inputs from them into the right half.
      for (int Input : IncomingInputs) {
        // If the source half mask maps over the inputs, turn those into
        // swaps and use the swapped lane.
        if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
          if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] == -1) {
            SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
                Input - SourceOffset;
            // We have to swap the uses in our half mask in one sweep.
            for (int &M : HalfMask)
              if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
                M = Input;
              else if (M == Input)
                M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
          } else {
            assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
                       Input - SourceOffset &&
                   "Previous placement doesn't match!");
          }
          // Note that this correctly re-maps both when we do a swap and when
          // we observe the other side of the swap above. We rely on that to
          // avoid swapping the members of the input list directly.
          Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
        }

        // Map the input's dword into the correct half.
        if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] == -1)
          PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
        else
          assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
                     Input / 2 &&
                 "Previous placement doesn't match!");
      }

      // And just directly shift any other-half mask elements to be same-half
      // as we will have mirrored the dword containing the element into the
      // same position within that half.
      for (int &M : HalfMask)
        if (M >= SourceOffset && M < SourceOffset + 4) {
          M = M - SourceOffset + DestOffset;
          assert(M >= 0 && "This should never wrap below zero!");
        }
      return;
    }

    // Ensure we have the input in a viable dword of its current half. This
    // is particularly tricky because the original position may be clobbered
    // by inputs being moved and *staying* in that half.
    if (IncomingInputs.size() == 1) {
      if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
        int InputFixed = std::find(std::begin(SourceHalfMask),
                                   std::end(SourceHalfMask), -1) -
                         std::begin(SourceHalfMask) + SourceOffset;
        SourceHalfMask[InputFixed - SourceOffset] =
            IncomingInputs[0] - SourceOffset;
        std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
                     InputFixed);
        IncomingInputs[0] = InputFixed;
      }
    } else if (IncomingInputs.size() == 2) {
      if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
          isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
        // We have two non-adjacent or clobbered inputs we need to extract from
        // the source half. To do this, we need to map them into some adjacent
        // dword slot in the source mask.
        int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
                              IncomingInputs[1] - SourceOffset};

        // If there is a free slot in the source half mask adjacent to one of
        // the inputs, place the other input in it. We use (Index XOR 1) to
        // compute an adjacent index.
        if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
            SourceHalfMask[InputsFixed[0] ^ 1] == -1) {
          SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
          SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
          InputsFixed[1] = InputsFixed[0] ^ 1;
        } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
                   SourceHalfMask[InputsFixed[1] ^ 1] == -1) {
          SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
          SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
          InputsFixed[0] = InputsFixed[1] ^ 1;
        } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] == -1 &&
                   SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] == -1) {
          // The two inputs are in the same DWord but it is clobbered and the
          // adjacent DWord isn't used at all. Move both inputs to the free
          // slot.
          SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
          SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
          InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
          InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
        } else {
          // The only way we hit this point is if there is no clobbering
          // (because there are no off-half inputs to this half) and there is no
          // free slot adjacent to one of the inputs. In this case, we have to
          // swap an input with a non-input.
          for (int i = 0; i < 4; ++i)
            assert((SourceHalfMask[i] == -1 || SourceHalfMask[i] == i) &&
                   "We can't handle any clobbers here!");
          assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
                 "Cannot have adjacent inputs here!");

          SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
          SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;

          // We also have to update the final source mask in this case because
          // it may need to undo the above swap.
          for (int &M : FinalSourceHalfMask)
            if (M == (InputsFixed[0] ^ 1) + SourceOffset)
              M = InputsFixed[1] + SourceOffset;
            else if (M == InputsFixed[1] + SourceOffset)
              M = (InputsFixed[0] ^ 1) + SourceOffset;

          InputsFixed[1] = InputsFixed[0] ^ 1;
        }

        // Point everything at the fixed inputs.
        for (int &M : HalfMask)
          if (M == IncomingInputs[0])
            M = InputsFixed[0] + SourceOffset;
          else if (M == IncomingInputs[1])
            M = InputsFixed[1] + SourceOffset;

        IncomingInputs[0] = InputsFixed[0] + SourceOffset;
        IncomingInputs[1] = InputsFixed[1] + SourceOffset;
      }
    } else {
      llvm_unreachable("Unhandled input size!");
    }

    // Now hoist the DWord down to the right half.
    int FreeDWord = (PSHUFDMask[DestOffset / 2] == -1 ? 0 : 1) + DestOffset / 2;
    assert(PSHUFDMask[FreeDWord] == -1 && "DWord not free");
    PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
    for (int &M : HalfMask)
      for (int Input : IncomingInputs)
        if (M == Input)
          M = FreeDWord * 2 + Input % 2;
  };
  moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
                        /*SourceOffset*/ 4, /*DestOffset*/ 0);
  moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
                        /*SourceOffset*/ 0, /*DestOffset*/ 4);
  // Now enact all the shuffles we've computed to move the inputs into their
  // target halves.
  if (!isNoopShuffleMask(PSHUFLMask))
    V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
                    getV4X86ShuffleImm8ForMask(PSHUFLMask, DAG));
  if (!isNoopShuffleMask(PSHUFHMask))
    V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
                    getV4X86ShuffleImm8ForMask(PSHUFHMask, DAG));
  if (!isNoopShuffleMask(PSHUFDMask))
    V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
                    DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
                                DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
                                getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));

  // At this point, each half should contain all its inputs, and we can then
  // just shuffle them into their final position.
  assert(std::count_if(LoMask.begin(), LoMask.end(),
                       [](int M) { return M >= 4; }) == 0 &&
         "Failed to lift all the high half inputs to the low mask!");
  assert(std::count_if(HiMask.begin(), HiMask.end(),
                       [](int M) { return M >= 0 && M < 4; }) == 0 &&
         "Failed to lift all the low half inputs to the high mask!");

  // Do a half shuffle for the low mask.
  if (!isNoopShuffleMask(LoMask))
    V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
                    getV4X86ShuffleImm8ForMask(LoMask, DAG));

  // Do a half shuffle with the high mask after shifting its values down.
  for (int &M : HiMask)
    if (M >= 0)
      M -= 4;
  if (!isNoopShuffleMask(HiMask))
    V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
                    getV4X86ShuffleImm8ForMask(HiMask, DAG));

  return V;
}
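// End-to-end sketch (comments only) of the pipeline above: PSHUFLW/PSHUFHW
// first pair up inputs into dwords within each half, a single PSHUFD then
// carries each dword to its destination half, and a final PSHUFLW/PSHUFHW
// places the words in their exact lanes. For instance, a low half wanting
// [4, 5, 0, 1] needs no word pairing, just a PSHUFD that places source
// dwords 2 and 0 into the low half, after which the low-half mask is a noop.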
/// \brief Detect whether the mask pattern should be lowered through
/// interleaving.
///
/// This essentially tests whether viewing the mask as an interleaving of two
/// sub-sequences reduces the cross-input traffic of a blend operation. If so,
/// lowering it through interleaving is a significantly better strategy.
static bool shouldLowerAsInterleaving(ArrayRef<int> Mask) {
  int NumEvenInputs[2] = {0, 0};
  int NumOddInputs[2] = {0, 0};
  int NumLoInputs[2] = {0, 0};
  int NumHiInputs[2] = {0, 0};
  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Mask[i] < 0)
      continue;

    int InputIdx = Mask[i] >= Size;

    if (Mask[i] % Size < Size / 2)
      ++NumLoInputs[InputIdx];
    else
      ++NumHiInputs[InputIdx];

    if (i % 2 == 0)
      ++NumEvenInputs[InputIdx];
    else
      ++NumOddInputs[InputIdx];
  }

  // The minimum number of cross-input results for both the interleaved and
  // split cases. If interleaving results in fewer cross-input results, return
  // true.
  int InterleavedCrosses = std::min(NumEvenInputs[1] + NumOddInputs[0],
                                    NumEvenInputs[0] + NumOddInputs[1]);
  int SplitCrosses = std::min(NumLoInputs[1] + NumHiInputs[0],
                              NumLoInputs[0] + NumHiInputs[1]);
  return InterleavedCrosses < SplitCrosses;
}
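// Worked example (comments only): for the v8i16 mask <0, 8, 1, 9, 2, 10, 3,
// 11>, all even result lanes come from V1 and all odd lanes from V2, so
// InterleavedCrosses == 0; meanwhile both inputs feed the low half of the
// result, making SplitCrosses == 4. Interleaving clearly wins and this
// returns true.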
/// \brief Blend two v8i16 vectors using a naive unpack strategy.
///
/// This strategy only works when the inputs from each vector fit into a single
/// half of that vector, and generally there are not so many inputs as to leave
/// the in-place shuffles required highly constrained (and thus expensive). It
/// shifts all the inputs into a single side of both input vectors and then
/// uses an unpack to interleave these inputs in a single vector. At that
/// point, we will fall back on the generic single input shuffle lowering.
static SDValue lowerV8I16BasicBlendVectorShuffle(SDLoc DL, SDValue V1,
                                                 SDValue V2,
                                                 MutableArrayRef<int> Mask,
                                                 const X86Subtarget *Subtarget,
                                                 SelectionDAG &DAG) {
  assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
  assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
  SmallVector<int, 3> LoV1Inputs, HiV1Inputs, LoV2Inputs, HiV2Inputs;
  for (int i = 0; i < 8; ++i)
    if (Mask[i] >= 0 && Mask[i] < 4)
      LoV1Inputs.push_back(i);
    else if (Mask[i] >= 4 && Mask[i] < 8)
      HiV1Inputs.push_back(i);
    else if (Mask[i] >= 8 && Mask[i] < 12)
      LoV2Inputs.push_back(i);
    else if (Mask[i] >= 12)
      HiV2Inputs.push_back(i);

  int NumV1Inputs = LoV1Inputs.size() + HiV1Inputs.size();
  int NumV2Inputs = LoV2Inputs.size() + HiV2Inputs.size();

  assert(NumV1Inputs > 0 && NumV1Inputs <= 3 && "At most 3 inputs supported");
  assert(NumV2Inputs > 0 && NumV2Inputs <= 3 && "At most 3 inputs supported");
  assert(NumV1Inputs + NumV2Inputs <= 4 && "At most 4 combined inputs");

  bool MergeFromLo = LoV1Inputs.size() + LoV2Inputs.size() >=
                     HiV1Inputs.size() + HiV2Inputs.size();

  auto moveInputsToHalf = [&](SDValue V, ArrayRef<int> LoInputs,
                              ArrayRef<int> HiInputs, bool MoveToLo,
                              int MaskOffset) {
    ArrayRef<int> GoodInputs = MoveToLo ? LoInputs : HiInputs;
    ArrayRef<int> BadInputs = MoveToLo ? HiInputs : LoInputs;
    if (BadInputs.empty())
      return V;

    int MoveMask[] = {-1, -1, -1, -1, -1, -1, -1, -1};
    int MoveOffset = MoveToLo ? 0 : 4;

    if (GoodInputs.empty()) {
      for (int BadInput : BadInputs) {
        MoveMask[Mask[BadInput] % 4 + MoveOffset] = Mask[BadInput] - MaskOffset;
        Mask[BadInput] = Mask[BadInput] % 4 + MoveOffset + MaskOffset;
      }
    } else {
      if (GoodInputs.size() == 2) {
        // If the low inputs are spread across two dwords, pack them into
        // a single dword.
        MoveMask[MoveOffset] = Mask[GoodInputs[0]] - MaskOffset;
        MoveMask[MoveOffset + 1] = Mask[GoodInputs[1]] - MaskOffset;
        Mask[GoodInputs[0]] = MoveOffset + MaskOffset;
        Mask[GoodInputs[1]] = MoveOffset + 1 + MaskOffset;
      } else {
        // Otherwise pin the good inputs.
        for (int GoodInput : GoodInputs)
          MoveMask[Mask[GoodInput] - MaskOffset] = Mask[GoodInput] - MaskOffset;
      }

      if (BadInputs.size() == 2) {
        // If we have two bad inputs then there may be either one or two good
        // inputs fixed in place. Find a fixed input, and then find the *other*
        // two adjacent indices by using modular arithmetic.
        int GoodMaskIdx =
            std::find_if(std::begin(MoveMask) + MoveOffset, std::end(MoveMask),
                         [](int M) { return M >= 0; }) -
            std::begin(MoveMask);
        int MoveMaskIdx =
            ((((GoodMaskIdx - MoveOffset) & ~1) + 2) % 4) + MoveOffset;
        assert(MoveMask[MoveMaskIdx] == -1 && "Expected empty slot");
        assert(MoveMask[MoveMaskIdx + 1] == -1 && "Expected empty slot");
        MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
        MoveMask[MoveMaskIdx + 1] = Mask[BadInputs[1]] - MaskOffset;
        Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
        Mask[BadInputs[1]] = MoveMaskIdx + 1 + MaskOffset;
      } else {
        assert(BadInputs.size() == 1 && "All sizes handled");
        int MoveMaskIdx = std::find(std::begin(MoveMask) + MoveOffset,
                                    std::end(MoveMask), -1) -
                          std::begin(MoveMask);
        MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
        Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
      }
    }

    return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
                                MoveMask);
  };
  V1 = moveInputsToHalf(V1, LoV1Inputs, HiV1Inputs, MergeFromLo,
                        /*MaskOffset*/ 0);
  V2 = moveInputsToHalf(V2, LoV2Inputs, HiV2Inputs, MergeFromLo,
                        /*MaskOffset*/ 8);

  // FIXME: Select an interleaving of the merge of V1 and V2 that minimizes
  // cross-half traffic in the final shuffle.

  // Munge the mask to be a single-input mask after the unpack merges the
  // halves.
  for (int &M : Mask)
    if (M != -1)
      M = 2 * (M % 4) + (M / 8);

  return DAG.getVectorShuffle(
      MVT::v8i16, DL, DAG.getNode(MergeFromLo ? X86ISD::UNPCKL : X86ISD::UNPCKH,
                                  DL, MVT::v8i16, V1, V2),
      DAG.getUNDEF(MVT::v8i16), Mask);
}
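// Worked remap example (comments only): after both inputs are packed into
// their low halves, UNPCKL produces [V1[0], V2[0], V1[1], V2[1], ...], so the
// munge above sends a V1 word M (0-7) to lane 2*(M%4) and a V2 word M (8-15)
// to lane 2*(M%4)+1; e.g. M == 10 lands in lane 5.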
/// \brief Generic lowering of 8-lane i16 shuffles.
///
/// This handles both single-input shuffles and combined shuffle/blends with
/// two inputs. The single input shuffles are immediately delegated to
/// a dedicated lowering routine.
///
/// The blends are lowered in one of three fundamental ways. If there are few
/// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
/// of the input is significantly cheaper when lowered as an interleaving of
/// the two inputs, try to interleave them. Otherwise, blend the low and high
/// halves of the inputs separately (making them have relatively few inputs)
/// and then concatenate them.
static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(Op.getSimpleValueType() == MVT::v8i16 && "Bad shuffle type!");
  assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> OrigMask = SVOp->getMask();
  int MaskStorage[8] = {OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
                        OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7]};
  MutableArrayRef<int> Mask(MaskStorage);

  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative.
  if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
          DL, MVT::v8i16, V1, V2, OrigMask, Subtarget, DAG))
    return ZExt;

  auto isV1 = [](int M) { return M >= 0 && M < 8; };
  auto isV2 = [](int M) { return M >= 8; };

  int NumV1Inputs = std::count_if(Mask.begin(), Mask.end(), isV1);
  int NumV2Inputs = std::count_if(Mask.begin(), Mask.end(), isV2);

  if (NumV2Inputs == 0)
    return lowerV8I16SingleInputVectorShuffle(DL, V1, Mask, Subtarget, DAG);

  assert(NumV1Inputs > 0 && "All single-input shuffles should be canonicalized "
                            "to be V1-input shuffles.");

  // Try to use byte shift instructions.
  if (SDValue Shift = lowerVectorShuffleAsByteShift(
          DL, MVT::v8i16, V1, V2, Mask, DAG))
    return Shift;

  // There are special ways we can lower some single-element blends.
  if (NumV2Inputs == 1)
    if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v8i16, DL, V1, V2,
                                                         Mask, Subtarget, DAG))
      return V;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 2, 10, 3, 11))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V1, V2);
  if (isShuffleEquivalent(Mask, 4, 12, 5, 13, 6, 14, 7, 15))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V1, V2);

  if (Subtarget->hasSSE41())
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
          DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
    return Rotate;

  if (NumV1Inputs + NumV2Inputs <= 4)
    return lowerV8I16BasicBlendVectorShuffle(DL, V1, V2, Mask, Subtarget, DAG);

  // Check whether an interleaving lowering is likely to be more efficient.
  // This isn't perfect but it is a strong heuristic that tends to work well on
  // the kinds of shuffles that show up in practice.
  //
  // FIXME: Handle 1x, 2x, and 4x interleaving.
  if (shouldLowerAsInterleaving(Mask)) {
    // FIXME: Figure out whether we should pack these into the low or high
    // halves.

    int EMask[8], OMask[8];
    for (int i = 0; i < 4; ++i) {
      EMask[i] = Mask[2*i];
      OMask[i] = Mask[2*i + 1];
      EMask[i + 4] = -1;
      OMask[i + 4] = -1;
    }

    SDValue Evens = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, EMask);
    SDValue Odds = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, OMask);

    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, Evens, Odds);
  }

  int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
  int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};

  for (int i = 0; i < 4; ++i) {
    LoBlendMask[i] = Mask[i];
    HiBlendMask[i] = Mask[i + 4];
  }

  SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
  SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
  LoV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, LoV);
  HiV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, HiV);

  return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
                     DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, LoV, HiV));
}
/// \brief Check whether a compaction lowering can be done by dropping even
/// elements and compute how many times even elements must be dropped.
///
/// This handles shuffles which take every Nth element where N is a power of
/// two. Example shuffle masks:
///
///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14,  0,  2,  4,  6,  8, 10, 12, 14
///  N = 1:  0,  2,  4,  6,  8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
///  N = 2:  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12,  0,  4,  8, 12
///  N = 2:  0,  4,  8, 12, 16, 20, 24, 28,  0,  4,  8, 12, 16, 20, 24, 28
///  N = 3:  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8,  0,  8
///  N = 3:  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24,  0,  8, 16, 24
///
/// Any of these lanes can of course be undef.
///
/// This routine only supports N <= 3.
/// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
/// for larger N.
///
/// \returns N above, or the number of times even elements must be dropped if
/// there is such a number. Otherwise returns zero.
static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
  // Figure out whether we're looping over two inputs or just one.
  bool IsSingleInput = isSingleInputShuffleMask(Mask);

  // The modulus for the shuffle vector entries is based on whether this is
  // a single input or not.
  int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
  assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
         "We should only be called with masks with a power-of-2 size!");

  uint64_t ModMask = (uint64_t)ShuffleModulus - 1;

  // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
  // and 2^3 simultaneously. This is because we may have ambiguity with
  // partially undef inputs.
  bool ViableForN[3] = {true, true, true};
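  // Worked check, for illustration: with a single-input v16i8 mask, ModMask is
  // 15; for stride N = 1 the entry at position i must equal (i << 1) & 15, so
  // position 3 must hold 6 and position 9 must hold (9 * 2) & 15 == 2.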
  for (int i = 0, e = Mask.size(); i < e; ++i) {
    // Ignore undef lanes, we'll optimistically collapse them to the pattern we
    // want.
    if (Mask[i] == -1)
      continue;

    bool IsAnyViable = false;
    for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
      if (ViableForN[j]) {
        uint64_t N = j + 1;

        // The shuffle mask must be equal to (i * 2^N) % M.
        if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
          IsAnyViable = true;
        else
          ViableForN[j] = false;
      }
    // Early exit if we exhaust the possible powers of two.
    if (!IsAnyViable)
      break;
  }

  for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
    if (ViableForN[j])
      return j + 1;

  // Return 0 as there is no viable power of two.
  return 0;
}
/// \brief Generic lowering of v16i8 shuffles.
///
/// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
/// detect any complexity-reducing interleaving. If that doesn't help, it uses
/// UNPCK to spread the i8 elements across two i16-element vectors, and uses
/// the existing lowering for v8i16 blends on each half, finally PACK-ing them
/// back together.
static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(Op.getSimpleValueType() == MVT::v16i8 && "Bad shuffle type!");
  assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> OrigMask = SVOp->getMask();
  assert(OrigMask.size() == 16 && "Unexpected mask size for v16 shuffle!");

  // Try to use byte shift instructions.
  if (SDValue Shift = lowerVectorShuffleAsByteShift(
          DL, MVT::v16i8, V1, V2, OrigMask, DAG))
    return Shift;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
          DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
    return Rotate;

  // Try to use a zext lowering.
  if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
          DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
    return ZExt;
  int MaskStorage[16] = {
      OrigMask[0],  OrigMask[1],  OrigMask[2],  OrigMask[3],
      OrigMask[4],  OrigMask[5],  OrigMask[6],  OrigMask[7],
      OrigMask[8],  OrigMask[9],  OrigMask[10], OrigMask[11],
      OrigMask[12], OrigMask[13], OrigMask[14], OrigMask[15]};
  MutableArrayRef<int> Mask(MaskStorage);
  MutableArrayRef<int> LoMask = Mask.slice(0, 8);
  MutableArrayRef<int> HiMask = Mask.slice(8, 8);

  int NumV2Elements =
      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 16; });

  // For single-input shuffles, there are some nicer lowering tricks we can use.
  if (NumV2Elements == 0) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i8, DL, V1,
                                                          Mask, Subtarget, DAG))
      return Broadcast;
    // Check whether we can widen this to an i16 shuffle by duplicating bytes.
    // Notably, this handles splat and partial-splat shuffles more efficiently.
    // However, it only makes sense if the pre-duplication shuffle simplifies
    // things significantly. Currently, this means we need to be able to
    // express the pre-duplication shuffle as an i16 shuffle.
    //
    // FIXME: We should check for other patterns which can be widened into an
    // i16 shuffle as well.
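    // Sketch of the splat case, for illustration: to splat byte 5, the
    // pre-duplication i16 shuffle keeps word 2 (bytes 4 and 5) in place, the
    // low unpack of V1 with itself then doubles every byte so that word 5
    // holds two copies of byte 5, and the post-duplication i16 shuffle splats
    // that word across the vector.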
    auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
      for (int i = 0; i < 16; i += 2)
        if (Mask[i] != -1 && Mask[i + 1] != -1 && Mask[i] != Mask[i + 1])
          return false;
      return true;
    };
    auto tryToWidenViaDuplication = [&]() -> SDValue {
      if (!canWidenViaDuplication(Mask))
        return SDValue();
      SmallVector<int, 4> LoInputs;
      std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(LoInputs),
                   [](int M) { return M >= 0 && M < 8; });
      std::sort(LoInputs.begin(), LoInputs.end());
      LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
                     LoInputs.end());
      SmallVector<int, 4> HiInputs;
      std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(HiInputs),
                   [](int M) { return M >= 8; });
      std::sort(HiInputs.begin(), HiInputs.end());
      HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
                     HiInputs.end());

      bool TargetLo = LoInputs.size() >= HiInputs.size();
      ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
      ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;

      int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
      SmallDenseMap<int, int, 8> LaneMap;
      for (int I : InPlaceInputs) {
        PreDupI16Shuffle[I / 2] = I / 2;
        LaneMap[I] = I;
      }
      int j = TargetLo ? 0 : 4, je = j + 4;
      for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
        // Check if j is already a shuffle of this input. This happens when
        // there are two adjacent bytes after we move the low one.
        if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
          // If we haven't yet mapped the input, search for a slot into which
          // we can map it.
          while (j < je && PreDupI16Shuffle[j] != -1)
            ++j;

          if (j == je)
            // We can't place the inputs into a single half with a simple i16
            // shuffle, so bail.
            return SDValue();

          // Map this input with the i16 shuffle.
          PreDupI16Shuffle[j] = MovingInputs[i] / 2;
        }

        // Update the lane map based on the mapping we ended up with.
        LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
      }
      V1 = DAG.getNode(
          ISD::BITCAST, DL, MVT::v16i8,
          DAG.getVectorShuffle(MVT::v8i16, DL,
                               DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
                               DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
      // Unpack the bytes to form the i16s that will be shuffled into place.
      V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
                       MVT::v16i8, V1, V1);

      int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
      for (int i = 0; i < 16; ++i)
        if (Mask[i] != -1) {
          int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
          assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
          if (PostDupI16Shuffle[i / 2] == -1)
            PostDupI16Shuffle[i / 2] = MappedMask;
          else
            assert(PostDupI16Shuffle[i / 2] == MappedMask &&
                   "Conflicting entries in the original shuffle!");
        }
      return DAG.getNode(
          ISD::BITCAST, DL, MVT::v16i8,
          DAG.getVectorShuffle(MVT::v8i16, DL,
                               DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
                               DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
    };
    if (SDValue V = tryToWidenViaDuplication())
      return V;
  }
  // Check whether an interleaving lowering is likely to be more efficient.
  // This isn't perfect but it is a strong heuristic that tends to work well on
  // the kinds of shuffles that show up in practice.
  //
  // FIXME: We need to handle other interleaving widths (i16, i32, ...).
  if (shouldLowerAsInterleaving(Mask)) {
    int NumLoHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
      return (M >= 0 && M < 8) || (M >= 16 && M < 24);
    });
    int NumHiHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
      return (M >= 8 && M < 16) || M >= 24;
    });

    int EMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
                     -1, -1, -1, -1, -1, -1, -1, -1};
    int OMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
                     -1, -1, -1, -1, -1, -1, -1, -1};
    bool UnpackLo = NumLoHalf >= NumHiHalf;
    MutableArrayRef<int> TargetEMask(UnpackLo ? EMask : EMask + 8, 8);
    MutableArrayRef<int> TargetOMask(UnpackLo ? OMask : OMask + 8, 8);
    for (int i = 0; i < 8; ++i) {
      TargetEMask[i] = Mask[2 * i];
      TargetOMask[i] = Mask[2 * i + 1];
    }

    SDValue Evens = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, EMask);
    SDValue Odds = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, OMask);

    return DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
                       MVT::v16i8, Evens, Odds);
  }
  // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
  // with PSHUFB. It is important to do this before we attempt to generate any
  // blends but after all of the single-input lowerings. If the single input
  // lowerings can find an instruction sequence that is faster than a PSHUFB,
  // we want to preserve that and we can DAG combine any longer sequences into
  // a PSHUFB in the end. But once we start blending from multiple inputs,
  // the complexity of DAG combining bad patterns back into PSHUFB is too high,
  // and there are *very* few patterns that would actually be faster than the
  // PSHUFB approach because of its ability to zero lanes.
  //
  // FIXME: The only exceptions to the above are blends which are exact
  // interleavings with direct instructions supporting them. We currently don't
  // handle those well here.
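  // A note on the mechanics, for illustration: PSHUFB picks an arbitrary
  // source byte for every result byte, and any control byte with its high bit
  // set (0x80) writes a zero instead. That zeroing is what lets the two
  // shuffled halves below be combined with a plain OR.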
  if (Subtarget->hasSSSE3()) {
    SDValue V1Mask[16];
    SDValue V2Mask[16];
    bool V1InUse = false;
    bool V2InUse = false;
    SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);

    for (int i = 0; i < 16; ++i) {
      if (Mask[i] == -1) {
        V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
      } else {
        const int ZeroMask = 0x80;
        int V1Idx = (Mask[i] < 16 ? Mask[i] : ZeroMask);
        int V2Idx = (Mask[i] < 16 ? ZeroMask : Mask[i] - 16);
        if (Zeroable[i])
          V1Idx = V2Idx = ZeroMask;
        V1Mask[i] = DAG.getConstant(V1Idx, MVT::i8);
        V2Mask[i] = DAG.getConstant(V2Idx, MVT::i8);
        V1InUse |= (ZeroMask != V1Idx);
        V2InUse |= (ZeroMask != V2Idx);
      }
    }

    if (V1InUse)
      V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V1,
                       DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
    if (V2InUse)
      V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V2,
                       DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));

    // If we need shuffled inputs from both, blend the two.
    if (V1InUse && V2InUse)
      return DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
    if (V1InUse)
      return V1; // Single inputs are easy.
    if (V2InUse)
      return V2; // Single inputs are easy.

    // Shuffling to a zeroable vector.
    return getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
  }
  // There are special ways we can lower some single-element blends.
  if (NumV2Elements == 1)
    if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v16i8, DL, V1, V2,
                                                         Mask, Subtarget, DAG))
      return V;
  // Check whether a compaction lowering can be done. This handles shuffles
  // which take every Nth element for some even N. See the helper function for
  // details.
  //
  // We special case these as they can be particularly efficiently handled with
  // the PACKUSWB instruction on x86 and they show up in common patterns of
  // rearranging bytes to truncate wide elements.
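  // Concretely (illustrative only): the single-input N = 1 mask
  // <0, 2, 4, ..., 14, 0, 2, ..., 14> is an i16-to-i8 truncation; masking each
  // i16 lane with 0x00FF and doing one PACKUSWB of the result with itself
  // produces exactly that byte pattern.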
  if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask)) {
    // NumEvenDrops is the power of two stride of the elements. Another way of
    // thinking about it is that we need to drop the even elements this many
    // times to get the original input.
    bool IsSingleInput = isSingleInputShuffleMask(Mask);

    // First we need to zero all the dropped bytes.
    assert(NumEvenDrops <= 3 &&
           "No support for dropping even elements more than 3 times.");
    // We use the mask type to pick which bytes are preserved based on how many
    // elements are dropped.
    MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
    SDValue ByteClearMask =
        DAG.getNode(ISD::BITCAST, DL, MVT::v16i8,
                    DAG.getConstant(0xFF, MaskVTs[NumEvenDrops - 1]));
    V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
    if (!IsSingleInput)
      V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);

    // Now pack things back together.
    V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
    V2 = IsSingleInput ? V1 : DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
    SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
    for (int i = 1; i < NumEvenDrops; ++i) {
      Result = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, Result);
      Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
    }

    return Result;
  }
  int V1LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
  int V1HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
  int V2LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
  int V2HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};

  auto buildBlendMasks = [](MutableArrayRef<int> HalfMask,
                            MutableArrayRef<int> V1HalfBlendMask,
                            MutableArrayRef<int> V2HalfBlendMask) {
    for (int i = 0; i < 8; ++i)
      if (HalfMask[i] >= 0 && HalfMask[i] < 16) {
        V1HalfBlendMask[i] = HalfMask[i];
        HalfMask[i] = i;
      } else if (HalfMask[i] >= 16) {
        V2HalfBlendMask[i] = HalfMask[i] - 16;
        HalfMask[i] = i + 8;
      }
  };
  buildBlendMasks(LoMask, V1LoBlendMask, V2LoBlendMask);
  buildBlendMasks(HiMask, V1HiBlendMask, V2HiBlendMask);
  SDValue Zero = getZeroVector(MVT::v8i16, Subtarget, DAG, DL);

  auto buildLoAndHiV8s = [&](SDValue V, MutableArrayRef<int> LoBlendMask,
                             MutableArrayRef<int> HiBlendMask) {
    SDValue V1, V2;
    // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
    // them out and avoid using UNPCK{L,H} to extract the elements of V as
    // i16s.
    if (std::none_of(LoBlendMask.begin(), LoBlendMask.end(),
                     [](int M) { return M >= 0 && M % 2 == 1; }) &&
        std::none_of(HiBlendMask.begin(), HiBlendMask.end(),
                     [](int M) { return M >= 0 && M % 2 == 1; })) {
      // Use a mask to drop the high bytes.
      V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
      V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, V1,
                       DAG.getConstant(0x00FF, MVT::v8i16));

      // This will be a single vector shuffle instead of a blend so nuke V2.
      V2 = DAG.getUNDEF(MVT::v8i16);

      // Squash the masks to point directly into V1.
      for (int &M : LoBlendMask)
        if (M >= 0)
          M /= 2;
      for (int &M : HiBlendMask)
        if (M >= 0)
          M /= 2;
    } else {
      // Otherwise just unpack the low half of V into V1 and the high half into
      // V2 so that we can blend them as i16s.
      V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
                       DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
      V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
                       DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
    }

    SDValue BlendedLo =
        DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
    SDValue BlendedHi =
        DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
    return std::make_pair(BlendedLo, BlendedHi);
  };
  SDValue V1Lo, V1Hi, V2Lo, V2Hi;
  std::tie(V1Lo, V1Hi) = buildLoAndHiV8s(V1, V1LoBlendMask, V1HiBlendMask);
  std::tie(V2Lo, V2Hi) = buildLoAndHiV8s(V2, V2LoBlendMask, V2HiBlendMask);

  SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Lo, V2Lo, LoMask);
  SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Hi, V2Hi, HiMask);

  return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
}
/// \brief Dispatching routine to lower various 128-bit x86 vector shuffles.
///
/// This routine breaks down the specific type of 128-bit shuffle and
/// dispatches to the lowering routines accordingly.
static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                        MVT VT, const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  switch (VT.SimpleTy) {
  case MVT::v2i64:
    return lowerV2I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v2f64:
    return lowerV2F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v4i32:
    return lowerV4I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v4f32:
    return lowerV4F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v8i16:
    return lowerV8I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v16i8:
    return lowerV16I8VectorShuffle(Op, V1, V2, Subtarget, DAG);

  default:
    llvm_unreachable("Unimplemented!");
  }
}
/// \brief Helper function to test whether a shuffle mask could be
/// simplified by widening the elements being shuffled.
///
/// Appends the mask for wider elements in WidenedMask if valid. Otherwise
/// leaves it in an unspecified state.
///
/// NOTE: This must handle normal vector shuffle masks and *target* vector
/// shuffle masks. The latter have the special property of a '-2' representing
/// a zeroed lane of a vector.
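/// For example (illustrative): the v4 mask <0, 1, 6, 7> widens to the v2 mask
/// <0, 3>, while <0, 2, 4, 6> cannot be widened because neither pair is
/// adjacent.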
static bool canWidenShuffleElements(ArrayRef<int> Mask,
                                    SmallVectorImpl<int> &WidenedMask) {
  for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
    // If both elements are undef, it's trivial.
    if (Mask[i] == SM_SentinelUndef && Mask[i + 1] == SM_SentinelUndef) {
      WidenedMask.push_back(SM_SentinelUndef);
      continue;
    }

    // Check for an undef mask and a mask value properly aligned to fit with
    // a pair of values. If we find such a case, use the non-undef mask's value.
    if (Mask[i] == SM_SentinelUndef && Mask[i + 1] >= 0 &&
        Mask[i + 1] % 2 == 1) {
      WidenedMask.push_back(Mask[i + 1] / 2);
      continue;
    }
    if (Mask[i + 1] == SM_SentinelUndef && Mask[i] >= 0 && Mask[i] % 2 == 0) {
      WidenedMask.push_back(Mask[i] / 2);
      continue;
    }

    // When zeroing, we need to spread the zeroing across both lanes to widen.
    if (Mask[i] == SM_SentinelZero || Mask[i + 1] == SM_SentinelZero) {
      if ((Mask[i] == SM_SentinelZero || Mask[i] == SM_SentinelUndef) &&
          (Mask[i + 1] == SM_SentinelZero || Mask[i + 1] == SM_SentinelUndef)) {
        WidenedMask.push_back(SM_SentinelZero);
        continue;
      }
      return false;
    }

    // Finally check if the two mask values are adjacent and aligned with
    // a pair.
    if (Mask[i] != SM_SentinelUndef && Mask[i] % 2 == 0 &&
        Mask[i] + 1 == Mask[i + 1]) {
      WidenedMask.push_back(Mask[i] / 2);
      continue;
    }

    // Otherwise we can't safely widen the elements used in this shuffle.
    return false;
  }
  assert(WidenedMask.size() == Mask.size() / 2 &&
         "Incorrect size of mask after widening the elements!");

  return true;
}
/// \brief Generic routine to split a vector shuffle into half-sized shuffles.
///
/// This routine just extracts two subvectors, shuffles them independently, and
/// then concatenates them back together. This should work effectively with all
/// AVX vector shuffle types.
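/// For instance (illustrative), a 256-bit v8f32 shuffle is handled as two
/// v4f32 shuffles, each a four-way blend of the lo/hi halves of V1 and V2,
/// whose results are then glued back together with CONCAT_VECTORS.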
static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
                                          SDValue V2, ArrayRef<int> Mask,
                                          SelectionDAG &DAG) {
  assert(VT.getSizeInBits() >= 256 &&
         "Only for 256-bit or wider vector shuffles!");
  assert(V1.getSimpleValueType() == VT && "Bad operand type!");
  assert(V2.getSimpleValueType() == VT && "Bad operand type!");

  ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
  ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);

  int NumElements = VT.getVectorNumElements();
  int SplitNumElements = NumElements / 2;
  MVT ScalarVT = VT.getScalarType();
  MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);

  SDValue LoV1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V1,
                             DAG.getIntPtrConstant(0));
  SDValue HiV1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V1,
                             DAG.getIntPtrConstant(SplitNumElements));
  SDValue LoV2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V2,
                             DAG.getIntPtrConstant(0));
  SDValue HiV2 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SplitVT, V2,
                             DAG.getIntPtrConstant(SplitNumElements));
  // Now create two 4-way blends of these half-width vectors.
  auto HalfBlend = [&](ArrayRef<int> HalfMask) {
    bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
    SmallVector<int, 32> V1BlendMask, V2BlendMask, BlendMask;
    for (int i = 0; i < SplitNumElements; ++i) {
      int M = HalfMask[i];
      if (M >= NumElements) {
        if (M >= NumElements + SplitNumElements)
          UseHiV2 = true;
        else
          UseLoV2 = true;
        V2BlendMask.push_back(M - NumElements);
        V1BlendMask.push_back(-1);
        BlendMask.push_back(SplitNumElements + i);
      } else if (M >= 0) {
        if (M >= SplitNumElements)
          UseHiV1 = true;
        else
          UseLoV1 = true;
        V2BlendMask.push_back(-1);
        V1BlendMask.push_back(M);
        BlendMask.push_back(i);
      } else {
        V2BlendMask.push_back(-1);
        V1BlendMask.push_back(-1);
        BlendMask.push_back(-1);
      }
    }

    // Because the lowering happens after all combining takes place, we need to
    // manually combine these blend masks as much as possible so that we create
    // a minimal number of high-level vector shuffle nodes.

    // First try just blending the halves of V1 or V2.
    if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
      return DAG.getUNDEF(SplitVT);
    if (!UseLoV2 && !UseHiV2)
      return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
    if (!UseLoV1 && !UseHiV1)
      return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);

    SDValue V1Blend, V2Blend;
    if (UseLoV1 && UseHiV1) {
      V1Blend =
          DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
    } else {
      // We only use half of V1 so map the usage down into the final blend mask.
      V1Blend = UseLoV1 ? LoV1 : HiV1;
      for (int i = 0; i < SplitNumElements; ++i)
        if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
          BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
    }
    if (UseLoV2 && UseHiV2) {
      V2Blend =
          DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
    } else {
      // We only use half of V2 so map the usage down into the final blend mask.
      V2Blend = UseLoV2 ? LoV2 : HiV2;
      for (int i = 0; i < SplitNumElements; ++i)
        if (BlendMask[i] >= SplitNumElements)
          BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
    }
    return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
  };
  SDValue Lo = HalfBlend(LoMask);
  SDValue Hi = HalfBlend(HiMask);
  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
}
/// \brief Either split a vector in halves or decompose the shuffles and the
/// blends.
///
/// This is provided as a good fallback for many lowerings of non-single-input
/// shuffles with more than one 128-bit lane. In those cases, we want to select
/// between splitting the shuffle into 128-bit components and stitching those
/// back together vs. extracting the single-input shuffles and blending those
/// results.
static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
                                                SDValue V2, ArrayRef<int> Mask,
                                                SelectionDAG &DAG) {
  assert(!isSingleInputShuffleMask(Mask) && "This routine must not be used to "
                                            "lower single-input shuffles as it "
                                            "could then recurse on itself.");
  int Size = Mask.size();
9997 int Size = Mask.size();
9999 // If this can be modeled as a broadcast of two elements followed by a blend,
10000 // prefer that lowering. This is especially important because broadcasts can
10001 // often fold with memory operands.
10002 auto DoBothBroadcast = [&] {
10003 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
10006 if (V2BroadcastIdx == -1)
10007 V2BroadcastIdx = M - Size;
10008 else if (M - Size != V2BroadcastIdx)
10010 } else if (M >= 0) {
10011 if (V1BroadcastIdx == -1)
10012 V1BroadcastIdx = M;
10013 else if (M != V1BroadcastIdx)
10018 if (DoBothBroadcast())
10019 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
  // If the inputs all stem from a single 128-bit lane of each input, then we
  // split them rather than blending because the split will decompose to
  // unusually few instructions.
  int LaneCount = VT.getSizeInBits() / 128;
  int LaneSize = Size / LaneCount;
  SmallBitVector LaneInputs[2];
  LaneInputs[0].resize(LaneCount, false);
  LaneInputs[1].resize(LaneCount, false);
  for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0)
      LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
  if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
    return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);

  // Otherwise, just fall back to decomposed shuffles and a blend. This requires
  // that the decomposed single-input shuffles don't end up here.
  return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
}
/// \brief Lower a vector shuffle crossing multiple 128-bit lanes as
/// a permutation and blend of those lanes.
///
/// This essentially blends the out-of-lane inputs to each lane into the lane
/// from a permuted copy of the vector. This lowering strategy results in four
/// instructions in the worst case for a single-input cross lane shuffle which
/// is lower than any other fully general cross-lane shuffle strategy I'm aware
/// of. Special cases for each particular shuffle pattern should be handled
/// prior to trying this lowering.
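/// As an illustrative example for v4f64: the single-input mask <2, 1, 0, 3>
/// crosses in both lanes, so we first build a flipped copy with VPERM2X128
/// swapping the halves and then apply the in-lane blend mask <4, 1, 6, 3>
/// over (V1, Flipped) to place every element.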
static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT,
                                                       SDValue V1, SDValue V2,
                                                       ArrayRef<int> Mask,
                                                       SelectionDAG &DAG) {
  // FIXME: This should probably be generalized for 512-bit vectors as well.
  assert(VT.getSizeInBits() == 256 && "Only for 256-bit vector shuffles!");
  int LaneSize = Mask.size() / 2;

  // If there are only inputs from one 128-bit lane, splitting will in fact be
  // less expensive. The flags track whether the given lane contains an element
  // that crosses to another lane.
  bool LaneCrossing[2] = {false, false};
  for (int i = 0, Size = Mask.size(); i < Size; ++i)
    if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
      LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
  if (!LaneCrossing[0] || !LaneCrossing[1])
    return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
  if (isSingleInputShuffleMask(Mask)) {
    SmallVector<int, 32> FlippedBlendMask;
    for (int i = 0, Size = Mask.size(); i < Size; ++i)
      FlippedBlendMask.push_back(
          Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
                                  ? Mask[i]
                                  : Mask[i] % LaneSize +
                                        (i / LaneSize) * LaneSize + Size));

    // Flip the vector, and blend the results which should now be in-lane. The
    // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
    // 5 for the high source. The value 3 selects the high half of source 2 and
    // the value 2 selects the low half of source 2. We only use source 2 to
    // allow folding it into a memory operand.
    unsigned PERMMask = 3 | 2 << 4;
    SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
                                  V1, DAG.getConstant(PERMMask, MVT::i8));
    return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
  }

  // This now reduces to two single-input shuffles of V1 and V2 which at worst
  // will be handled by the above logic and a blend of the results, much like
  // other patterns in AVX.
  return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
}
/// \brief Handle lowering 2-lane 128-bit shuffles.
static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
                                        SDValue V2, ArrayRef<int> Mask,
                                        const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  // Blends are faster and handle all the non-lane-crossing cases.
  if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask,
                                                Subtarget, DAG))
    return Blend;

  MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
                               VT.getVectorNumElements() / 2);

  // Check for patterns which can be matched with a single insert of a 128-bit
  // subvector.
  if (isShuffleEquivalent(Mask, 0, 1, 0, 1) ||
      isShuffleEquivalent(Mask, 0, 1, 4, 5)) {
    SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
                              DAG.getIntPtrConstant(0));
    SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
                              Mask[2] < 4 ? V1 : V2, DAG.getIntPtrConstant(0));
    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
  }
  if (isShuffleEquivalent(Mask, 0, 1, 6, 7)) {
    SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
                              DAG.getIntPtrConstant(0));
    SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
                              DAG.getIntPtrConstant(2));
    return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
  }

  // Otherwise form a 128-bit permutation.
  // FIXME: Detect zero-vector inputs and use the VPERM2X128 to zero that half.
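  // For example (illustrative): the v4 mask <2, 3, 6, 7> yields
  // PermMask = (2 / 2) | (6 / 2) << 4 == 0x31, i.e. the high half of V1
  // followed by the high half of V2.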
  unsigned PermMask = Mask[0] / 2 | (Mask[2] / 2) << 4;
  return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
                     DAG.getConstant(PermMask, MVT::i8));
}
/// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
/// shuffling each lane.
///
/// This will only succeed when the result of fixing the 128-bit lanes results
/// in a single-input non-lane-crossing shuffle with a repeating shuffle mask in
/// each 128-bit lane. This handles many cases where we can quickly blend away
/// the lane crosses early and then use simpler shuffles within each lane.
///
/// FIXME: It might be worthwhile at some point to support this without
/// requiring the 128-bit lane-relative shuffles to be repeating, but currently
/// in x86 only floating point has interesting non-repeating shuffles, and even
/// those are still *marginally* more expensive.
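/// A worked example (illustrative): for the v8f32 mask
/// <1, 0, 3, 2, 13, 12, 15, 14>, the lane fix is the v4f64-level shuffle
/// <0, 1, 6, 7> (V1's low lane, V2's high lane), after which the repeating
/// in-lane mask <1, 0, 3, 2, 5, 4, 7, 6> finishes the job without crossing
/// lanes.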
static SDValue lowerVectorShuffleByMerging128BitLanes(
    SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget *Subtarget, SelectionDAG &DAG) {
  assert(!isSingleInputShuffleMask(Mask) &&
         "This is only useful with multiple inputs.");

  int Size = Mask.size();
  int LaneSize = 128 / VT.getScalarSizeInBits();
  int NumLanes = Size / LaneSize;
  assert(NumLanes > 1 && "Only handles 256-bit and wider shuffles.");

  // See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
  // check whether the in-128-bit lane shuffles share a repeating pattern.
  SmallVector<int, 4> Lanes;
  Lanes.resize(NumLanes, -1);
  SmallVector<int, 4> InLaneMask;
  InLaneMask.resize(LaneSize, -1);
  for (int i = 0; i < Size; ++i) {
    if (Mask[i] < 0)
      continue;

    int j = i / LaneSize;

    if (Lanes[j] < 0) {
      // First entry we've seen for this lane.
      Lanes[j] = Mask[i] / LaneSize;
    } else if (Lanes[j] != Mask[i] / LaneSize) {
      // This doesn't match the lane selected previously!
      return SDValue();
    }

    // Check that within each lane we have a consistent shuffle mask.
    int k = i % LaneSize;
    if (InLaneMask[k] < 0) {
      InLaneMask[k] = Mask[i] % LaneSize;
    } else if (InLaneMask[k] != Mask[i] % LaneSize) {
      // This doesn't fit a repeating in-lane mask.
      return SDValue();
    }
  }
  // First shuffle the lanes into place.
  MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
                                VT.getSizeInBits() / 64);
  SmallVector<int, 8> LaneMask;
  LaneMask.resize(NumLanes * 2, -1);
  for (int i = 0; i < NumLanes; ++i)
    if (Lanes[i] >= 0) {
      LaneMask[2 * i + 0] = 2 * Lanes[i] + 0;
      LaneMask[2 * i + 1] = 2 * Lanes[i] + 1;
    }

  V1 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V1);
  V2 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V2);
  SDValue LaneShuffle = DAG.getVectorShuffle(LaneVT, DL, V1, V2, LaneMask);

  // Cast it back to the type we actually want.
  LaneShuffle = DAG.getNode(ISD::BITCAST, DL, VT, LaneShuffle);

  // Now do a simple shuffle that isn't lane crossing.
  SmallVector<int, 8> NewMask;
  NewMask.resize(Size, -1);
  for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0)
      NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
  assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) &&
         "Must not introduce lane crosses at this point!");

  return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask);
}
/// \brief Test whether the specified input (0 or 1) is in-place blended by the
/// given mask.
///
/// This returns true if the elements from a particular input are already in
/// the slots required by the given mask and require no permutation.
static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
  assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
  int Size = Mask.size();
  for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
      return false;

  return true;
}
/// \brief Handle lowering of 4-lane 64-bit floating point shuffles.
///
/// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
/// isn't available.
static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  SmallVector<int, 4> WidenedMask;
  if (canWidenShuffleElements(Mask, WidenedMask))
    return lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask, Subtarget,
                                    DAG);
  if (isSingleInputShuffleMask(Mask)) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f64, DL, V1,
                                                          Mask, Subtarget, DAG))
      return Broadcast;

    // Use low duplicate instructions for masks that match their pattern.
    if (isShuffleEquivalent(Mask, 0, 0, 2, 2))
      return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);

    if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
      // Non-half-crossing single input shuffles can be lowered with an
      // interleaved permutation.
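      // Illustration: the in-lane mask <1, 0, 3, 2> encodes to the immediate
      // 0b0101, i.e. a VPERMILPD that swaps the two doubles within each
      // 128-bit lane.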
      unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
                              ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
                         DAG.getConstant(VPERMILPMask, MVT::i8));
    }

    // With AVX2 we have direct support for this permutation.
    if (Subtarget->hasAVX2())
      return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
                         getV4X86ShuffleImm8ForMask(Mask, DAG));

    // Otherwise, fall back.
    return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2,
                                                   Mask, DAG);
  }
  // X86 has dedicated unpack instructions that can handle specific blend
  // operations: UNPCKH and UNPCKL.
  if (isShuffleEquivalent(Mask, 0, 4, 2, 6))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V1, V2);
  if (isShuffleEquivalent(Mask, 1, 5, 3, 7))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V1, V2);

  // If we have a single input to the zero element, insert that into V1 if we
  // can do so cheaply.
  int NumV2Elements =
      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
  if (NumV2Elements == 1 && Mask[0] >= 4)
    if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
            MVT::v4f64, DL, V1, V2, Mask, Subtarget, DAG))
      return Insertion;

  if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
                                                Subtarget, DAG))
    return Blend;
  // Check if the blend happens to exactly fit that of SHUFPD.
  if ((Mask[0] == -1 || Mask[0] < 2) &&
      (Mask[1] == -1 || (Mask[1] >= 4 && Mask[1] < 6)) &&
      (Mask[2] == -1 || (Mask[2] >= 2 && Mask[2] < 4)) &&
      (Mask[3] == -1 || Mask[3] >= 6)) {
    unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 5) << 1) |
                          ((Mask[2] == 3) << 2) | ((Mask[3] == 7) << 3);
    return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V1, V2,
                       DAG.getConstant(SHUFPDMask, MVT::i8));
  }
  if ((Mask[0] == -1 || (Mask[0] >= 4 && Mask[0] < 6)) &&
      (Mask[1] == -1 || Mask[1] < 2) &&
      (Mask[2] == -1 || Mask[2] >= 6) &&
      (Mask[3] == -1 || (Mask[3] >= 2 && Mask[3] < 4))) {
    unsigned SHUFPDMask = (Mask[0] == 5) | ((Mask[1] == 1) << 1) |
                          ((Mask[2] == 7) << 2) | ((Mask[3] == 3) << 3);
    return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V2, V1,
                       DAG.getConstant(SHUFPDMask, MVT::i8));
  }
  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle. However, if we have AVX2 and either input is already in place,
  // we will be able to shuffle even across lanes the other input in a single
  // instruction so skip this pattern.
  if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
                                 isShuffleMaskInputInPlace(1, Mask))))
    if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
            DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
      return Result;

  // If we have AVX2 then we always want to lower with a blend because at v4 we
  // can fully permute the elements.
  if (Subtarget->hasAVX2())
    return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
                                                      Mask, DAG);

  // Otherwise fall back on generic lowering.
  return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, DAG);
}
/// \brief Handle lowering of 4-lane 64-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v4i64 shuffling.
static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
  assert(Subtarget->hasAVX2() && "We can only lower v4i64 with AVX2!");

  SmallVector<int, 4> WidenedMask;
  if (canWidenShuffleElements(Mask, WidenedMask))
    return lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask, Subtarget,
                                    DAG);

  if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
                                                Subtarget, DAG))
    return Blend;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i64, DL, V1,
                                                        Mask, Subtarget, DAG))
    return Broadcast;
  // When the shuffle is mirrored between the 128-bit lanes of the unit, we can
  // use lower latency instructions that will operate on both 128-bit lanes.
  SmallVector<int, 2> RepeatedMask;
  if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
    if (isSingleInputShuffleMask(Mask)) {
      int PSHUFDMask[] = {-1, -1, -1, -1};
      for (int i = 0; i < 2; ++i)
        if (RepeatedMask[i] >= 0) {
          PSHUFDMask[2 * i] = 2 * RepeatedMask[i];
          PSHUFDMask[2 * i + 1] = 2 * RepeatedMask[i] + 1;
        }
      return DAG.getNode(
          ISD::BITCAST, DL, MVT::v4i64,
          DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
                      DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, V1),
                      getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
    }

    // Use dedicated unpack instructions for masks that match their pattern.
    if (isShuffleEquivalent(Mask, 0, 4, 2, 6))
      return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V1, V2);
    if (isShuffleEquivalent(Mask, 1, 5, 3, 7))
      return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V1, V2);
  }
  // AVX2 provides a direct instruction for permuting a single input across
  // lanes.
  if (isSingleInputShuffleMask(Mask))
    return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DAG));

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle. However, if we have AVX2 and either input is already in place,
  // we will be able to shuffle even across lanes the other input in a single
  // instruction so skip this pattern.
  if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
                                 isShuffleMaskInputInPlace(1, Mask))))
    if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
            DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
      return Result;

  // Otherwise fall back on generic blend lowering.
  return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
                                                    Mask, DAG);
}
/// \brief Handle lowering of 8-lane 32-bit floating point shuffles.
///
/// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
/// isn't available.
static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
                                                Subtarget, DAG))
    return Blend;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8f32, DL, V1,
                                                        Mask, Subtarget, DAG))
    return Broadcast;
  // If the shuffle mask is repeated in each 128-bit lane, we have many more
  // options to efficiently lower the shuffle.
  SmallVector<int, 4> RepeatedMask;
  if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
    assert(RepeatedMask.size() == 4 &&
           "Repeated masks must be half the mask width!");

    // Use even/odd duplicate instructions for masks that match their pattern.
    if (isShuffleEquivalent(Mask, 0, 0, 2, 2, 4, 4, 6, 6))
      return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
    if (isShuffleEquivalent(Mask, 1, 1, 3, 3, 5, 5, 7, 7))
      return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);

    if (isSingleInputShuffleMask(Mask))
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
                         getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));

    // Use dedicated unpack instructions for masks that match their pattern.
    if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 4, 12, 5, 13))
      return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V1, V2);
    if (isShuffleEquivalent(Mask, 2, 10, 3, 11, 6, 14, 7, 15))
      return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V1, V2);

    // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
    // have already handled any direct blends. We also need to squash the
    // repeated mask into a simulated v4f32 mask.
    for (int i = 0; i < 4; ++i)
      if (RepeatedMask[i] >= 8)
        RepeatedMask[i] -= 4;
    return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2,
                                        DAG);
  }
  // If we have a single input shuffle with different shuffle patterns in the
  // two 128-bit lanes use the variable mask to VPERMILPS.
  if (isSingleInputShuffleMask(Mask)) {
    SDValue VPermMask[8];
    for (int i = 0; i < 8; ++i)
      VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
                                 : DAG.getConstant(Mask[i], MVT::i32);
    if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
      return DAG.getNode(
          X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
          DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask));

    if (Subtarget->hasAVX2())
      return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32,
                         DAG.getNode(ISD::BITCAST, DL, MVT::v8f32,
                                     DAG.getNode(ISD::BUILD_VECTOR, DL,
                                                 MVT::v8i32, VPermMask)),
                         V1);

    // Otherwise, fall back.
    return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2,
                                                   Mask, DAG);
  }
  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
          DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
    return Result;

  // If we have AVX2 then we always want to lower with a blend because at v8 we
  // can fully permute the elements.
  if (Subtarget->hasAVX2())
    return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
                                                      Mask, DAG);

  // Otherwise fall back on generic lowering.
  return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, DAG);
}
/// \brief Handle lowering of 8-lane 32-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v8i32 shuffling.
static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
  assert(Subtarget->hasAVX2() && "We can only lower v8i32 with AVX2!");

  if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
                                                Subtarget, DAG))
    return Blend;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i32, DL, V1,
                                                        Mask, Subtarget, DAG))
    return Broadcast;
  // If the shuffle mask is repeated in each 128-bit lane we can use more
  // efficient instructions that mirror the shuffles across the two 128-bit
  // lanes.
  SmallVector<int, 4> RepeatedMask;
  if (is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask)) {
    assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
    if (isSingleInputShuffleMask(Mask))
      return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
                         getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));

    // Use dedicated unpack instructions for masks that match their pattern.
    if (isShuffleEquivalent(Mask, 0, 8, 1, 9, 4, 12, 5, 13))
      return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V1, V2);
    if (isShuffleEquivalent(Mask, 2, 10, 3, 11, 6, 14, 7, 15))
      return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V1, V2);
  }
  // If the shuffle patterns aren't repeated but it is a single input, directly
  // generate a cross-lane VPERMD instruction.
  if (isSingleInputShuffleMask(Mask)) {
    SDValue VPermMask[8];
    for (int i = 0; i < 8; ++i)
      VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
                                 : DAG.getConstant(Mask[i], MVT::i32);
    return DAG.getNode(
        X86ISD::VPERMV, DL, MVT::v8i32,
        DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
  }

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
          DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
    return Result;

  // Otherwise fall back on generic blend lowering.
  return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
                                                    Mask, DAG);
}
/// \brief Handle lowering of 16-lane 16-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v16i16 shuffling.
static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                        const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
  assert(Subtarget->hasAVX2() && "We can only lower v16i16 with AVX2!");

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i16, DL, V1,
                                                        Mask, Subtarget, DAG))
    return Broadcast;

  if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
                                                Subtarget, DAG))
    return Blend;
  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(Mask,
                          // First 128-bit lane:
                          0, 16, 1, 17, 2, 18, 3, 19,
                          // Second 128-bit lane:
                          8, 24, 9, 25, 10, 26, 11, 27))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i16, V1, V2);
  if (isShuffleEquivalent(Mask,
                          // First 128-bit lane:
                          4, 20, 5, 21, 6, 22, 7, 23,
                          // Second 128-bit lane:
                          12, 28, 13, 29, 14, 30, 15, 31))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i16, V1, V2);
  if (isSingleInputShuffleMask(Mask)) {
    // There are no generalized cross-lane shuffle operations available on i16
    // element types.
    if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
      return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
                                                     Mask, DAG);

    SDValue PSHUFBMask[32];
    for (int i = 0; i < 16; ++i) {
      if (Mask[i] == -1) {
        PSHUFBMask[2 * i] = PSHUFBMask[2 * i + 1] = DAG.getUNDEF(MVT::i8);
        continue;
      }

      int M = i < 8 ? Mask[i] : Mask[i] - 8;
      assert(M >= 0 && M < 8 && "Invalid single-input mask!");
      PSHUFBMask[2 * i] = DAG.getConstant(2 * M, MVT::i8);
      PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, MVT::i8);
    }
    return DAG.getNode(
        ISD::BITCAST, DL, MVT::v16i16,
        DAG.getNode(
            X86ISD::PSHUFB, DL, MVT::v32i8,
            DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1),
            DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask)));
  }
  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
          DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
    return Result;

  // Otherwise fall back on generic lowering.
  return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG);
}
/// \brief Handle lowering of 32-lane 8-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v32i8 shuffling.
static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
  assert(Subtarget->hasAVX2() && "We can only lower v32i8 with AVX2!");

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v32i8, DL, V1,
                                                        Mask, Subtarget, DAG))
    return Broadcast;

  if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
                                                Subtarget, DAG))
    return Blend;
  // Use dedicated unpack instructions for masks that match their pattern.
  // Note that these are repeated 128-bit lane unpacks, not unpacks across all
  // 256 bits.
  if (isShuffleEquivalent(
          Mask,
          // First 128-bit lane:
          0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39,
          // Second 128-bit lane:
          16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v32i8, V1, V2);
  if (isShuffleEquivalent(
          Mask,
          // First 128-bit lane:
          8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
          // Second 128-bit lane:
          24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v32i8, V1, V2);
  if (isSingleInputShuffleMask(Mask)) {
    // There are no generalized cross-lane shuffle operations available on i8
    // element types.
    if (is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
      return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2,
                                                     Mask, DAG);

    SDValue PSHUFBMask[32];
    for (int i = 0; i < 32; ++i)
      PSHUFBMask[i] =
          Mask[i] < 0
              ? DAG.getUNDEF(MVT::i8)
              : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16,
                                MVT::i8);

    return DAG.getNode(
        X86ISD::PSHUFB, DL, MVT::v32i8, V1,
        DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask));
  }
  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
          DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
    return Result;

  // Otherwise fall back on generic lowering.
  return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG);
}
/// \brief High-level routine to lower various 256-bit x86 vector shuffles.
///
/// This routine either breaks down the specific type of a 256-bit x86 vector
/// shuffle or splits it into two 128-bit shuffles and fuses the results back
/// together based on the available instructions.
static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                        MVT VT, const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();

  // There is a really nice hard cut-over between AVX1 and AVX2 that means we
  // can check for those subtargets here and avoid much of the subtarget
  // querying in the per-vector-type lowering routines. With AVX1 we have
  // essentially *zero* ability to manipulate a 256-bit vector with integer
  // types. Since we'll use floating point types there eventually, just
  // immediately cast everything to a float and operate entirely in that
  // domain.
  if (VT.isInteger() && !Subtarget->hasAVX2()) {
    int ElementBits = VT.getScalarSizeInBits();
    if (ElementBits < 32)
      // No floating point type available, decompose into 128-bit vectors.
      return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);

    MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
                                VT.getVectorNumElements());
    V1 = DAG.getNode(ISD::BITCAST, DL, FpVT, V1);
    V2 = DAG.getNode(ISD::BITCAST, DL, FpVT, V2);
    return DAG.getNode(ISD::BITCAST, DL, VT,
                       DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
  }

  switch (VT.SimpleTy) {
  case MVT::v4f64:
    return lowerV4F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v4i64:
    return lowerV4I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v8f32:
    return lowerV8F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v8i32:
    return lowerV8I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v16i16:
    return lowerV16I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v32i8:
    return lowerV32I8VectorShuffle(Op, V1, V2, Subtarget, DAG);

  default:
    llvm_unreachable("Not a valid 256-bit x86 vector type!");
  }
}
/// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  // X86 has dedicated unpack instructions that can handle specific blend
  // operations: UNPCKH and UNPCKL.
  if (isShuffleEquivalent(Mask, 0, 8, 2, 10, 4, 12, 6, 14))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f64, V1, V2);
  if (isShuffleEquivalent(Mask, 1, 9, 3, 11, 5, 13, 7, 15))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f64, V1, V2);

  // FIXME: Implement direct support for this type!
  return splitAndLowerVectorShuffle(DL, MVT::v8f64, V1, V2, Mask, DAG);
}
/// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                        const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(Mask,
                          0, 16, 1, 17, 4, 20, 5, 21,
                          8, 24, 9, 25, 12, 28, 13, 29))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16f32, V1, V2);
  if (isShuffleEquivalent(Mask,
                          2, 18, 3, 19, 6, 22, 7, 23,
                          10, 26, 11, 27, 14, 30, 15, 31))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16f32, V1, V2);

  // FIXME: Implement direct support for this type!
  return splitAndLowerVectorShuffle(DL, MVT::v16f32, V1, V2, Mask, DAG);
}
/// \brief Handle lowering of 8-lane 64-bit integer shuffles.
static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  // X86 has dedicated unpack instructions that can handle specific blend
  // operations: UNPCKH and UNPCKL.
  if (isShuffleEquivalent(Mask, 0, 8, 2, 10, 4, 12, 6, 14))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i64, V1, V2);
  if (isShuffleEquivalent(Mask, 1, 9, 3, 11, 5, 13, 7, 15))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i64, V1, V2);

  // FIXME: Implement direct support for this type!
  return splitAndLowerVectorShuffle(DL, MVT::v8i64, V1, V2, Mask, DAG);
}

/// \brief Handle lowering of 16-lane 32-bit integer shuffles.
static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                        const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(Mask,
                          0, 16, 1, 17, 4, 20, 5, 21,
                          8, 24, 9, 25, 12, 28, 13, 29))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i32, V1, V2);
  if (isShuffleEquivalent(Mask,
                          2, 18, 3, 19, 6, 22, 7, 23,
                          10, 26, 11, 27, 14, 30, 15, 31))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i32, V1, V2);

  // FIXME: Implement direct support for this type!
  return splitAndLowerVectorShuffle(DL, MVT::v16i32, V1, V2, Mask, DAG);
}

/// \brief Handle lowering of 32-lane 16-bit integer shuffles.
static SDValue lowerV32I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                        const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
  assert(Subtarget->hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");

  // FIXME: Implement direct support for this type!
  return splitAndLowerVectorShuffle(DL, MVT::v32i16, V1, V2, Mask, DAG);
}

/// \brief Handle lowering of 64-lane 8-bit integer shuffles.
static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
  assert(Subtarget->hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");

  // FIXME: Implement direct support for this type!
  return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
}

/// \brief High-level routine to lower various 512-bit x86 vector shuffles.
///
/// This routine either breaks down the specific type of a 512-bit x86 vector
/// shuffle or splits it into two 256-bit shuffles and fuses the results back
/// together based on the available instructions.
static SDValue lower512BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                        MVT VT, const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Subtarget->hasAVX512() &&
         "Cannot lower 512-bit vectors w/ basic ISA!");

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(VT.SimpleTy, DL, V1,
                                                        Mask, Subtarget, DAG))
    return Broadcast;
  // Dispatch to each element type for lowering. If we don't have support for
  // specific element type shuffles at 512 bits, immediately split them and
  // lower them. Each lowering routine of a given type is allowed to assume that
  // the requisite ISA extensions for that element type are available.
  switch (VT.SimpleTy) {
  case MVT::v8f64:
    return lowerV8F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v16f32:
    return lowerV16F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v8i64:
    return lowerV8I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v16i32:
    return lowerV16I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v32i16:
    if (Subtarget->hasBWI())
      return lowerV32I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
    break;
  case MVT::v64i8:
    if (Subtarget->hasBWI())
      return lowerV64I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
    break;

  default:
    llvm_unreachable("Not a valid 512-bit x86 vector type!");
  }
  // Otherwise fall back on splitting.
  return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
}

/// \brief Top-level lowering for x86 vector shuffles.
///
/// This handles decomposition, canonicalization, and lowering of all x86
/// vector shuffles. Most of the specific lowering strategies are encapsulated
/// above in helper routines. The canonicalization attempts to widen shuffles
/// to involve fewer lanes of wider elements, consolidate symmetric patterns
/// s.t. only one of the two inputs needs to be tested, etc.
static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
                                  SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();
  int NumElements = VT.getVectorNumElements();
  SDLoc dl(Op);

  assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
  bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
  if (V1IsUndef && V2IsUndef)
    return DAG.getUNDEF(VT);

  // When we create a shuffle node we put the UNDEF node to second operand,
  // but in some cases the first operand may be transformed to UNDEF.
  // In this case we should just commute the node.
  if (V1IsUndef)
    return DAG.getCommutedVectorShuffle(*SVOp);
  // Check for non-undef masks pointing at an undef vector and make the masks
  // undef as well. This makes it easier to match the shuffle based solely on
  // the mask.
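  // For example, with V2 undef, the v4i32 mask <0, 5, 1, 4> is canonicalized
  // to <0, -1, 1, -1>.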
  if (V2IsUndef)
    for (int M : Mask)
      if (M >= NumElements) {
        SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
        for (int &M : NewMask)
          if (M >= NumElements)
            M = -1;
        return DAG.getVectorShuffle(VT, dl, V1, V2, NewMask);
      }
  // Try to collapse shuffles into using a vector type with fewer elements but
  // wider element types. We cap this to not form integers or floating point
  // elements wider than 64 bits, but it might be interesting to form i128
  // integers to handle flipping the low and high halves of AVX 256-bit vectors.
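  // For example, a v4i32 shuffle <0, 1, 6, 7> can be widened to the v2i64
  // shuffle <0, 3> when v2i64 is legal.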
  SmallVector<int, 16> WidenedMask;
  if (VT.getScalarSizeInBits() < 64 &&
      canWidenShuffleElements(Mask, WidenedMask)) {
    MVT NewEltVT = VT.isFloatingPoint()
                       ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
                       : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
    MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
    // Make sure that the new vector type is legal. For example, v2f64 isn't
    // legal on SSE1.
    if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
      V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
      V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
      return DAG.getNode(ISD::BITCAST, dl, VT,
                         DAG.getVectorShuffle(NewVT, dl, V1, V2, WidenedMask));
    }
  }
  int NumV1Elements = 0, NumUndefElements = 0, NumV2Elements = 0;
  for (int M : SVOp->getMask())
    if (M < 0)
      ++NumUndefElements;
    else if (M < NumElements)
      ++NumV1Elements;
    else
      ++NumV2Elements;
  // Commute the shuffle as needed such that more elements come from V1 than
  // V2. This allows us to match the shuffle pattern strictly on how many
  // elements come from V1 without handling the symmetric cases.
  if (NumV2Elements > NumV1Elements)
    return DAG.getCommutedVectorShuffle(*SVOp);
  // When the number of V1 and V2 elements are the same, try to minimize the
  // number of uses of V2 in the low half of the vector. When that is tied,
  // ensure that the sum of indices for V1 is equal to or lower than the sum of
  // indices for V2. When those are equal, try to ensure that the number of odd
  // indices for V1 is lower than the number of odd indices for V2.
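  // For example, the v4i32 mask <4, 5, 0, 1> takes two elements from each
  // input but both low elements from V2, so it is commuted to <0, 1, 4, 5>.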
  if (NumV1Elements == NumV2Elements) {
    int LowV1Elements = 0, LowV2Elements = 0;
    for (int M : SVOp->getMask().slice(0, NumElements / 2))
      if (M >= NumElements)
        ++LowV2Elements;
      else if (M >= 0)
        ++LowV1Elements;
    if (LowV2Elements > LowV1Elements) {
      return DAG.getCommutedVectorShuffle(*SVOp);
    } else if (LowV2Elements == LowV1Elements) {
      int SumV1Indices = 0, SumV2Indices = 0;
      for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
        if (SVOp->getMask()[i] >= NumElements)
          SumV2Indices += i;
        else if (SVOp->getMask()[i] >= 0)
          SumV1Indices += i;
      if (SumV2Indices < SumV1Indices) {
        return DAG.getCommutedVectorShuffle(*SVOp);
      } else if (SumV2Indices == SumV1Indices) {
        int NumV1OddIndices = 0, NumV2OddIndices = 0;
        for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
          if (SVOp->getMask()[i] >= NumElements)
            NumV2OddIndices += i % 2;
          else if (SVOp->getMask()[i] >= 0)
            NumV1OddIndices += i % 2;
        if (NumV2OddIndices < NumV1OddIndices)
          return DAG.getCommutedVectorShuffle(*SVOp);
      }
    }
  }
  // For each vector width, delegate to a specialized lowering routine.
  if (VT.getSizeInBits() == 128)
    return lower128BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);

  if (VT.getSizeInBits() == 256)
    return lower256BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);

  // FIXME: Implement full AVX-512 support!
  if (VT.getSizeInBits() == 512)
    return lower512BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);

  llvm_unreachable("Unimplemented!");
}

//===----------------------------------------------------------------------===//
// Legacy vector shuffle lowering
//
// This code is the legacy code handling vector shuffles until the above
// replaces its functionality and performance.
//===----------------------------------------------------------------------===//

static bool isBlendMask(ArrayRef<int> MaskVals, MVT VT, bool hasSSE41,
                        bool hasInt256, unsigned *MaskOut = nullptr) {
  MVT EltVT = VT.getVectorElementType();

  // There is no blend with immediate in AVX-512.
  if (VT.is512BitVector())
    return false;

  if (!hasSSE41 || EltVT == MVT::i8)
    return false;
  if (!hasInt256 && VT == MVT::v16i16)
    return false;

  unsigned MaskValue = 0;
  unsigned NumElems = VT.getVectorNumElements();
  // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
  unsigned NumLanes = (NumElems - 1) / 8 + 1;
  unsigned NumElemsInLane = NumElems / NumLanes;
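  // The blend immediate gets one bit per (per-lane) element; bit i set means
  // result element i is taken from the second source. E.g. the v4i32 mask
  // <0, 5, 2, 7> yields MaskValue == 0b1010.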
  // Blend for v16i16 should be symmetric for both lanes.
  for (unsigned i = 0; i < NumElemsInLane; ++i) {
    int SndLaneEltIdx = (NumLanes == 2) ? MaskVals[i + NumElemsInLane] : -1;
    int EltIdx = MaskVals[i];

    if ((EltIdx < 0 || EltIdx == (int)i) &&
        (SndLaneEltIdx < 0 || SndLaneEltIdx == (int)(i + NumElemsInLane)))
      continue;

    if (((unsigned)EltIdx == (i + NumElems)) &&
        (SndLaneEltIdx < 0 ||
         (unsigned)SndLaneEltIdx == i + NumElems + NumElemsInLane))
      MaskValue |= (1 << i);
    else
      return false;
  }

  if (MaskOut)
    *MaskOut = MaskValue;
  return true;
}

// Try to lower a shuffle node into a simple blend instruction.
// This function assumes isBlendMask returns true for this
// ShuffleVectorSDNode.
static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
                                          unsigned MaskValue,
                                          const X86Subtarget *Subtarget,
                                          SelectionDAG &DAG) {
  MVT VT = SVOp->getSimpleValueType(0);
  MVT EltVT = VT.getVectorElementType();
  assert(isBlendMask(SVOp->getMask(), VT, Subtarget->hasSSE41(),
                     Subtarget->hasInt256()) &&
         "Trying to lower a VECTOR_SHUFFLE to a Blend but with the wrong mask");
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);
  unsigned NumElems = VT.getVectorNumElements();

  // Convert i32 vectors to floating point if it is not AVX2.
  // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
  MVT BlendVT = VT;
  if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
    BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
                               NumElems);
    V1 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V1);
    V2 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V2);
  }

  SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, V1, V2,
                            DAG.getConstant(MaskValue, MVT::i32));
  return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
}

/// In vector type \p VT, return true if the element at index \p InputIdx
/// falls on a different 128-bit lane than \p OutputIdx.
static bool ShuffleCrosses128bitLane(MVT VT, unsigned InputIdx,
                                     unsigned OutputIdx) {
  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
  return InputIdx * EltSize / 128 != OutputIdx * EltSize / 128;
}

/// Generate a PSHUFB if possible. Selects elements from \p V1 according to
/// \p MaskVals. MaskVals[OutputIdx] = InputIdx specifies that we want to
/// shuffle the element at InputIdx in V1 to OutputIdx in the result. If \p
/// MaskVals refers to elements outside of \p V1 or is undef (-1), insert a
/// zero.
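/// For illustration, a v4i32 shuffle <1, -1, 0, 3> of V1 produces the byte
/// mask <4,5,6,7, 0x80,0x80,0x80,0x80, 0,1,2,3, 12,13,14,15>.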
static SDValue getPSHUFB(ArrayRef<int> MaskVals, SDValue V1, SDLoc &dl,
                         SelectionDAG &DAG) {
  MVT VT = V1.getSimpleValueType();
  assert(VT.is128BitVector() || VT.is256BitVector());

  MVT EltVT = VT.getVectorElementType();
  unsigned EltSizeInBytes = EltVT.getSizeInBits() / 8;
  unsigned NumElts = VT.getVectorNumElements();

  SmallVector<SDValue, 32> PshufbMask;
  for (unsigned OutputIdx = 0; OutputIdx < NumElts; ++OutputIdx) {
    int InputIdx = MaskVals[OutputIdx];
    unsigned InputByteIdx;

    if (InputIdx < 0 || NumElts <= (unsigned)InputIdx)
      InputByteIdx = 0x80;
    else {
      // Cross lane is not allowed.
      if (ShuffleCrosses128bitLane(VT, InputIdx, OutputIdx))
        return SDValue();
      InputByteIdx = InputIdx * EltSizeInBytes;
      // Index is a byte offset within the 128-bit lane.
      InputByteIdx &= 0xf;
    }

    for (unsigned j = 0; j < EltSizeInBytes; ++j) {
      PshufbMask.push_back(DAG.getConstant(InputByteIdx, MVT::i8));
      if (InputByteIdx != 0x80)
        ++InputByteIdx;
    }
  }

  MVT ShufVT = MVT::getVectorVT(MVT::i8, PshufbMask.size());
  if (ShufVT != VT)
    V1 = DAG.getNode(ISD::BITCAST, dl, ShufVT, V1);
  return DAG.getNode(X86ISD::PSHUFB, dl, ShufVT, V1,
                     DAG.getNode(ISD::BUILD_VECTOR, dl, ShufVT, PshufbMask));
}

// v8i16 shuffles - Prefer shuffles in the following order:
// 1. [all]   pshuflw, pshufhw, optional move
// 2. [ssse3] 1 x pshufb
// 3. [ssse3] 2 x pshufb + 1 x por
// 4. [all]   mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
static SDValue
LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
                         SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);
  SmallVector<int, 8> MaskVals;

  // Determine if more than 1 of the words in each of the low and high quadwords
  // of the result come from the same quadword of one of the two inputs. Undef
  // mask values count as coming from any quadword, for better codegen.
  //
  // Lo/HiQuad[i] = j indicates how many words from the ith quad of the input
  // feeds this quad. For i, 0 and 1 refer to V1, 2 and 3 refer to V2.
  unsigned LoQuad[] = { 0, 0, 0, 0 };
  unsigned HiQuad[] = { 0, 0, 0, 0 };
  // Indices of quads used.
  std::bitset<4> InputQuads;
  for (unsigned i = 0; i < 8; ++i) {
    unsigned *Quad = i < 4 ? LoQuad : HiQuad;
    int EltIdx = SVOp->getMaskElt(i);
    MaskVals.push_back(EltIdx);
    if (EltIdx < 0) {
      ++Quad[0];
      ++Quad[1];
      ++Quad[2];
      ++Quad[3];
      continue;
    }
    ++Quad[EltIdx / 4];
    InputQuads.set(EltIdx / 4);
  }
  int BestLoQuad = -1;
  unsigned MaxQuad = 1;
  for (unsigned i = 0; i < 4; ++i) {
    if (LoQuad[i] > MaxQuad) {
      BestLoQuad = i;
      MaxQuad = LoQuad[i];
    }
  }

  int BestHiQuad = -1;
  MaxQuad = 1;
  for (unsigned i = 0; i < 4; ++i) {
    if (HiQuad[i] > MaxQuad) {
      BestHiQuad = i;
      MaxQuad = HiQuad[i];
    }
  }

  // For SSSE3, if all 8 words of the result come from only 1 quadword of each
  // of the two input vectors, shuffle them into one input vector so only a
  // single pshufb instruction is necessary. If there are more than 2 input
  // quads, disable the next transformation since it does not help SSSE3.
  bool V1Used = InputQuads[0] || InputQuads[1];
  bool V2Used = InputQuads[2] || InputQuads[3];
  if (Subtarget->hasSSSE3()) {
    if (InputQuads.count() == 2 && V1Used && V2Used) {
      BestLoQuad = InputQuads[0] ? 0 : 1;
      BestHiQuad = InputQuads[2] ? 2 : 3;
    }
    if (InputQuads.count() > 2) {
      BestLoQuad = -1;
      BestHiQuad = -1;
    }
  }
  // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update
  // the shuffle mask. If a quad is scored as -1, that means that it contains
  // words from all 4 input quadwords.
  SDValue NewV;
  if (BestLoQuad >= 0 || BestHiQuad >= 0) {
    int MaskV[] = {
      BestLoQuad < 0 ? 0 : BestLoQuad,
      BestHiQuad < 0 ? 1 : BestHiQuad
    };
    NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
                  DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
                  DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
    NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);

    // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
    // source words for the shuffle, to aid later transformations.
    bool AllWordsInNewV = true;
    bool InOrder[2] = { true, true };
    for (unsigned i = 0; i != 8; ++i) {
      int idx = MaskVals[i];
      if (idx != (int)i)
        InOrder[i/4] = false;
      if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
        continue;
      AllWordsInNewV = false;
      break;
    }

    bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV;
    if (AllWordsInNewV) {
      for (int i = 0; i != 8; ++i) {
        int idx = MaskVals[i];
        if (idx < 0)
          continue;
        idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4;
        if ((idx != i) && idx < 4)
          pshufhw = false;
        if ((idx != i) && idx > 3)
          pshuflw = false;
      }
      V1 = NewV;
      V2Used = false;
      BestLoQuad = 0;
      BestHiQuad = 1;
    }

    // If we've eliminated the use of V2, and the new mask is a pshuflw or
    // pshufhw, that's as cheap as it gets. Return the new shuffle.
    if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
      unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW;
      unsigned TargetMask = 0;
      NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
                                  DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
      ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
      TargetMask = pshufhw ? getShufflePSHUFHWImmediate(SVOp):
                             getShufflePSHUFLWImmediate(SVOp);
      V1 = NewV.getOperand(0);
      return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
    }
  }
  // Promote splats to a larger type which usually leads to more efficient code.
  // FIXME: Is this true if pshufb is available?
  if (SVOp->isSplat())
    return PromoteSplat(SVOp, DAG);

  // If we have SSSE3, and all words of the result are from 1 input vector,
  // case 2 is generated, otherwise case 3 is generated. If no SSSE3
  // is present, fall back to case 4.
  if (Subtarget->hasSSSE3()) {
    SmallVector<SDValue,16> pshufbMask;

    // If we have elements from both input vectors, set the high bit of the
    // shuffle mask element to zero out elements that come from V2 in the V1
    // mask, and elements that come from V1 in the V2 mask, so that the two
    // results can be OR'd together.
    bool TwoInputs = V1Used && V2Used;
    V1 = getPSHUFB(MaskVals, V1, dl, DAG);
    if (!TwoInputs)
      return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);

    // Calculate the shuffle mask for the second input, shuffle it, and
    // OR it with the first shuffled input.
    CommuteVectorShuffleMask(MaskVals, 8);
    V2 = getPSHUFB(MaskVals, V2, dl, DAG);
    V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
    return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
  }
  // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
  // and update MaskVals with new element order.
  std::bitset<8> InOrder;
  if (BestLoQuad >= 0) {
    int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 };
    for (int i = 0; i != 4; ++i) {
      int idx = MaskVals[i];
      if (idx < 0) {
        InOrder.set(i);
      } else if ((idx / 4) == BestLoQuad) {
        MaskV[i] = idx & 3;
        InOrder.set(i);
      }
    }
    NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
                                &MaskV[0]);

    if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
      ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
      NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
                                  NewV.getOperand(0),
                                  getShufflePSHUFLWImmediate(SVOp), DAG);
    }
  }
  // If BestHiQuad >= 0, generate a pshufhw to put the high elements in order,
  // and update MaskVals with the new element order.
  if (BestHiQuad >= 0) {
    int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 };
    for (unsigned i = 4; i != 8; ++i) {
      int idx = MaskVals[i];
      if (idx < 0) {
        InOrder.set(i);
      } else if ((idx / 4) == BestHiQuad) {
        MaskV[i] = (idx & 3) + 4;
        InOrder.set(i);
      }
    }
    NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
                                &MaskV[0]);

    if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
      ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
      NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
                                  NewV.getOperand(0),
                                  getShufflePSHUFHWImmediate(SVOp), DAG);
    }
  }
  // In case BestHi & BestLo were both -1, which means each quadword has a word
  // from each of the four input quadwords, calculate the InOrder bitvector now
  // before falling through to the insert/extract cleanup.
  if (BestLoQuad == -1 && BestHiQuad == -1) {
    NewV = V1;
    for (int i = 0; i != 8; ++i)
      if (MaskVals[i] < 0 || MaskVals[i] == i)
        InOrder.set(i);
  }

  // The other elements are put in the right place using pextrw and pinsrw.
  for (unsigned i = 0; i != 8; ++i) {
    if (InOrder[i])
      continue;
    int EltIdx = MaskVals[i];
    if (EltIdx < 0)
      continue;
    SDValue ExtOp = (EltIdx < 8) ?
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
                  DAG.getIntPtrConstant(EltIdx)) :
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
                  DAG.getIntPtrConstant(EltIdx - 8));
    NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
                       DAG.getIntPtrConstant(i));
  }
  return NewV;
}

/// \brief v16i16 shuffles
///
/// FIXME: We only support generation of a single pshufb currently. We can
/// generalize the other applicable cases from LowerVECTOR_SHUFFLEv8i16 as
/// well (e.g 2 x pshufb + 1 x por).
static SDValue
LowerVECTOR_SHUFFLEv16i16(SDValue Op, SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);

  if (V2.getOpcode() != ISD::UNDEF)
    return SDValue();

  SmallVector<int, 16> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
  return getPSHUFB(MaskVals, V1, dl, DAG);
}

// v16i8 shuffles - Prefer shuffles in the following order:
// 1. [ssse3] 1 x pshufb
// 2. [ssse3] 2 x pshufb + 1 x por
// 3. [all]   v8i16 shuffle + N x pextrw + rotate + pinsrw
static SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
                                        const X86Subtarget* Subtarget,
                                        SelectionDAG &DAG) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);
  ArrayRef<int> MaskVals = SVOp->getMask();

  // Promote splats to a larger type which usually leads to more efficient code.
  // FIXME: Is this true if pshufb is available?
  if (SVOp->isSplat())
    return PromoteSplat(SVOp, DAG);
  // If we have SSSE3, case 1 is generated when all result bytes come from
  // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
  // present, fall back to case 3.

  // If SSSE3, use 1 pshufb instruction per vector with elements in the result.
  if (Subtarget->hasSSSE3()) {
    SmallVector<SDValue,16> pshufbMask;

    // If all result elements are from one input vector, then only translate
    // undef mask values to 0x80 (zero out result) in the pshufb mask.
    //
    // Otherwise, we have elements from both input vectors, and must zero out
    // elements that come from V2 in the first mask, and V1 in the second mask
    // so that we can OR them together.
    for (unsigned i = 0; i != 16; ++i) {
      int EltIdx = MaskVals[i];
      if (EltIdx < 0 || EltIdx >= 16)
        EltIdx = 0x80;
      pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
    }
    V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
                     DAG.getNode(ISD::BUILD_VECTOR, dl,
                                 MVT::v16i8, pshufbMask));

    // As PSHUFB will zero elements with negative indices, it's safe to ignore
    // the 2nd operand if it's undefined or zero.
    if (V2.getOpcode() == ISD::UNDEF ||
        ISD::isBuildVectorAllZeros(V2.getNode()))
      return V1;

    // Calculate the shuffle mask for the second input, shuffle it, and
    // OR it with the first shuffled input.
    pshufbMask.clear();
    for (unsigned i = 0; i != 16; ++i) {
      int EltIdx = MaskVals[i];
      EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16;
      pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
    }
    V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
                     DAG.getNode(ISD::BUILD_VECTOR, dl,
                                 MVT::v16i8, pshufbMask));
    return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
  }
  // No SSSE3 - Calculate in place words and then fix all out of place words
  // with 0-16 extracts & inserts. Worst case is 16 bytes out of order from
  // the 16 different words that comprise the two doublequadword input vectors.
  V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
  V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
  SDValue NewV = V1;
  for (int i = 0; i != 8; ++i) {
    int Elt0 = MaskVals[i*2];
    int Elt1 = MaskVals[i*2+1];

    // This word of the result is all undef, skip it.
    if (Elt0 < 0 && Elt1 < 0)
      continue;

    // This word of the result is already in the correct place, skip it.
    if ((Elt0 == i*2) && (Elt1 == i*2+1))
      continue;

    SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
    SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
    SDValue InsElt;

    // If Elt0 and Elt1 are defined, are consecutive, and can be loaded
    // together using a single extract, load it and store it.
    if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
      InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
                           DAG.getIntPtrConstant(Elt1 / 2));
      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
                         DAG.getIntPtrConstant(i));
      continue;
    }
    // If Elt1 is defined, extract it from the appropriate source. If the
    // source byte is not also odd, shift the extracted word left 8 bits,
    // otherwise clear the bottom 8 bits if we need to do an or.
    if (Elt1 >= 0) {
      InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
                           DAG.getIntPtrConstant(Elt1 / 2));
      if ((Elt1 & 1) == 0)
        InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
                             DAG.getConstant(8,
                                 TLI.getShiftAmountTy(InsElt.getValueType())));
      else if (Elt0 >= 0)
        InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
                             DAG.getConstant(0xFF00, MVT::i16));
    }
    // If Elt0 is defined, extract it from the appropriate source. If the
    // source byte is not also even, shift the extracted word right 8 bits. If
    // Elt1 was also defined, OR the extracted values together before
    // inserting them in the result.
    if (Elt0 >= 0) {
      SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
                                    Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
      if ((Elt0 & 1) != 0)
        InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
                              DAG.getConstant(8,
                                  TLI.getShiftAmountTy(InsElt0.getValueType())));
      else if (Elt1 >= 0)
        InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
                              DAG.getConstant(0x00FF, MVT::i16));
      InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
                         : InsElt0;
    }
    NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
                       DAG.getIntPtrConstant(i));
  }
  return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
}

// v32i8 shuffles - Translate to VPSHUFB if possible.
static
SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp,
                                 const X86Subtarget *Subtarget,
                                 SelectionDAG &DAG) {
  MVT VT = SVOp->getSimpleValueType(0);
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);
  SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());

  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
  bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode());
  bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode());

  // VPSHUFB may be generated if
  // (1) one of the input vectors is undefined or zeroinitializer.
  // The mask value 0x80 puts 0 in the corresponding slot of the vector.
  // And (2) the mask indexes don't cross the 128-bit lane.
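  // For example, when V2 is zeroinitializer, every mask entry selecting from
  // V2 is emitted as the 0x80 "write zero" byte in the VPSHUFB control vector.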
  if (VT != MVT::v32i8 || !Subtarget->hasInt256() ||
      (!V2IsUndef && !V2IsAllZero && !V1IsAllZero))
    return SDValue();

  if (V1IsAllZero && !V2IsAllZero) {
    CommuteVectorShuffleMask(MaskVals, 32);
    V1 = V2;
  }
  return getPSHUFB(MaskVals, V1, dl, DAG);
}

/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
/// done when every pair / quad of shuffle mask elements point to elements in
/// the right sequence. e.g.
/// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
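/// becomes the v4i32 shuffle <1, 5, 0, 7>.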
static
SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
                                 SelectionDAG &DAG) {
  MVT VT = SVOp->getSimpleValueType(0);
  SDLoc dl(SVOp);
  unsigned NumElems = VT.getVectorNumElements();
  MVT NewVT;
  unsigned Scale;
  switch (VT.SimpleTy) {
  default: llvm_unreachable("Unexpected!");
  case MVT::v2i64:
  case MVT::v2f64:
           return SDValue(SVOp, 0);
  case MVT::v4f32:  NewVT = MVT::v2f64; Scale = 2; break;
  case MVT::v4i32:  NewVT = MVT::v2i64; Scale = 2; break;
  case MVT::v8i16:  NewVT = MVT::v4i32; Scale = 2; break;
  case MVT::v16i8:  NewVT = MVT::v4i32; Scale = 4; break;
  case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break;
  case MVT::v32i8:  NewVT = MVT::v8i32; Scale = 4; break;
  }

  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i != NumElems; i += Scale) {
    int StartIdx = -1;
    for (unsigned j = 0; j != Scale; ++j) {
      int EltIdx = SVOp->getMaskElt(i+j);
      if (EltIdx < 0)
        continue;
      if (StartIdx < 0)
        StartIdx = (EltIdx / Scale);
      if (EltIdx != (int)(StartIdx*Scale + j))
        return SDValue();
    }
    MaskVec.push_back(StartIdx);
  }

  SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0));
  SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1));
  return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
}

/// getVZextMovL - Return a zero-extending vector move low node.
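///
/// For example, (X86ISD::VZEXT_MOVL v4i32 X) keeps X[0] in lane 0 and zeroes
/// lanes 1-3, which is roughly what the MOVQ/MOVD zero-extending moves provide.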
static SDValue getVZextMovL(MVT VT, MVT OpVT,
                            SDValue SrcOp, SelectionDAG &DAG,
                            const X86Subtarget *Subtarget, SDLoc dl) {
  if (VT == MVT::v2f64 || VT == MVT::v4f32) {
    LoadSDNode *LD = nullptr;
    if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
      LD = dyn_cast<LoadSDNode>(SrcOp);

    if (!LD) {
      // movssrr and movsdrr do not clear top bits. Try to use movd, movq
      // instead.
      MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
      if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
          SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
          SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
          SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
        OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
        return DAG.getNode(ISD::BITCAST, dl, VT,
                           DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
                                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                                   OpVT,
                                                   SrcOp.getOperand(0)
                                                        .getOperand(0))));
      }
    }
  }

  return DAG.getNode(ISD::BITCAST, dl, VT,
                     DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
                                 DAG.getNode(ISD::BITCAST, dl,
                                             OpVT, SrcOp)));
}

/// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vector shuffles
/// which could not be matched by any known target specific shuffle.
static SDValue
LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
  SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG);
  if (NewOp.getNode())
    return NewOp;

  MVT VT = SVOp->getSimpleValueType(0);

  unsigned NumElems = VT.getVectorNumElements();
  unsigned NumLaneElems = NumElems / 2;

  SDLoc dl(SVOp);
  MVT EltVT = VT.getVectorElementType();
  MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
  SDValue Output[2];
11729 for (unsigned l = 0; l < 2; ++l) {
11730 // Build a shuffle mask for the output, discovering on the fly which
11731 // input vectors to use as shuffle operands (recorded in InputUsed).
11732 // If building a suitable shuffle vector proves too hard, then bail
11733 // out with UseBuildVector set.
11734 bool UseBuildVector = false;
11735 int InputUsed[2] = { -1, -1 }; // Not yet discovered.
11736 unsigned LaneStart = l * NumLaneElems;
11737 for (unsigned i = 0; i != NumLaneElems; ++i) {
11738 // The mask element. This indexes into the input.
11739 int Idx = SVOp->getMaskElt(i+LaneStart);
11741 // the mask element does not index into any input vector.
11742 Mask.push_back(-1);
11746 // The input vector this mask element indexes into.
11747 int Input = Idx / NumLaneElems;
11749 // Turn the index into an offset from the start of the input vector.
11750 Idx -= Input * NumLaneElems;
11752 // Find or create a shuffle vector operand to hold this input.
11754 for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
11755 if (InputUsed[OpNo] == Input)
11756 // This input vector is already an operand.
11758 if (InputUsed[OpNo] < 0) {
11759 // Create a new operand for this input vector.
11760 InputUsed[OpNo] = Input;
11765 if (OpNo >= array_lengthof(InputUsed)) {
11766 // More than two input vectors used! Give up on trying to create a
11767 // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
11768 UseBuildVector = true;
11772 // Add the mask index for the new shuffle vector.
11773 Mask.push_back(Idx + OpNo * NumLaneElems);
    if (UseBuildVector) {
      SmallVector<SDValue, 16> SVOps;
      for (unsigned i = 0; i != NumLaneElems; ++i) {
        // The mask element. This indexes into the input.
        int Idx = SVOp->getMaskElt(i+LaneStart);
        if (Idx < 0) {
          SVOps.push_back(DAG.getUNDEF(EltVT));
          continue;
        }

        // The input vector this mask element indexes into.
        int Input = Idx / NumElems;

        // Turn the index into an offset from the start of the input vector.
        Idx -= Input * NumElems;

        // Extract the vector element by hand.
        SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
                                    SVOp->getOperand(Input),
                                    DAG.getIntPtrConstant(Idx)));
      }

      // Construct the output using a BUILD_VECTOR.
      Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, SVOps);
    } else if (InputUsed[0] < 0) {
      // No input vectors were used! The result is undefined.
      Output[l] = DAG.getUNDEF(NVT);
    } else {
      SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2),
                                        (InputUsed[0] % 2) * NumLaneElems,
                                        DAG, dl);
      // If only one input was used, use an undefined vector for the other.
      SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) :
        Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2),
                            (InputUsed[1] % 2) * NumLaneElems, DAG, dl);
      // At least one input vector was used. Create a new shuffle vector.
      Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
    }

    Mask.clear();
  }

  // Concatenate the result back.
  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]);
}

/// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
/// 4 elements, and match them with several different shuffle types.
static SDValue
LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);
  MVT VT = SVOp->getSimpleValueType(0);

  assert(VT.is128BitVector() && "Unsupported vector size");

  std::pair<int, int> Locs[4];
  int Mask1[] = { -1, -1, -1, -1 };
  SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end());

  unsigned NumHi = 0;
  unsigned NumLo = 0;
  for (unsigned i = 0; i != 4; ++i) {
    int Idx = PermMask[i];
    if (Idx < 0) {
      Locs[i] = std::make_pair(-1, -1);
    } else {
      assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
      if (Idx < 4) {
        Locs[i] = std::make_pair(0, NumLo);
        Mask1[NumLo] = Idx;
        NumLo++;
      } else {
        Locs[i] = std::make_pair(1, NumHi);
        if (2+NumHi < 4)
          Mask1[2+NumHi] = Idx;
        NumHi++;
      }
    }
  }
  if (NumLo <= 2 && NumHi <= 2) {
    // If no more than two elements come from either vector, this can be
    // implemented with two shuffles. The first shuffle gathers the elements.
    // The second shuffle, which takes the first shuffle as both of its
    // vector operands, puts the elements into the right order.
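    // E.g. for PermMask <4, 1, 6, 3> the first shuffle produces
    // <V1[1], V1[3], V2[0], V2[2]> and the second one applies <2, 0, 7, 5>
    // to that result, yielding <V2[0], V1[1], V2[2], V1[3]>.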
    V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);

    int Mask2[] = { -1, -1, -1, -1 };

    for (unsigned i = 0; i != 4; ++i)
      if (Locs[i].first != -1) {
        unsigned Idx = (i < 2) ? 0 : 4;
        Idx += Locs[i].first * 2 + Locs[i].second;
        Mask2[i] = Idx;
      }

    return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
  }
  if (NumLo == 3 || NumHi == 3) {
    // Otherwise, we must have three elements from one vector, call it X, and
    // one element from the other, call it Y. First, use a shufps to build an
    // intermediate vector with the one element from Y and the element from X
    // that will be in the same half in the final destination (the indexes
    // don't matter). Then, use a shufps to build the final vector, taking the
    // half containing the element from Y from the intermediate, and the other
    // half from X.
    if (NumHi == 3) {
      // Normalize it so the 3 elements come from V1.
      CommuteVectorShuffleMask(PermMask, 4);
      std::swap(V1, V2);
    }

    // Find the element from V2.
    unsigned HiIndex;
    for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
      int Val = PermMask[HiIndex];
      if (Val < 0)
        continue;
      if (Val >= 4)
        break;
    }

    Mask1[0] = PermMask[HiIndex];
    Mask1[1] = -1;
    Mask1[2] = PermMask[HiIndex^1];
    Mask1[3] = -1;
    V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);

    if (HiIndex >= 2) {
      Mask1[0] = PermMask[0];
      Mask1[1] = PermMask[1];
      Mask1[2] = HiIndex & 1 ? 6 : 4;
      Mask1[3] = HiIndex & 1 ? 4 : 6;
      return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
    }

    Mask1[0] = HiIndex & 1 ? 2 : 0;
    Mask1[1] = HiIndex & 1 ? 0 : 2;
    Mask1[2] = PermMask[2];
    Mask1[3] = PermMask[3];
    if (Mask1[2] >= 0)
      Mask1[2] += 4;
    if (Mask1[3] >= 0)
      Mask1[3] += 4;
    return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
  }
  // Break it into (shuffle shuffle_hi, shuffle_lo).
  int LoMask[] = { -1, -1, -1, -1 };
  int HiMask[] = { -1, -1, -1, -1 };

  int *MaskPtr = LoMask;
  unsigned MaskIdx = 0;
  unsigned LoIdx = 0;
  unsigned HiIdx = 2;
  for (unsigned i = 0; i != 4; ++i) {
    if (i == 2) {
      MaskPtr = HiMask;
      MaskIdx = 1;
      LoIdx = 0;
      HiIdx = 2;
    }
    int Idx = PermMask[i];
    if (Idx < 0) {
      Locs[i] = std::make_pair(-1, -1);
    } else if (Idx < 4) {
      Locs[i] = std::make_pair(MaskIdx, LoIdx);
      MaskPtr[LoIdx] = Idx;
      LoIdx++;
    } else {
      Locs[i] = std::make_pair(MaskIdx, HiIdx);
      MaskPtr[HiIdx] = Idx;
      HiIdx++;
    }
  }

  SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
  SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
  int MaskOps[] = { -1, -1, -1, -1 };
  for (unsigned i = 0; i != 4; ++i)
    if (Locs[i].first != -1)
      MaskOps[i] = Locs[i].first * 4 + Locs[i].second;
  return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
}

static bool MayFoldVectorLoad(SDValue V) {
  while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);

  if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
    V = V.getOperand(0);
  if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
      V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
    // BUILD_VECTOR (load), undef
    V = V.getOperand(0);

  return MayFoldLoad(V);
}

static
SDValue getMOVDDup(SDValue &Op, SDLoc &dl, SDValue V1, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  // Canonicalize to v2f64.
  V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
  return DAG.getNode(ISD::BITCAST, dl, VT,
                     getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
                                          V1, DAG));
}

static
SDValue getMOVLowToHigh(SDValue &Op, SDLoc &dl, SelectionDAG &DAG,
                        bool HasSSE2) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();

  assert(VT != MVT::v2i64 && "unsupported shuffle type");

  if (HasSSE2 && VT == MVT::v2f64)
    return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);

  // v4f32 or v4i32: canonicalized to v4f32 (which is legal for SSE1)
  return DAG.getNode(ISD::BITCAST, dl, VT,
                     getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32,
                            DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1),
                            DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG));
}

static
SDValue getMOVHighToLow(SDValue &Op, SDLoc &dl, SelectionDAG &DAG) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();

  assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
         "unsupported shuffle type");

  if (V2.getOpcode() == ISD::UNDEF)
    V2 = V1;

  // v4i32 or v4f32
  return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
}

static
SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();
  unsigned NumElems = VT.getVectorNumElements();

  // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
  // operand of these instructions is only memory, so check if there's a
  // potential load folding here, otherwise use SHUFPS or MOVSD to match the
  // same masks.
  bool CanFoldLoad = false;

  // Trivial case, when V2 comes from a load.
  if (MayFoldVectorLoad(V2))
    CanFoldLoad = true;

  // When V1 is a load, it can be folded later into a store in isel, example:
  //  (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
  //    turns into:
  //  (MOVLPSmr addr:$src1, VR128:$src2)
  // So, recognize this potential and also use MOVLPS or MOVLPD
  else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
    CanFoldLoad = true;
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  if (CanFoldLoad) {
    if (HasSSE2 && NumElems == 2)
      return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);

    if (NumElems == 4)
      // If we don't care about the second element, proceed to use movss.
      if (SVOp->getMaskElt(1) != -1)
        return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
  }

  // movl and movlp will both match v2i64, but v2i64 is never matched by
  // movl earlier because we make it strict to avoid messing with the movlp load
  // folding logic (see the code above getMOVLP call). Match it here then,
  // this is horrible, but will stay like this until we move all shuffle
  // matching to x86 specific nodes. Note that for the 1st condition all
  // types are matched with movsd.
  if (HasSSE2) {
    // FIXME: isMOVLMask should be checked and matched before getMOVLP,
    // as to remove this logic from here, as much as possible
    if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT))
      return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
    return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
  }
  assert(VT != MVT::v4i32 && "unsupported shuffle type");

  // Invert the operand order and use SHUFPS to match it.
  return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1,
                              getShuffleSHUFImmediate(SVOp), DAG);
}

static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
                                         SelectionDAG &DAG) {
  SDLoc dl(Load);
  MVT VT = Load->getSimpleValueType(0);
  MVT EVT = VT.getVectorElementType();
  SDValue Addr = Load->getOperand(1);
  SDValue NewAddr = DAG.getNode(
      ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
      DAG.getConstant(Index * EVT.getStoreSize(), Addr.getSimpleValueType()));

  SDValue NewLoad =
      DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
                  DAG.getMachineFunction().getMachineMemOperand(
                      Load->getMemOperand(), 0, EVT.getStoreSize()));
  return NewLoad;
}

// It is only safe to call this function if isINSERTPSMask is true for
// this shufflevector mask.
static SDValue getINSERTPS(ShuffleVectorSDNode *SVOp, SDLoc &dl,
                           SelectionDAG &DAG) {
  // Generate an insertps instruction when inserting an f32 from memory onto a
  // v4f32 or when copying a member from one v4f32 to another.
  // We also use it for transferring i32 from one register to another,
  // since it simply copies the same bits.
  // If we're transferring an i32 from memory to a specific element in a
  // register, we output a generic DAG that will match the PINSRD
  // instruction.
  MVT VT = SVOp->getSimpleValueType(0);
  MVT EVT = VT.getVectorElementType();
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  auto Mask = SVOp->getMask();
  assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
         "unsupported vector type for insertps/pinsrd");

  auto FromV1Predicate = [](const int &i) { return i < 4 && i > -1; };
  auto FromV2Predicate = [](const int &i) { return i >= 4; };
  int FromV1 = std::count_if(Mask.begin(), Mask.end(), FromV1Predicate);

  SDValue From;
  SDValue To;
  unsigned DestIndex;
  if (FromV1 == 1) {
    From = V1;
    To = V2;
    DestIndex = std::find_if(Mask.begin(), Mask.end(), FromV1Predicate) -
                Mask.begin();
    // If we have 1 element from each vector, we have to check if we're
    // changing V1's element's place. If so, we're done. Otherwise, we
    // should assume we're changing V2's element's place and behave
    // accordingly.
    int FromV2 = std::count_if(Mask.begin(), Mask.end(), FromV2Predicate);
    assert(DestIndex <= INT32_MAX && "truncated destination index");
    if (FromV1 == FromV2 &&
        static_cast<int>(DestIndex) == Mask[DestIndex] % 4) {
      From = V2;
      To = V1;
      DestIndex =
          std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
    }
  } else {
    assert(std::count_if(Mask.begin(), Mask.end(), FromV2Predicate) == 1 &&
           "More than one element from V1 and from V2, or no elements from one "
           "of the vectors. This case should not have returned true from "
           "isINSERTPSMask");
    From = V2;
    To = V1;
    DestIndex =
        std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
  }
  // Get an index into the source vector in the range [0,4) (the mask is
  // in the range [0,8) because it can address V1 and V2).
  unsigned SrcIndex = Mask[DestIndex] % 4;
  if (MayFoldLoad(From)) {
    // Trivial case, when From comes from a load and is only used by the
    // shuffle. Make it use insertps from the vector that we need from that
    // load.
    SDValue NewLoad =
        NarrowVectorLoadToElement(cast<LoadSDNode>(From), SrcIndex, DAG);
    if (!NewLoad.getNode())
      return SDValue();

    if (EVT == MVT::f32) {
      // Create this as a scalar to vector to match the instruction pattern.
      SDValue LoadScalarToVector =
          DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, NewLoad);
      SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4);
      return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, LoadScalarToVector,
                         InsertpsMask);
    } else { // EVT == MVT::i32
      // If we're getting an i32 from memory, use an INSERT_VECTOR_ELT
      // instruction, to match the PINSRD instruction, which loads an i32 to a
      // certain vector element.
      return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, To, NewLoad,
                         DAG.getConstant(DestIndex, MVT::i32));
    }
  }

  // Vector-element-to-vector
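  // The INSERTPS immediate encodes the source element in bits [7:6], the
  // destination element in bits [5:4], and a zero mask in bits [3:0], hence
  // the (DestIndex << 4 | SrcIndex << 6) below.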
  SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4 | SrcIndex << 6);
  return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, From, InsertpsMask);
}

// Reduce a vector shuffle to zext.
static SDValue LowerVectorIntExtend(SDValue Op, const X86Subtarget *Subtarget,
                                    SelectionDAG &DAG) {
  // PMOVZX is only available from SSE41.
  if (!Subtarget->hasSSE41())
    return SDValue();

  MVT VT = Op.getSimpleValueType();

  // Only AVX2 supports 256-bit vector integer extending.
  if (!Subtarget->hasInt256() && VT.is256BitVector())
    return SDValue();
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDLoc DL(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  unsigned NumElems = VT.getVectorNumElements();

  // Extending is a unary operation and the element type of the source vector
  // won't be equal to or larger than i64.
  if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() ||
      VT.getVectorElementType() == MVT::i64)
    return SDValue();
  // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4.
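  // E.g. the v8i16 mask <0, -1, 1, -1, 2, -1, 3, -1> has ratio 2 and is
  // lowered as a v4i32 VZEXT of the low four i16 elements.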
  unsigned Shift = 1; // Start from 2, i.e. 1 << 1.
  while ((1U << Shift) < NumElems) {
    if (SVOp->getMaskElt(1U << Shift) == 1)
      break;
    Shift += 1;
    // The maximal ratio is 8, i.e. from i8 to i64.
    if (Shift > 3)
      return SDValue();
  }

  // Check the shuffle mask.
  unsigned Mask = (1U << Shift) - 1;
  for (unsigned i = 0; i != NumElems; ++i) {
    int EltIdx = SVOp->getMaskElt(i);
    if ((i & Mask) != 0 && EltIdx != -1)
      return SDValue();
    if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift))
      return SDValue();
  }

  unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift;
  MVT NeVT = MVT::getIntegerVT(NBits);
  MVT NVT = MVT::getVectorVT(NeVT, NumElems >> Shift);

  if (!DAG.getTargetLoweringInfo().isTypeLegal(NVT))
    return SDValue();

  return DAG.getNode(ISD::BITCAST, DL, VT,
                     DAG.getNode(X86ISD::VZEXT, DL, NVT, V1));
}

static SDValue NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
                                      SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);

  if (isZeroShuffle(SVOp))
    return getZeroVector(VT, Subtarget, DAG, dl);

  // Handle splat operations
  if (SVOp->isSplat()) {
    // Use vbroadcast whenever the splat comes from a foldable load
    SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
    if (Broadcast.getNode())
      return Broadcast;
  }
  // Check integer expanding shuffles.
  SDValue NewOp = LowerVectorIntExtend(Op, Subtarget, DAG);
  if (NewOp.getNode())
    return NewOp;

  // If the shuffle can be profitably rewritten as a narrower shuffle, then
  // do it!
  if (VT == MVT::v8i16 || VT == MVT::v16i8 || VT == MVT::v16i16 ||
      VT == MVT::v32i8) {
    SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
    if (NewOp.getNode())
      return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
  } else if (VT.is128BitVector() && Subtarget->hasSSE2()) {
    // FIXME: Figure out a cleaner way to do this.
    if (ISD::isBuildVectorAllZeros(V2.getNode())) {
      SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
      if (NewOp.getNode()) {
        MVT NewVT = NewOp.getSimpleValueType();
        if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
                               NewVT, true, false))
          return getVZextMovL(VT, NewVT, NewOp.getOperand(0), DAG, Subtarget,
                              dl);
      }
    } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
      SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
      if (NewOp.getNode()) {
        MVT NewVT = NewOp.getSimpleValueType();
        if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
          return getVZextMovL(VT, NewVT, NewOp.getOperand(1), DAG, Subtarget,
                              dl);
      }
    }
  }
  return SDValue();
}

SDValue
X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  unsigned NumElems = VT.getVectorNumElements();
  bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
  bool V1IsSplat = false;
  bool V2IsSplat = false;
  bool HasSSE2 = Subtarget->hasSSE2();
  bool HasFp256 = Subtarget->hasFp256();
  bool HasInt256 = Subtarget->hasInt256();
  MachineFunction &MF = DAG.getMachineFunction();
  bool OptForSize = MF.getFunction()->getAttributes().
    hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);

  // Check if we should use the experimental vector shuffle lowering. If so,
  // delegate completely to that code path.
  if (ExperimentalVectorShuffleLowering)
    return lowerVectorShuffle(Op, Subtarget, DAG);
  assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");

  if (V1IsUndef && V2IsUndef)
    return DAG.getUNDEF(VT);

  // When we create a shuffle node we put the UNDEF node to second operand,
  // but in some cases the first operand may be transformed to UNDEF.
  // In this case we should just commute the node.
  if (V1IsUndef)
    return DAG.getCommutedVectorShuffle(*SVOp);

  // Vector shuffle lowering takes 3 steps:
  //
  // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
  //    narrowing and commutation of operands should be handled.
  // 2) Matching of shuffles with known shuffle masks to x86 target specific
  //    instructions.
  // 3) Rewriting of unmatched masks into new generic shuffle operations,
  //    so the shuffle can be broken into other shuffles and the legalizer can
  //    try the lowering again.
  //
  // The general idea is that no vector_shuffle operation should be left to
  // be matched during isel, all of them must be converted to a target specific
  // node.

  // Normalize the input vectors. Here splats, zeroed vectors, profitable
  // narrowing and commutation of operands should be handled. The actual code
  // doesn't include all of those, work in progress...
  SDValue NewOp = NormalizeVectorShuffle(Op, Subtarget, DAG);
  if (NewOp.getNode())
    return NewOp;

  SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end());
  // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
  // unpckh_undef). Only use pshufd if speed is more important than size.
  if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasInt256))
    return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
  if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasInt256))
    return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);

  if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() &&
      V2IsUndef && MayFoldVectorLoad(V1))
    return getMOVDDup(Op, dl, V1, DAG);

  if (isMOVHLPS_v_undef_Mask(M, VT))
    return getMOVHighToLow(Op, dl, DAG);

  // Use to match splats
  if (HasSSE2 && isUNPCKHMask(M, VT, HasInt256) && V2IsUndef &&
      (VT == MVT::v2f64 || VT == MVT::v2i64))
    return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);

  if (isPSHUFDMask(M, VT)) {
    // The actual implementation will match the mask in the if above and then
    // during isel it can match several different instructions, not only pshufd
    // as its name says, sad but true, emulate the behavior for now...
    if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
      return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);

    unsigned TargetMask = getShuffleSHUFImmediate(SVOp);

    if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
      return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);

    if (HasFp256 && (VT == MVT::v4f32 || VT == MVT::v2f64))
      return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1, TargetMask,
                                  DAG);

    return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1,
                                TargetMask, DAG);
  }
12396 if (isPALIGNRMask(M, VT, Subtarget))
12397 return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2,
12398 getShufflePALIGNRImmediate(SVOp),
12401 if (isVALIGNMask(M, VT, Subtarget))
12402 return getTargetShuffleNode(X86ISD::VALIGN, dl, VT, V1, V2,
12403 getShuffleVALIGNImmediate(SVOp),
12406 // Check if this can be converted into a logical shift.
12407 bool isLeft = false;
12408 unsigned ShAmt = 0;
12410 bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
12411 if (isShift && ShVal.hasOneUse()) {
12412 // If the shifted value has multiple uses, it may be cheaper to use
12413 // v_set0 + movlhps or movhlps, etc.
12414 MVT EltVT = VT.getVectorElementType();
12415 ShAmt *= EltVT.getSizeInBits();
12416 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12417 }
12419 if (isMOVLMask(M, VT)) {
12420 if (ISD::isBuildVectorAllZeros(V1.getNode()))
12421 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
12422 if (!isMOVLPMask(M, VT)) {
12423 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
12424 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12426 if (VT == MVT::v4i32 || VT == MVT::v4f32)
12427 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12428 }
12429 }
12431 // FIXME: fold these into legal mask.
12432 if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasInt256))
12433 return getMOVLowToHigh(Op, dl, DAG, HasSSE2);
12435 if (isMOVHLPSMask(M, VT))
12436 return getMOVHighToLow(Op, dl, DAG);
12438 if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget))
12439 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);
12441 if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget))
12442 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);
12444 if (isMOVLPMask(M, VT))
12445 return getMOVLP(Op, dl, DAG, HasSSE2);
12447 if (ShouldXformToMOVHLPS(M, VT) ||
12448 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT))
12449 return DAG.getCommutedVectorShuffle(*SVOp);
12451 if (isShift) {
12452 // No better options. Use a byte shift (pslldq / psrldq).
12453 MVT EltVT = VT.getVectorElementType();
12454 ShAmt *= EltVT.getSizeInBits();
12455 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12456 }
12458 bool Commuted = false;
12459 // FIXME: This should also accept a bitcast of a splat? Be careful, not
12460 // 1,1,1,1 -> v8i16 though.
12461 BitVector UndefElements;
12462 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V1.getNode()))
12463 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12464 V1IsSplat = true;
12465 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V2.getNode()))
12466 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12467 V2IsSplat = true;
12469 // Canonicalize the splat or undef, if present, to be on the RHS.
12470 if (!V2IsUndef && V1IsSplat && !V2IsSplat) {
12471 CommuteVectorShuffleMask(M, NumElems);
12472 std::swap(V1, V2);
12473 std::swap(V1IsSplat, V2IsSplat);
12474 Commuted = true;
12475 }
12477 if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) {
12478 // Shuffling low element of v1 into undef, just return v1.
12479 if (V2IsUndef)
12480 return V1;
12481 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
12482 // the instruction selector will not match, so get a canonical MOVL with
12483 // swapped operands to undo the commute.
12484 return getMOVL(DAG, dl, VT, V2, V1);
12485 }
12487 if (isUNPCKLMask(M, VT, HasInt256))
12488 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12490 if (isUNPCKHMask(M, VT, HasInt256))
12491 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12493 if (V2IsSplat) {
12494 // Normalize mask so all entries that point to V2 point to its first
12495 // element, then try to match unpck{h|l} again. If a match is found, return
12496 // a new vector_shuffle with the corrected mask.
12497 SmallVector<int, 8> NewMask(M.begin(), M.end());
12498 NormalizeMask(NewMask, NumElems);
12499 if (isUNPCKLMask(NewMask, VT, HasInt256, true))
12500 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12501 if (isUNPCKHMask(NewMask, VT, HasInt256, true))
12502 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12503 }
12505 if (Commuted) {
12506 // Commute it back and try unpck* again.
12507 // FIXME: this seems wrong.
12508 CommuteVectorShuffleMask(M, NumElems);
12509 std::swap(V1, V2);
12510 std::swap(V1IsSplat, V2IsSplat);
12512 if (isUNPCKLMask(M, VT, HasInt256))
12513 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12515 if (isUNPCKHMask(M, VT, HasInt256))
12516 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12517 }
12519 // Normalize the node to match x86 shuffle ops if needed
12520 if (!V2IsUndef && (isSHUFPMask(M, VT, /* Commuted */ true)))
12521 return DAG.getCommutedVectorShuffle(*SVOp);
12523 // The checks below are all present in isShuffleMaskLegal, but they are
12524 // inlined here right now to enable us to directly emit target specific
12525 // nodes, and remove one by one until they don't return Op anymore.
12527 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
12528 SVOp->getSplatIndex() == 0 && V2IsUndef) {
12529 if (VT == MVT::v2f64 || VT == MVT::v2i64)
12530 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12531 }
12533 if (isPSHUFHWMask(M, VT, HasInt256))
12534 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
12535 getShufflePSHUFHWImmediate(SVOp),
12536 DAG);
12538 if (isPSHUFLWMask(M, VT, HasInt256))
12539 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
12540 getShufflePSHUFLWImmediate(SVOp),
12541 DAG);
12543 unsigned MaskValue;
12544 if (isBlendMask(M, VT, Subtarget->hasSSE41(), Subtarget->hasInt256(),
12545 &MaskValue))
12546 return LowerVECTOR_SHUFFLEtoBlend(SVOp, MaskValue, Subtarget, DAG);
12548 if (isSHUFPMask(M, VT))
12549 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
12550 getShuffleSHUFImmediate(SVOp), DAG);
12552 if (isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12553 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12554 if (isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12555 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12557 //===--------------------------------------------------------------------===//
12558 // Generate target specific nodes for 128 or 256-bit shuffles only
12559 // supported in the AVX instruction set.
12560 //===--------------------------------------------------------------------===//
12562 // Handle VMOVDDUPY permutations
12563 if (V2IsUndef && isMOVDDUPYMask(M, VT, HasFp256))
12564 return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);
12566 // Handle VPERMILPS/D* permutations
12567 if (isVPERMILPMask(M, VT)) {
12568 if ((HasInt256 && VT == MVT::v8i32) || VT == MVT::v16i32)
12569 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1,
12570 getShuffleSHUFImmediate(SVOp), DAG);
12571 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1,
12572 getShuffleSHUFImmediate(SVOp), DAG);
12573 }
12575 unsigned Idx;
12576 if (VT.is512BitVector() && isINSERT64x4Mask(M, VT, &Idx))
12577 return Insert256BitVector(V1, Extract256BitVector(V2, 0, DAG, dl),
12578 Idx*(NumElems/2), DAG, dl);
12580 // Handle VPERM2F128/VPERM2I128 permutations
12581 if (isVPERM2X128Mask(M, VT, HasFp256))
12582 return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
12583 V2, getShuffleVPERM2X128Immediate(SVOp), DAG);
12585 if (Subtarget->hasSSE41() && isINSERTPSMask(M, VT))
12586 return getINSERTPS(SVOp, dl, DAG);
12588 unsigned Imm8;
12589 if (V2IsUndef && HasInt256 && isPermImmMask(M, VT, Imm8))
12590 return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, Imm8, DAG);
12592 if ((V2IsUndef && HasInt256 && VT.is256BitVector() && NumElems == 8) ||
12593 VT.is512BitVector()) {
12594 MVT MaskEltVT = MVT::getIntegerVT(VT.getVectorElementType().getSizeInBits());
12595 MVT MaskVectorVT = MVT::getVectorVT(MaskEltVT, NumElems);
12596 SmallVector<SDValue, 16> permclMask;
12597 for (unsigned i = 0; i != NumElems; ++i) {
12598 permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MaskEltVT));
12599 }
12601 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVectorVT, permclMask);
12602 if (V2IsUndef)
12603 // Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32
12604 return DAG.getNode(X86ISD::VPERMV, dl, VT,
12605 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1);
12606 return DAG.getNode(X86ISD::VPERMV3, dl, VT, V1,
12607 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V2);
12608 }
12610 //===--------------------------------------------------------------------===//
12611 // Since no target specific shuffle was selected for this generic one,
12612 // lower it into other known shuffles. FIXME: this isn't true yet, but
12613 // this is the plan.
12614 //===--------------------------------------------------------------------===//
12616 // Handle v8i16 specifically since SSE can do byte extraction and insertion.
12617 if (VT == MVT::v8i16) {
12618 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG);
12619 if (NewOp.getNode())
12620 return NewOp;
12621 }
12623 if (VT == MVT::v16i16 && Subtarget->hasInt256()) {
12624 SDValue NewOp = LowerVECTOR_SHUFFLEv16i16(Op, DAG);
12625 if (NewOp.getNode())
12626 return NewOp;
12627 }
12629 if (VT == MVT::v16i8) {
12630 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, Subtarget, DAG);
12631 if (NewOp.getNode())
12632 return NewOp;
12633 }
12635 if (VT == MVT::v32i8) {
12636 SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG);
12637 if (NewOp.getNode())
12638 return NewOp;
12639 }
12641 // Handle all 128-bit wide vectors with 4 elements, and match them with
12642 // several different shuffle types.
12643 if (NumElems == 4 && VT.is128BitVector())
12644 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG);
12646 // Handle general 256-bit shuffles
12647 if (VT.is256BitVector())
12648 return LowerVECTOR_SHUFFLE_256(SVOp, DAG);
12650 return SDValue();
12651 }
12653 // This function assumes its argument is a BUILD_VECTOR of constants or
12654 // undef SDNodes, i.e. ISD::isBuildVectorOfConstantSDNodes(BuildVector) is
12655 // true.
12656 static bool BUILD_VECTORtoBlendMask(BuildVectorSDNode *BuildVector,
12657 unsigned &MaskValue) {
12658 MaskValue = 0;
12659 unsigned NumElems = BuildVector->getNumOperands();
12660 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
12661 unsigned NumLanes = (NumElems - 1) / 8 + 1;
12662 unsigned NumElemsInLane = NumElems / NumLanes;
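// For example, a v4i32 condition <-1, 0, -1, 0> selects LHS, RHS, LHS, RHS
// and encodes as MaskValue = 0b1010: a set bit means the second (RHS)
// operand is taken for that element.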
12664 // Blend for v16i16 should be symmetric for both lanes.
12665 for (unsigned i = 0; i < NumElemsInLane; ++i) {
12666 SDValue EltCond = BuildVector->getOperand(i);
12667 SDValue SndLaneEltCond =
12668 (NumLanes == 2) ? BuildVector->getOperand(i + NumElemsInLane) : EltCond;
12670 int Lane1Cond = -1, Lane2Cond = -1;
12671 if (isa<ConstantSDNode>(EltCond))
12672 Lane1Cond = !isZero(EltCond);
12673 if (isa<ConstantSDNode>(SndLaneEltCond))
12674 Lane2Cond = !isZero(SndLaneEltCond);
12676 if (Lane1Cond == Lane2Cond || Lane2Cond < 0)
12677 // Lane1Cond != 0, means we want the first argument.
12678 // Lane1Cond == 0, means we want the second argument.
12679 // The encoding of this argument is 0 for the first argument, 1
12680 // for the second. Therefore, invert the condition.
12681 MaskValue |= !Lane1Cond << i;
12682 else if (Lane1Cond < 0)
12683 MaskValue |= !Lane2Cond << i;
12684 else
12685 return false;
12686 }
12688 return true;
12689 }
12690 /// \brief Try to lower a VSELECT instruction to an immediate-controlled blend
12691 /// operation.
12692 static SDValue lowerVSELECTtoBLENDI(SDValue Op, const X86Subtarget *Subtarget,
12693 SelectionDAG &DAG) {
12694 SDValue Cond = Op.getOperand(0);
12695 SDValue LHS = Op.getOperand(1);
12696 SDValue RHS = Op.getOperand(2);
12697 SDLoc dl(Op);
12698 MVT VT = Op.getSimpleValueType();
12699 MVT EltVT = VT.getVectorElementType();
12700 unsigned NumElems = VT.getVectorNumElements();
12702 // There is no blend with immediate in AVX-512.
12703 if (VT.is512BitVector())
12704 return SDValue();
12706 if (!Subtarget->hasSSE41() || EltVT == MVT::i8)
12707 return SDValue();
12708 if (!Subtarget->hasInt256() && VT == MVT::v16i16)
12709 return SDValue();
12711 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
12712 return SDValue();
12714 // Check the mask for BLEND and build the value.
12715 unsigned MaskValue = 0;
12716 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
12717 return SDValue();
12719 // Convert i32 vectors to floating point if it is not AVX2.
12720 // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
12721 MVT BlendVT = VT;
12722 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
12723 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
12724 NumElems);
12725 LHS = DAG.getNode(ISD::BITCAST, dl, BlendVT, LHS);
12726 RHS = DAG.getNode(ISD::BITCAST, dl, BlendVT, RHS);
12727 }
12729 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, LHS, RHS,
12730 DAG.getConstant(MaskValue, MVT::i32));
12731 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
12732 }
12734 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
12735 // A vselect where all conditions and data are constants can be optimized into
12736 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
12737 if (ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(0).getNode()) &&
12738 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(1).getNode()) &&
12739 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(2).getNode()))
12740 return SDValue();
12742 SDValue BlendOp = lowerVSELECTtoBLENDI(Op, Subtarget, DAG);
12743 if (BlendOp.getNode())
12744 return BlendOp;
12746 // Some types for vselect were previously set to Expand, not Legal or
12747 // Custom. Return an empty SDValue so we fall-through to Expand, after
12748 // the Custom lowering phase.
12749 MVT VT = Op.getSimpleValueType();
12750 switch (VT.SimpleTy) {
12751 default:
12752 break;
12753 case MVT::v8i16:
12754 case MVT::v16i16:
12755 if (Subtarget->hasBWI() && Subtarget->hasVLX())
12756 break;
12757 return SDValue();
12758 }
12760 // We couldn't create a "Blend with immediate" node.
12761 // This node should still be legal, but we'll have to emit a blendv*
12762 // instruction.
12763 return Op;
12764 }
12766 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
12767 MVT VT = Op.getSimpleValueType();
12768 SDLoc dl(Op);
12770 if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
12771 return SDValue();
12773 if (VT.getSizeInBits() == 8) {
12774 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
12775 Op.getOperand(0), Op.getOperand(1));
12776 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
12777 DAG.getValueType(VT));
12778 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
12781 if (VT.getSizeInBits() == 16) {
12782 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
12783 // If Idx is 0, it's cheaper to do a move instead of a pextrw.
12784 if (Idx == 0)
12785 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
12786 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
12787 DAG.getNode(ISD::BITCAST, dl,
12788 MVT::v4i32,
12789 Op.getOperand(0)),
12790 Op.getOperand(1)));
12791 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
12792 Op.getOperand(0), Op.getOperand(1));
12793 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
12794 DAG.getValueType(VT));
12795 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
12798 if (VT == MVT::f32) {
12799 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
12800 // the result back to FR32 register. It's only worth matching if the
12801 // result has a single use which is a store or a bitcast to i32. And in
12802 // the case of a store, it's not worth it if the index is a constant 0,
12803 // because a MOVSSmr can be used instead, which is smaller and faster.
12804 if (!Op.hasOneUse())
12805 return SDValue();
12806 SDNode *User = *Op.getNode()->use_begin();
12807 if ((User->getOpcode() != ISD::STORE ||
12808 (isa<ConstantSDNode>(Op.getOperand(1)) &&
12809 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
12810 (User->getOpcode() != ISD::BITCAST ||
12811 User->getValueType(0) != MVT::i32))
12812 return SDValue();
12813 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
12814 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
12815 Op.getOperand(0)),
12816 Op.getOperand(1));
12817 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
12818 }
12820 if (VT == MVT::i32 || VT == MVT::i64) {
12821 // ExtractPS/pextrq works with constant index.
12822 if (isa<ConstantSDNode>(Op.getOperand(1)))
12823 return Op;
12824 }
12826 return SDValue();
12827 }
12828 /// Extract one bit from a mask vector, like v16i1 or v8i1.
12829 /// AVX-512 feature.
12830 SDValue
12831 X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const {
12832 SDValue Vec = Op.getOperand(0);
12833 SDLoc dl(Op);
12834 MVT VecVT = Vec.getSimpleValueType();
12835 SDValue Idx = Op.getOperand(1);
12836 MVT EltVT = Op.getSimpleValueType();
12838 assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
12839 assert((VecVT.getVectorNumElements() <= 16 || Subtarget->hasBWI()) &&
12840 "Unexpected vector type in ExtractBitFromMaskVector");
12842 // A variable index can't be handled in mask registers,
12843 // so extend the vector to VR512.
12844 if (!isa<ConstantSDNode>(Idx)) {
12845 MVT ExtVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
12846 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Vec);
12847 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
12848 ExtVT.getVectorElementType(), Ext, Idx);
12849 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
12852 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
12853 const TargetRegisterClass* rc = getRegClassFor(VecVT);
12854 if (!Subtarget->hasDQI() && (VecVT.getVectorNumElements() <= 8))
12855 rc = getRegClassFor(MVT::v16i1);
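// Shift the requested bit up to the MSB of the mask register and then back
// down to bit 0: this clears every other bit, so the VEXTRACT below reads
// exactly the selected bit.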
12856 unsigned MaxShift = rc->getSize()*8 - 1;
12857 Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
12858 DAG.getConstant(MaxShift - IdxVal, MVT::i8));
12859 Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
12860 DAG.getConstant(MaxShift, MVT::i8));
12861 return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec,
12862 DAG.getIntPtrConstant(0));
12863 }
12865 SDValue
12866 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
12867 SelectionDAG &DAG) const {
12868 SDLoc dl(Op);
12869 SDValue Vec = Op.getOperand(0);
12870 MVT VecVT = Vec.getSimpleValueType();
12871 SDValue Idx = Op.getOperand(1);
12873 if (Op.getSimpleValueType() == MVT::i1)
12874 return ExtractBitFromMaskVector(Op, DAG);
12876 if (!isa<ConstantSDNode>(Idx)) {
12877 if (VecVT.is512BitVector() ||
12878 (VecVT.is256BitVector() && Subtarget->hasInt256() &&
12879 VecVT.getVectorElementType().getSizeInBits() == 32)) {
12881 MVT MaskEltVT =
12882 MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
12883 MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
12884 MaskEltVT.getSizeInBits());
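// Materialize the index as lane 0 of an otherwise-zero mask vector, permute
// the source so the requested element lands in lane 0, then extract lane 0
// with a constant index.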
12886 Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT);
12887 SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT,
12888 getZeroVector(MaskVT, Subtarget, DAG, dl),
12889 Idx, DAG.getConstant(0, getPointerTy()));
12890 SDValue Perm = DAG.getNode(X86ISD::VPERMV, dl, VecVT, Mask, Vec);
12891 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(),
12892 Perm, DAG.getConstant(0, getPointerTy()));
12893 }
12894 return SDValue();
12895 }
12897 // If this is a 256-bit vector result, first extract the 128-bit vector and
12898 // then extract the element from the 128-bit vector.
12899 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
12901 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
12902 // Get the 128-bit vector.
12903 Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);
12904 MVT EltVT = VecVT.getVectorElementType();
12906 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
12908 // Reduce the index into the range of the extracted 128-bit chunk,
12909 // i.e. IdxVal %= ElemsPerChunk.
12910 IdxVal -= (IdxVal/ElemsPerChunk)*ElemsPerChunk;
12911 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
12912 DAG.getConstant(IdxVal, MVT::i32));
12913 }
12915 assert(VecVT.is128BitVector() && "Unexpected vector length");
12917 if (Subtarget->hasSSE41()) {
12918 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
12919 if (Res.getNode())
12920 return Res;
12921 }
12923 MVT VT = Op.getSimpleValueType();
12924 // TODO: handle v16i8.
12925 if (VT.getSizeInBits() == 16) {
12926 SDValue Vec = Op.getOperand(0);
12927 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
12928 if (Idx == 0)
12929 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
12930 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
12931 DAG.getNode(ISD::BITCAST, dl,
12932 MVT::v4i32, Vec),
12933 Op.getOperand(1)));
12934 // Transform it so it matches pextrw, which produces a 32-bit result.
12935 MVT EltVT = MVT::i32;
12936 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
12937 Op.getOperand(0), Op.getOperand(1));
12938 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
12939 DAG.getValueType(VT));
12940 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
12943 if (VT.getSizeInBits() == 32) {
12944 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
12945 if (Idx == 0)
12946 return Op;
12948 // SHUFPS the element to the lowest double word, then movss.
12949 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
12950 MVT VVT = Op.getOperand(0).getSimpleValueType();
12951 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
12952 DAG.getUNDEF(VVT), Mask);
12953 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
12954 DAG.getIntPtrConstant(0));
12957 if (VT.getSizeInBits() == 64) {
12958 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
12959 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
12960 // to match extract_elt for f64.
12961 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
12962 if (Idx == 0)
12963 return Op;
12965 // UNPCKHPD the element to the lowest double word, then movsd.
12966 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
12967 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
12968 int Mask[2] = { 1, -1 };
12969 MVT VVT = Op.getOperand(0).getSimpleValueType();
12970 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
12971 DAG.getUNDEF(VVT), Mask);
12972 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
12973 DAG.getIntPtrConstant(0));
12974 }
12976 return SDValue();
12977 }
12979 /// Insert one bit into a mask vector, like v16i1 or v8i1.
12980 /// AVX-512 feature.
12981 SDValue
12982 X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
12983 SDLoc dl(Op);
12984 SDValue Vec = Op.getOperand(0);
12985 SDValue Elt = Op.getOperand(1);
12986 SDValue Idx = Op.getOperand(2);
12987 MVT VecVT = Vec.getSimpleValueType();
12989 if (!isa<ConstantSDNode>(Idx)) {
12990 // Non-constant index. Extend the source and destination,
12991 // insert the element, and then truncate the result.
12992 MVT ExtVecVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
12993 MVT ExtEltVT = (VecVT == MVT::v8i1 ? MVT::i64 : MVT::i32);
12994 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
12995 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVecVT, Vec),
12996 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
12997 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
13000 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13001 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
13002 if (Vec.getOpcode() == ISD::UNDEF)
13003 return DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13004 DAG.getConstant(IdxVal, MVT::i8));
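// Isolate bit 0 of Elt by shifting it up to the MSB and back down to
// position IdxVal, then OR the resulting one-hot vector into Vec.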
13005 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13006 unsigned MaxShift = rc->getSize()*8 - 1;
13007 EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13008 DAG.getConstant(MaxShift, MVT::i8));
13009 EltInVec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, EltInVec,
13010 DAG.getConstant(MaxShift - IdxVal, MVT::i8));
13011 return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
13012 }
13014 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
13015 SelectionDAG &DAG) const {
13016 MVT VT = Op.getSimpleValueType();
13017 MVT EltVT = VT.getVectorElementType();
13019 if (EltVT == MVT::i1)
13020 return InsertBitToMaskVector(Op, DAG);
13022 SDLoc dl(Op);
13023 SDValue N0 = Op.getOperand(0);
13024 SDValue N1 = Op.getOperand(1);
13025 SDValue N2 = Op.getOperand(2);
13026 if (!isa<ConstantSDNode>(N2))
13027 return SDValue();
13028 auto *N2C = cast<ConstantSDNode>(N2);
13029 unsigned IdxVal = N2C->getZExtValue();
13031 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
13032 // into that, and then insert the subvector back into the result.
13033 if (VT.is256BitVector() || VT.is512BitVector()) {
13034 // Get the desired 128-bit vector half.
13035 SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
13037 // Insert the element into the desired half.
13038 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
13039 unsigned IdxIn128 = IdxVal - (IdxVal / NumEltsIn128) * NumEltsIn128;
13041 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
13042 DAG.getConstant(IdxIn128, MVT::i32));
13044 // Insert the changed part back to the 256-bit vector
13045 return Insert128BitVector(N0, V, IdxVal, DAG, dl);
13046 }
13047 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
13049 if (Subtarget->hasSSE41()) {
13050 if (EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) {
13051 unsigned Opc;
13052 if (VT == MVT::v8i16) {
13053 Opc = X86ISD::PINSRW;
13054 } else {
13055 assert(VT == MVT::v16i8);
13056 Opc = X86ISD::PINSRB;
13057 }
13059 // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
13060 // argument.
13061 if (N1.getValueType() != MVT::i32)
13062 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13063 if (N2.getValueType() != MVT::i32)
13064 N2 = DAG.getIntPtrConstant(IdxVal);
13065 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
13066 }
13068 if (EltVT == MVT::f32) {
13069 // Bits [7:6] of the constant are the source select. This will always be
13070 // zero here. The DAG Combiner may combine an extract_elt index into these
13072 // bits. For example (insert (extract, 3), 2) could be matched by putting
13074 // the '3' into bits [7:6] of X86ISD::INSERTPS.
13075 // Bits [5:4] of the constant are the destination select. This is the
13076 // value of the incoming immediate.
13077 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
13078 // combine either bitwise AND or insert of float 0.0 to set these bits.
13079 N2 = DAG.getIntPtrConstant(IdxVal << 4);
13080 // Create this as a scalar to vector..
13081 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
13082 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
13083 }
13085 if (EltVT == MVT::i32 || EltVT == MVT::i64) {
13086 // PINSR* works with constant index.
13087 return Op;
13088 }
13089 }
13091 if (EltVT == MVT::i8)
13092 return SDValue();
13094 if (EltVT.getSizeInBits() == 16) {
13095 // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
13096 // as its second argument.
13097 if (N1.getValueType() != MVT::i32)
13098 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13099 if (N2.getValueType() != MVT::i32)
13100 N2 = DAG.getIntPtrConstant(IdxVal);
13101 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
13102 }
13104 return SDValue();
13105 }
13106 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
13107 SDLoc dl(Op);
13108 MVT OpVT = Op.getSimpleValueType();
13110 // If this is a 256-bit vector result, first insert into a 128-bit
13111 // vector and then insert into the 256-bit vector.
13112 if (!OpVT.is128BitVector()) {
13113 // Insert into a 128-bit vector.
13114 unsigned SizeFactor = OpVT.getSizeInBits()/128;
13115 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
13116 OpVT.getVectorNumElements() / SizeFactor);
13118 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
13120 // Insert the 128-bit vector.
13121 return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
13124 if (OpVT == MVT::v1i64 &&
13125 Op.getOperand(0).getValueType() == MVT::i64)
13126 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
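// For i8/i16 scalars there is no direct move into an XMM register: any-extend
// to i32, place that with a MOVD (SCALAR_TO_VECTOR of v4i32), and bitcast to
// the requested vector type.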
13128 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
13129 assert(OpVT.is128BitVector() && "Expected an SSE type!");
13130 return DAG.getNode(ISD::BITCAST, dl, OpVT,
13131 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
13134 // Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
13135 // a simple subregister reference or explicit instructions to grab
13136 // upper bits of a vector.
13137 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13138 SelectionDAG &DAG) {
13139 SDLoc dl(Op);
13140 SDValue In = Op.getOperand(0);
13141 SDValue Idx = Op.getOperand(1);
13142 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13143 MVT ResVT = Op.getSimpleValueType();
13144 MVT InVT = In.getSimpleValueType();
13146 if (Subtarget->hasFp256()) {
13147 if (ResVT.is128BitVector() &&
13148 (InVT.is256BitVector() || InVT.is512BitVector()) &&
13149 isa<ConstantSDNode>(Idx)) {
13150 return Extract128BitVector(In, IdxVal, DAG, dl);
13151 }
13152 if (ResVT.is256BitVector() && InVT.is512BitVector() &&
13153 isa<ConstantSDNode>(Idx)) {
13154 return Extract256BitVector(In, IdxVal, DAG, dl);
13155 }
13156 }
13158 return SDValue();
13159 }
13160 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
13161 // simple superregister reference or explicit instructions to insert
13162 // the upper bits of a vector.
13163 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13164 SelectionDAG &DAG) {
13165 if (!Subtarget->hasAVX())
13166 return SDValue();
13168 SDLoc dl(Op);
13169 SDValue Vec = Op.getOperand(0);
13170 SDValue SubVec = Op.getOperand(1);
13171 SDValue Idx = Op.getOperand(2);
13172 MVT OpVT = Op.getSimpleValueType();
13173 MVT SubVecVT = SubVec.getSimpleValueType();
13175 if ((OpVT.is256BitVector() || OpVT.is512BitVector()) &&
13176 SubVecVT.is128BitVector() && isa<ConstantSDNode>(Idx)) {
13177 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13178 return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
13181 if (OpVT.is512BitVector() &&
13182 SubVecVT.is256BitVector() && isa<ConstantSDNode>(Idx)) {
13183 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13184 return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
13185 }
13187 return SDValue();
13188 }
13190 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
13191 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
13192 // one of the above mentioned nodes. It has to be wrapped because otherwise
13193 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
13194 // be used to form an addressing mode. These wrapped nodes will be selected
13195 // into MOV32ri.
13196 SDValue
13197 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
13198 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
13200 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13201 // global base reg.
13202 unsigned char OpFlag = 0;
13203 unsigned WrapperKind = X86ISD::Wrapper;
13204 CodeModel::Model M = DAG.getTarget().getCodeModel();
13206 if (Subtarget->isPICStyleRIPRel() &&
13207 (M == CodeModel::Small || M == CodeModel::Kernel))
13208 WrapperKind = X86ISD::WrapperRIP;
13209 else if (Subtarget->isPICStyleGOT())
13210 OpFlag = X86II::MO_GOTOFF;
13211 else if (Subtarget->isPICStyleStubPIC())
13212 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13214 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
13215 CP->getAlignment(),
13216 CP->getOffset(), OpFlag);
13217 SDLoc DL(CP);
13218 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13219 // With PIC, the address is actually $g + Offset.
13220 if (OpFlag) {
13221 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13222 DAG.getNode(X86ISD::GlobalBaseReg,
13223 SDLoc(), getPointerTy()),
13224 Result);
13225 }
13227 return Result;
13228 }
13230 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
13231 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
13233 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13234 // global base reg.
13235 unsigned char OpFlag = 0;
13236 unsigned WrapperKind = X86ISD::Wrapper;
13237 CodeModel::Model M = DAG.getTarget().getCodeModel();
13239 if (Subtarget->isPICStyleRIPRel() &&
13240 (M == CodeModel::Small || M == CodeModel::Kernel))
13241 WrapperKind = X86ISD::WrapperRIP;
13242 else if (Subtarget->isPICStyleGOT())
13243 OpFlag = X86II::MO_GOTOFF;
13244 else if (Subtarget->isPICStyleStubPIC())
13245 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13247 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
13248 OpFlag);
13249 SDLoc DL(JT);
13250 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13252 // With PIC, the address is actually $g + Offset.
13253 if (OpFlag)
13254 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13255 DAG.getNode(X86ISD::GlobalBaseReg,
13256 SDLoc(), getPointerTy()),
13257 Result);
13259 return Result;
13260 }
13262 SDValue
13263 X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
13264 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
13266 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13267 // global base reg.
13268 unsigned char OpFlag = 0;
13269 unsigned WrapperKind = X86ISD::Wrapper;
13270 CodeModel::Model M = DAG.getTarget().getCodeModel();
13272 if (Subtarget->isPICStyleRIPRel() &&
13273 (M == CodeModel::Small || M == CodeModel::Kernel)) {
13274 if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
13275 OpFlag = X86II::MO_GOTPCREL;
13276 WrapperKind = X86ISD::WrapperRIP;
13277 } else if (Subtarget->isPICStyleGOT()) {
13278 OpFlag = X86II::MO_GOT;
13279 } else if (Subtarget->isPICStyleStubPIC()) {
13280 OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
13281 } else if (Subtarget->isPICStyleStubNoDynamic()) {
13282 OpFlag = X86II::MO_DARWIN_NONLAZY;
13285 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);
13287 SDLoc DL(Op);
13288 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13290 // With PIC, the address is actually $g + Offset.
13291 if (DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
13292 !Subtarget->is64Bit()) {
13293 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13294 DAG.getNode(X86ISD::GlobalBaseReg,
13295 SDLoc(), getPointerTy()),
13296 Result);
13297 }
13299 // For symbols that require a load from a stub to get the address, emit the
13300 // load.
13301 if (isGlobalStubReference(OpFlag))
13302 Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result,
13303 MachinePointerInfo::getGOT(), false, false, false, 0);
13305 return Result;
13306 }
13308 SDValue
13309 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
13310 // Create the TargetBlockAddressAddress node.
13311 unsigned char OpFlags =
13312 Subtarget->ClassifyBlockAddressReference();
13313 CodeModel::Model M = DAG.getTarget().getCodeModel();
13314 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
13315 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
13317 SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset,
13318 OpFlags);
13319 SDLoc dl(Op);
13320 if (Subtarget->isPICStyleRIPRel() &&
13321 (M == CodeModel::Small || M == CodeModel::Kernel))
13322 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13324 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13326 // With PIC, the address is actually $g + Offset.
13327 if (isGlobalRelativeToPICBase(OpFlags)) {
13328 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13329 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13330 Result);
13331 }
13333 return Result;
13334 }
13336 SDValue
13337 X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
13338 int64_t Offset, SelectionDAG &DAG) const {
13339 // Create the TargetGlobalAddress node, folding in the constant
13340 // offset if it is legal.
13341 unsigned char OpFlags =
13342 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget());
13343 CodeModel::Model M = DAG.getTarget().getCodeModel();
13344 SDValue Result;
13345 if (OpFlags == X86II::MO_NO_FLAG &&
13346 X86::isOffsetSuitableForCodeModel(Offset, M)) {
13347 // A direct static reference to a global.
13348 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
13349 Offset = 0;
13350 } else {
13351 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
13352 }
13354 if (Subtarget->isPICStyleRIPRel() &&
13355 (M == CodeModel::Small || M == CodeModel::Kernel))
13356 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13358 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13360 // With PIC, the address is actually $g + Offset.
13361 if (isGlobalRelativeToPICBase(OpFlags)) {
13362 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13363 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13367 // For globals that require a load from a stub to get the address, emit the
13368 // load.
13369 if (isGlobalStubReference(OpFlags))
13370 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
13371 MachinePointerInfo::getGOT(), false, false, false, 0);
13373 // If there was a non-zero offset that we didn't fold, create an explicit
13374 // addition for it.
13375 if (Offset != 0)
13376 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
13377 DAG.getConstant(Offset, getPointerTy()));
13379 return Result;
13380 }
13382 SDValue
13383 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
13384 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
13385 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
13386 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
13387 }
13389 static SDValue
13390 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
13391 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
13392 unsigned char OperandFlags, bool LocalDynamic = false) {
13393 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13394 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13395 SDLoc dl(GA);
13396 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13397 GA->getValueType(0),
13398 GA->getOffset(),
13399 OperandFlags);
13401 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
13402 : X86ISD::TLSADDR;
13404 if (InFlag) {
13405 SDValue Ops[] = { Chain, TGA, *InFlag };
13406 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13407 } else {
13408 SDValue Ops[] = { Chain, TGA };
13409 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13410 }
13412 // TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
13413 MFI->setAdjustsStack(true);
13414 MFI->setHasCalls(true);
13416 SDValue Flag = Chain.getValue(1);
13417 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
13418 }
13420 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
13421 static SDValue
13422 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13423 const EVT PtrVT) {
13424 SDValue InFlag;
13425 SDLoc dl(GA); // ? function entry point might be better
13426 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13427 DAG.getNode(X86ISD::GlobalBaseReg,
13428 SDLoc(), PtrVT), InFlag);
13429 InFlag = Chain.getValue(1);
13431 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
13432 }
13434 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
13435 static SDValue
13436 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13437 const EVT PtrVT) {
13438 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
13439 X86::RAX, X86II::MO_TLSGD);
13440 }
13442 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
13443 SelectionDAG &DAG,
13444 const EVT PtrVT,
13445 bool is64Bit) {
13446 SDLoc dl(GA);
13448 // Get the start address of the TLS block for this module.
13449 X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
13450 .getInfo<X86MachineFunctionInfo>();
13451 MFI->incNumLocalDynamicTLSAccesses();
13453 SDValue Base;
13454 if (is64Bit) {
13455 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
13456 X86II::MO_TLSLD, /*LocalDynamic=*/true);
13457 } else {
13458 SDValue InFlag;
13459 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13460 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
13461 InFlag = Chain.getValue(1);
13462 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
13463 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
13464 }
13466 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
13467 // of Base.
13469 // Build x@dtpoff.
13470 unsigned char OperandFlags = X86II::MO_DTPOFF;
13471 unsigned WrapperKind = X86ISD::Wrapper;
13472 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13473 GA->getValueType(0),
13474 GA->getOffset(), OperandFlags);
13475 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13477 // Add x@dtpoff with the base.
13478 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
13479 }
13481 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
13482 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13483 const EVT PtrVT, TLSModel::Model model,
13484 bool is64Bit, bool isPIC) {
13485 SDLoc dl(GA);
13487 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
13488 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
13489 is64Bit ? 257 : 256));
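// LLVM address spaces 256 and 257 select the x86 GS and FS segments, so this
// load reads %gs:0 (32-bit) or %fs:0 (64-bit), i.e. the thread pointer.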
13491 SDValue ThreadPointer =
13492 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0),
13493 MachinePointerInfo(Ptr), false, false, false, 0);
13495 unsigned char OperandFlags = 0;
13496 // Most TLS accesses are not RIP relative, even on x86-64. One exception is
13497 // initial-exec.
13498 unsigned WrapperKind = X86ISD::Wrapper;
13499 if (model == TLSModel::LocalExec) {
13500 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
13501 } else if (model == TLSModel::InitialExec) {
13502 if (is64Bit) {
13503 OperandFlags = X86II::MO_GOTTPOFF;
13504 WrapperKind = X86ISD::WrapperRIP;
13505 } else {
13506 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
13507 }
13508 } else {
13509 llvm_unreachable("Unexpected model");
13510 }
13512 // emit "addl x@ntpoff,%eax" (local exec)
13513 // or "addl x@indntpoff,%eax" (initial exec)
13514 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
13515 SDValue TGA =
13516 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
13517 GA->getOffset(), OperandFlags);
13518 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13520 if (model == TLSModel::InitialExec) {
13521 if (isPIC && !is64Bit) {
13522 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
13523 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
13524 Offset);
13525 }
13527 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
13528 MachinePointerInfo::getGOT(), false, false, false, 0);
13529 }
13531 // The address of the thread local variable is the add of the thread
13532 // pointer with the offset of the variable.
13533 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
13534 }
13536 SDValue
13537 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
13539 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
13540 const GlobalValue *GV = GA->getGlobal();
13542 if (Subtarget->isTargetELF()) {
13543 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
13545 switch (model) {
13546 case TLSModel::GeneralDynamic:
13547 if (Subtarget->is64Bit())
13548 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
13549 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
13550 case TLSModel::LocalDynamic:
13551 return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(),
13552 Subtarget->is64Bit());
13553 case TLSModel::InitialExec:
13554 case TLSModel::LocalExec:
13555 return LowerToTLSExecModel(
13556 GA, DAG, getPointerTy(), model, Subtarget->is64Bit(),
13557 DAG.getTarget().getRelocationModel() == Reloc::PIC_);
13558 }
13559 llvm_unreachable("Unknown TLS model.");
13560 }
13562 if (Subtarget->isTargetDarwin()) {
13563 // Darwin only has one model of TLS. Lower to that.
13564 unsigned char OpFlag = 0;
13565 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
13566 X86ISD::WrapperRIP : X86ISD::Wrapper;
13568 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13569 // global base reg.
13570 bool PIC32 = (DAG.getTarget().getRelocationModel() == Reloc::PIC_) &&
13571 !Subtarget->is64Bit();
13572 if (PIC32)
13573 OpFlag = X86II::MO_TLVP_PIC_BASE;
13574 else
13575 OpFlag = X86II::MO_TLVP;
13576 SDLoc DL(Op);
13577 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
13578 GA->getValueType(0),
13579 GA->getOffset(), OpFlag);
13580 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13582 // With PIC32, the address is actually $g + Offset.
13583 if (PIC32)
13584 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13585 DAG.getNode(X86ISD::GlobalBaseReg,
13586 SDLoc(), getPointerTy()),
13587 Offset);
13589 // Lowering the machine isd will make sure everything is in the right
13590 // location.
13591 SDValue Chain = DAG.getEntryNode();
13592 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13593 SDValue Args[] = { Chain, Offset };
13594 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
13596 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
13597 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13598 MFI->setAdjustsStack(true);
13600 // And our return value (tls address) is in the standard call return value
13601 // location.
13602 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
13603 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
13604 Chain.getValue(1));
13605 }
13607 if (Subtarget->isTargetKnownWindowsMSVC() ||
13608 Subtarget->isTargetWindowsGNU()) {
13609 // Just use the implicit TLS architecture
13610 // Need to generate something similar to:
13611 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
13612 // ; from TEB
13613 // mov ecx, dword [rel _tls_index]: Load index (from C runtime)
13614 // mov rcx, qword [rdx+rcx*8]
13615 // mov eax, .tls$:tlsvar
13616 // [rax+rcx] contains the address
13617 // Windows 64bit: gs:0x58
13618 // Windows 32bit: fs:__tls_array
13620 SDLoc dl(GA);
13621 SDValue Chain = DAG.getEntryNode();
13623 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
13624 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
13625 // use its literal value of 0x2C.
13626 Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
13627 ? Type::getInt8PtrTy(*DAG.getContext(),
13628 256)
13629 : Type::getInt32PtrTy(*DAG.getContext(),
13630 257));
13632 SDValue TlsArray =
13633 Subtarget->is64Bit()
13634 ? DAG.getIntPtrConstant(0x58)
13635 : (Subtarget->isTargetWindowsGNU()
13636 ? DAG.getIntPtrConstant(0x2C)
13637 : DAG.getExternalSymbol("_tls_array", getPointerTy()));
13639 SDValue ThreadPointer =
13640 DAG.getLoad(getPointerTy(), dl, Chain, TlsArray,
13641 MachinePointerInfo(Ptr), false, false, false, 0);
13643 // Load the _tls_index variable
13644 SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
13645 if (Subtarget->is64Bit())
13646 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain,
13647 IDX, MachinePointerInfo(), MVT::i32,
13648 false, false, false, 0);
13649 else
13650 IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
13651 false, false, false, 0);
13653 SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
13654 getPointerTy());
13655 IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);
13657 SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
13658 res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(),
13659 false, false, false, 0);
13661 // Get the offset of start of .tls section
13662 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13663 GA->getValueType(0),
13664 GA->getOffset(), X86II::MO_SECREL);
13665 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA);
13667 // The address of the thread local variable is the add of the thread
13668 // pointer with the offset of the variable.
13669 return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset);
13670 }
13672 llvm_unreachable("TLS not implemented for this target.");
13673 }
13675 /// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
13676 /// and take a 2 x i32 value to shift plus a shift amount.
13677 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
13678 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
13679 MVT VT = Op.getSimpleValueType();
13680 unsigned VTBits = VT.getSizeInBits();
13681 SDLoc dl(Op);
13682 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
13683 SDValue ShOpLo = Op.getOperand(0);
13684 SDValue ShOpHi = Op.getOperand(1);
13685 SDValue ShAmt = Op.getOperand(2);
13686 // X86ISD::SHLD and X86ISD::SHRD have defined overflow behavior but the
13687 // generic ISD nodes haven't. Insert an AND to be safe; it's optimized away
13688 // during isel.
13689 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
13690 DAG.getConstant(VTBits - 1, MVT::i8));
13691 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
13692 DAG.getConstant(VTBits - 1, MVT::i8))
13693 : DAG.getConstant(0, VT);
13695 SDValue Tmp2, Tmp3;
13696 if (Op.getOpcode() == ISD::SHL_PARTS) {
13697 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
13698 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
13699 } else {
13700 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
13701 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
13702 }
13704 // If the shift amount is larger or equal than the width of a part we can't
13705 // rely on the results of shld/shrd. Insert a test and select the appropriate
13706 // values for large shift amounts.
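// For a 64-bit shift on 32-bit x86 (VTBits == 32), ANDing the amount with 32
// tests whether the shift crosses the register boundary; when it does, the
// CMOVs below pick the precomputed large-shift results instead.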
13707 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
13708 DAG.getConstant(VTBits, MVT::i8));
13709 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
13710 AndNode, DAG.getConstant(0, MVT::i8));
13712 SDValue Hi, Lo;
13713 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
13714 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
13715 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
13717 if (Op.getOpcode() == ISD::SHL_PARTS) {
13718 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
13719 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
13720 } else {
13721 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
13722 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
13723 }
13725 SDValue Ops[2] = { Lo, Hi };
13726 return DAG.getMergeValues(Ops, dl);
13729 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
13730 SelectionDAG &DAG) const {
13731 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
13732 SDLoc dl(Op);
13734 if (SrcVT.isVector()) {
13735 if (SrcVT.getVectorElementType() == MVT::i1) {
13736 MVT IntegerVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
13737 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
13738 DAG.getNode(ISD::SIGN_EXTEND, dl, IntegerVT,
13739 Op.getOperand(0)));
13740 }
13741 return SDValue();
13742 }
13744 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
13745 "Unknown SINT_TO_FP to lower!");
13747 // These are really Legal; return the operand so the caller accepts it as
13748 // Legal.
13749 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
13750 return Op;
13751 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
13752 Subtarget->is64Bit()) {
13753 return Op;
13754 }
13756 unsigned Size = SrcVT.getSizeInBits()/8;
13757 MachineFunction &MF = DAG.getMachineFunction();
13758 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
13759 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
13760 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
13761 StackSlot,
13762 MachinePointerInfo::getFixedStack(SSFI),
13763 false, false, 0);
13764 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
13765 }
13767 SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
13768 SDValue StackSlot,
13769 SelectionDAG &DAG) const {
13770 SDLoc DL(Op);
13771 // Build the FILD.
13772 SDVTList Tys;
13773 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
13774 if (useSSE)
13775 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
13776 else
13777 Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
13779 unsigned ByteSize = SrcVT.getSizeInBits()/8;
13781 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
13782 MachineMemOperand *MMO;
13783 if (FI) {
13784 int SSFI = FI->getIndex();
13785 MMO =
13786 DAG.getMachineFunction()
13787 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
13788 MachineMemOperand::MOLoad, ByteSize, ByteSize);
13789 } else {
13790 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
13791 StackSlot = StackSlot.getOperand(1);
13792 }
13793 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
13794 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
13795 X86ISD::FILD, DL,
13796 Tys, Ops, SrcVT, MMO);
13798 if (useSSE) {
13799 Chain = Result.getValue(1);
13800 SDValue InFlag = Result.getValue(2);
13802 // FIXME: Currently the FST is flagged to the FILD_FLAG. This
13803 // shouldn't be necessary except that RFP cannot be live across
13804 // multiple blocks. When stackifier is fixed, they can be uncoupled.
13805 MachineFunction &MF = DAG.getMachineFunction();
13806 unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
13807 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
13808 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
13809 Tys = DAG.getVTList(MVT::Other);
13810 SDValue Ops[] = {
13811 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
13812 };
13813 MachineMemOperand *MMO =
13814 DAG.getMachineFunction()
13815 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
13816 MachineMemOperand::MOStore, SSFISize, SSFISize);
13818 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
13819 Ops, Op.getValueType(), MMO);
13820 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
13821 MachinePointerInfo::getFixedStack(SSFI),
13822 false, false, false, 0);
13823 }
13825 return Result;
13826 }
13828 // LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
13829 SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
13830 SelectionDAG &DAG) const {
13831 // This algorithm is not obvious. Here is what we're trying to output:
13832 /*
13833 movq %rax, %xmm0
13834 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
13835 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
13836 #ifdef __SSE3__
13837 haddpd %xmm0, %xmm0
13838 #else
13839 pshufd $0x4e, %xmm0, %xmm1
13840 addpd %xmm1, %xmm0
13841 #endif
13842 */
13844 SDLoc dl(Op);
13845 LLVMContext *Context = DAG.getContext();
13847 // Build some magic constants.
13848 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
13849 Constant *C0 = ConstantDataVector::get(*Context, CV0);
13850 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);
13852 SmallVector<Constant*,2> CV1;
13853 CV1.push_back(
13854 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
13855 APInt(64, 0x4330000000000000ULL))));
13856 CV1.push_back(
13857 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
13858 APInt(64, 0x4530000000000000ULL))));
13859 Constant *C1 = ConstantVector::get(CV1);
13860 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);
13862 // Load the 64-bit value into an XMM register.
13863 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
13864 Op.getOperand(0));
13865 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
13866 MachinePointerInfo::getConstantPool(),
13867 false, false, false, 16);
13868 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32,
13869 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1),
13870 CLod0);
13872 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
13873 MachinePointerInfo::getConstantPool(),
13874 false, false, false, 16);
13875 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1);
13876 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
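// At this point the two f64 lanes hold (2^52 + lo32) - 2^52 and
// (2^84 + hi32*2^32) - 2^84, i.e. lo32 and hi32*2^32; summing the two lanes
// reconstructs the full 64-bit value as a double.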
13878 SDValue Result;
13879 if (Subtarget->hasSSE3()) {
13880 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
13881 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
13882 } else {
13883 SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub);
13884 SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
13885 S2F, 0x4E, DAG);
13886 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
13887 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle),
13888 Sub);
13889 }
13891 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
13892 DAG.getIntPtrConstant(0));
13893 }
13895 // LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
13896 SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
13897 SelectionDAG &DAG) const {
13898 SDLoc dl(Op);
13899 // FP constant to bias correct the final result.
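// 0x4330000000000000 is the double 2^52. OR-ing a zero-extended 32-bit value
// into its mantissa yields exactly the double 2^52 + x, so the input is
// recovered as (bias | x) - bias with no int-to-fp instruction.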
13900 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
13901 MVT::f64);
13903 // Load the 32-bit value into an XMM register.
13904 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
13905 Op.getOperand(0));
13907 // Zero out the upper parts of the register.
13908 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
13910 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
13911 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
13912 DAG.getIntPtrConstant(0));
13914 // Or the load with the bias.
13915 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
13916 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
13917 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
13918 MVT::v2f64, Load)),
13919 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
13920 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
13921 MVT::v2f64, Bias)));
13922 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
13923 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
13924 DAG.getIntPtrConstant(0));
13926 // Subtract the bias.
13927 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
13929 // Handle final rounding.
13930 EVT DestVT = Op.getValueType();
13932 if (DestVT.bitsLT(MVT::f64))
13933 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
13934 DAG.getIntPtrConstant(0));
13935 if (DestVT.bitsGT(MVT::f64))
13936 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
13938 // No extra rounding is needed: the destination is already f64.
13939 return Sub;
13940 }
13942 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
13943 const X86Subtarget &Subtarget) {
13944 // The algorithm is the following:
13945 // #ifdef __SSE4_1__
13946 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
13947 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
13948 // (uint4) 0x53000000, 0xaa);
13949 // #else
13950 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
13951 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
13952 // #endif
13953 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
13954 // return (float4) lo + fhi;
13956 SDLoc DL(Op);
13957 SDValue V = Op->getOperand(0);
13958 EVT VecIntVT = V.getValueType();
13959 bool Is128 = VecIntVT == MVT::v4i32;
13960 EVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
13961 // If we convert to something other than the supported type, e.g., to v4f64,
13962 // abort early.
13963 if (VecFloatVT != Op->getValueType(0))
13964 return SDValue();
13966 unsigned NumElts = VecIntVT.getVectorNumElements();
13967 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
13968 "Unsupported custom type");
13969 assert(NumElts <= 8 && "The size of the constant array must be fixed");
13971 // In the #ifdef/#else code, we have in common:
13972 // - The vector of constants:
13973 // -- 0x4b000000
13974 // -- 0x53000000
13975 // - A shift:
13976 // -- v >> 16
13978 // Create the splat vector for 0x4b000000.
13979 SDValue CstLow = DAG.getConstant(0x4b000000, MVT::i32);
13980 SDValue CstLowArray[] = {CstLow, CstLow, CstLow, CstLow,
13981 CstLow, CstLow, CstLow, CstLow};
13982 SDValue VecCstLow = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
13983 makeArrayRef(&CstLowArray[0], NumElts));
13984 // Create the splat vector for 0x53000000.
13985 SDValue CstHigh = DAG.getConstant(0x53000000, MVT::i32);
13986 SDValue CstHighArray[] = {CstHigh, CstHigh, CstHigh, CstHigh,
13987 CstHigh, CstHigh, CstHigh, CstHigh};
13988 SDValue VecCstHigh = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
13989 makeArrayRef(&CstHighArray[0], NumElts));
13991 // Create the right shift.
13992 SDValue CstShift = DAG.getConstant(16, MVT::i32);
13993 SDValue CstShiftArray[] = {CstShift, CstShift, CstShift, CstShift,
13994 CstShift, CstShift, CstShift, CstShift};
13995 SDValue VecCstShift = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
13996 makeArrayRef(&CstShiftArray[0], NumElts));
13997 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
13999 SDValue Low, High;
14000 if (Subtarget.hasSSE41()) {
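// With an immediate mask of 0xaa, PBLENDW keeps the low 16 bits of each
// 32-bit lane from the value and takes the high 16 bits from the constant,
// implementing (v & 0xffff) | 0x4b000000 in a single instruction.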
14001 EVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
14002 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14003 SDValue VecCstLowBitcast =
14004 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstLow);
14005 SDValue VecBitcast = DAG.getNode(ISD::BITCAST, DL, VecI16VT, V);
14006 // Low will be bitcasted right away, so do not bother bitcasting back to its
14007 // original type.
14008 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
14009 VecCstLowBitcast, DAG.getConstant(0xaa, MVT::i32));
14010 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14011 // (uint4) 0x53000000, 0xaa);
14012 SDValue VecCstHighBitcast =
14013 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstHigh);
14014 SDValue VecShiftBitcast =
14015 DAG.getNode(ISD::BITCAST, DL, VecI16VT, HighShift);
14016 // High will be bitcasted right away, so do not bother bitcasting back to
14017 // its original type.
14018 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
14019 VecCstHighBitcast, DAG.getConstant(0xaa, MVT::i32));
14020 } else {
14021 SDValue CstMask = DAG.getConstant(0xffff, MVT::i32);
14022 SDValue VecCstMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, CstMask,
14023 CstMask, CstMask, CstMask);
14024 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14025 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
14026 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
14028 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14029 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
14030 }
14032 // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
14033 SDValue CstFAdd = DAG.getConstantFP(
14034 APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), MVT::f32);
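// Encoding check (illustrative): 0xD3000080 has the sign bit set, exponent
// 0xA6 (i.e. 2^39) and mantissa 0x80 = 128, giving a magnitude of
// 2^39 * (1 + 128/2^23) = 2^39 + 2^23, so this is -(0x1.0p39f + 0x1.0p23f).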
14035 SDValue CstFAddArray[] = {CstFAdd, CstFAdd, CstFAdd, CstFAdd,
14036 CstFAdd, CstFAdd, CstFAdd, CstFAdd};
14037 SDValue VecCstFAdd = DAG.getNode(ISD::BUILD_VECTOR, DL, VecFloatVT,
14038 makeArrayRef(&CstFAddArray[0], NumElts));
14040 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14041 SDValue HighBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, High);
14042 SDValue FHigh =
14043 DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
14044 // return (float4) lo + fhi;
14045 SDValue LowBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, Low);
14046 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
14047 }
14049 SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
14050 SelectionDAG &DAG) const {
14051 SDValue N0 = Op.getOperand(0);
14052 MVT SVT = N0.getSimpleValueType();
14054 SDLoc dl(Op);
14055 switch (SVT.SimpleTy) {
14056 default:
14057 llvm_unreachable("Custom UINT_TO_FP is not supported!");
14058 case MVT::v4i8:
14059 case MVT::v4i16:
14060 case MVT::v8i8:
14061 case MVT::v8i16: {
14062 MVT NVT = MVT::getVectorVT(MVT::i32, SVT.getVectorNumElements());
14063 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14064 DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
14065 }
14066 case MVT::v4i32:
14067 case MVT::v8i32:
14068 return lowerUINT_TO_FP_vXi32(Op, DAG, *Subtarget);
14069 }
14070 llvm_unreachable(nullptr);
14071 }
14073 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
14074 SelectionDAG &DAG) const {
14075 SDValue N0 = Op.getOperand(0);
14076 SDLoc dl(Op);
14078 if (Op.getValueType().isVector())
14079 return lowerUINT_TO_FP_vec(Op, DAG);
14081 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
14082 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
14083 // the optimization here.
14084 if (DAG.SignBitIsZero(N0))
14085 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
14087 MVT SrcVT = N0.getSimpleValueType();
14088 MVT DstVT = Op.getSimpleValueType();
14089 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
14090 return LowerUINT_TO_FP_i64(Op, DAG);
14091 if (SrcVT == MVT::i32 && X86ScalarSSEf64)
14092 return LowerUINT_TO_FP_i32(Op, DAG);
14093 if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
14094 return SDValue();
14096 // Make a 64-bit buffer, and use it to build an FILD.
14097 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
14098 if (SrcVT == MVT::i32) {
14099 SDValue WordOff = DAG.getConstant(4, getPointerTy());
14100 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
14101 getPointerTy(), StackSlot, WordOff);
14102 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14103 StackSlot, MachinePointerInfo(),
14104 false, false, 0);
14105 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
14106 OffsetSlot, MachinePointerInfo(),
14107 false, false, 0);
14108 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
14109 return Fild;
14110 }
14112 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
14113 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14114 StackSlot, MachinePointerInfo(),
14115 false, false, 0);
14116 // For i64 source, we need to add the appropriate power of 2 if the input
14117 // was negative. This is the same as the optimization in
14118 // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
14119 // we must be careful to do the computation in x87 extended precision, not
14120 // in SSE. (The generic code can't know it's OK to do this, or how to.)
14121 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
14122 MachineMemOperand *MMO =
14123 DAG.getMachineFunction()
14124 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14125 MachineMemOperand::MOLoad, 8, 8);
14127 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
14128 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
14129 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
14130 MVT::i64, MMO);
14132 APInt FF(32, 0x5F800000ULL);
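// Illustrative note: 0x5F800000 is 2^64 encoded as an IEEE-754 float. FILD
// reads the stored i64 as signed, so a bit pattern with the sign bit set
// loads as (input - 2^64); adding this 2^64 fudge in x87 f80 arithmetic
// (where the sum fits the 64-bit significand) restores the unsigned value.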
14134 // Check whether the sign bit is set.
14135 SDValue SignSet = DAG.getSetCC(dl,
14136 getSetCCResultType(*DAG.getContext(), MVT::i64),
14137 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
14138 ISD::SETLT);
14140 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
14141 SDValue FudgePtr = DAG.getConstantPool(
14142 ConstantInt::get(*DAG.getContext(), FF.zext(64)),
14143 getPointerTy());
14145 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
14146 SDValue Zero = DAG.getIntPtrConstant(0);
14147 SDValue Four = DAG.getIntPtrConstant(4);
14148 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
14149 Zero, Four);
14150 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);
14152 // Load the value out, extending it from f32 to f80.
14153 // FIXME: Avoid the extend by constructing the right constant pool?
14154 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
14155 FudgePtr, MachinePointerInfo::getConstantPool(),
14156 MVT::f32, false, false, false, 4);
14157 // Extend everything to 80 bits to force it to be done on x87.
14158 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
14159 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
14160 }
14162 std::pair<SDValue,SDValue>
14163 X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
14164 bool IsSigned, bool IsReplace) const {
14165 SDLoc DL(Op);
14167 EVT DstTy = Op.getValueType();
14169 if (!IsSigned && !isIntegerTypeFTOL(DstTy)) {
14170 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
14171 DstTy = MVT::i64;
14172 }
14174 assert(DstTy.getSimpleVT() <= MVT::i64 &&
14175 DstTy.getSimpleVT() >= MVT::i16 &&
14176 "Unknown FP_TO_INT to lower!");
14178 // These are really Legal.
14179 if (DstTy == MVT::i32 &&
14180 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14181 return std::make_pair(SDValue(), SDValue());
14182 if (Subtarget->is64Bit() &&
14183 DstTy == MVT::i64 &&
14184 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14185 return std::make_pair(SDValue(), SDValue());
14187 // We lower FP->int64 either into FISTP64 followed by a load from a temporary
14188 // stack slot, or into the FTOL runtime function.
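// For the FISTP64 path the selected code is roughly (an illustrative sketch
// of the typical expansion, not verbatim output):
//   fld   <value>            ; push the FP value onto the x87 stack
//   fistp qword ptr [slot]   ; FP_TO_INT64_IN_MEM into the stack temporary
//                            ; (with FNSTCW/FLDCW fiddling to force truncation)
//   mov   eax, [slot]        ; reload the integer result
//   mov   edx, [slot+4]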
14189 MachineFunction &MF = DAG.getMachineFunction();
14190 unsigned MemSize = DstTy.getSizeInBits()/8;
14191 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14192 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14194 unsigned Opc;
14195 if (!IsSigned && isIntegerTypeFTOL(DstTy))
14196 Opc = X86ISD::WIN_FTOL;
14197 else
14198 switch (DstTy.getSimpleVT().SimpleTy) {
14199 default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
14200 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
14201 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
14202 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
14203 }
14205 SDValue Chain = DAG.getEntryNode();
14206 SDValue Value = Op.getOperand(0);
14207 EVT TheVT = Op.getOperand(0).getValueType();
14208 // FIXME This causes a redundant load/store if the SSE-class value is already
14209 // in memory, such as if it is on the callstack.
14210 if (isScalarFPTypeInSSEReg(TheVT)) {
14211 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
14212 Chain = DAG.getStore(Chain, DL, Value, StackSlot,
14213 MachinePointerInfo::getFixedStack(SSFI),
14214 false, false, 0);
14215 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
14216 SDValue Ops[] = {
14217 Chain, StackSlot, DAG.getValueType(TheVT)
14218 };
14220 MachineMemOperand *MMO =
14221 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14222 MachineMemOperand::MOLoad, MemSize, MemSize);
14223 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
14224 Chain = Value.getValue(1);
14225 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14226 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14227 }
14229 MachineMemOperand *MMO =
14230 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14231 MachineMemOperand::MOStore, MemSize, MemSize);
14233 if (Opc != X86ISD::WIN_FTOL) {
14234 // Build the FP_TO_INT*_IN_MEM
14235 SDValue Ops[] = { Chain, Value, StackSlot };
14236 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
14237 Ops, DstTy, MMO);
14238 return std::make_pair(FIST, StackSlot);
14239 } else {
14240 SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL,
14241 DAG.getVTList(MVT::Other, MVT::Glue),
14242 Chain, Value);
14243 SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX,
14244 MVT::i32, ftol.getValue(1));
14245 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX,
14246 MVT::i32, eax.getValue(2));
14247 SDValue Ops[] = { eax, edx };
14248 SDValue pair = IsReplace
14249 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops)
14250 : DAG.getMergeValues(Ops, DL);
14251 return std::make_pair(pair, SDValue());
14252 }
14253 }
14255 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
14256 const X86Subtarget *Subtarget) {
14257 MVT VT = Op->getSimpleValueType(0);
14258 SDValue In = Op->getOperand(0);
14259 MVT InVT = In.getSimpleValueType();
14260 SDLoc dl(Op);
14262 // Optimize vectors in AVX mode:
14264 // v8i16 -> v8i32
14265 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32.
14266 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
14267 // Concat upper and lower parts.
14269 // v4i32 -> v4i64
14270 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64.
14271 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
14272 // Concat upper and lower parts.
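// For example (an illustrative sketch): zext v8i16 %x to v8i32 without
// AVX2 becomes
//   OpLo = vpunpcklwd %x, %zero   ; interleave the low 4 words with zeros
//   OpHi = vpunpckhwd %x, %zero   ; interleave the high 4 words with zeros
//   concat_vectors(OpLo, OpHi)    ; reinterpreted as v8i32
// For ANY_EXTEND the zero vector may be replaced by undef, as below.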
14275 if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) &&
14276 ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) &&
14277 ((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
14278 return SDValue();
14280 if (Subtarget->hasInt256())
14281 return DAG.getNode(X86ISD::VZEXT, dl, VT, In);
14283 SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
14284 SDValue Undef = DAG.getUNDEF(InVT);
14285 bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
14286 SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14287 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14289 MVT HVT = MVT::getVectorVT(VT.getVectorElementType(),
14290 VT.getVectorNumElements()/2);
14292 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo);
14293 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi);
14295 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
14296 }
14298 static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
14299 SelectionDAG &DAG) {
14300 MVT VT = Op->getSimpleValueType(0);
14301 SDValue In = Op->getOperand(0);
14302 MVT InVT = In.getSimpleValueType();
14303 SDLoc DL(Op);
14304 unsigned int NumElts = VT.getVectorNumElements();
14305 if (NumElts != 8 && NumElts != 16)
14306 return SDValue();
14308 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
14309 return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
14311 EVT ExtVT = (NumElts == 8)? MVT::v8i64 : MVT::v16i32;
14312 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14313 // Now we have only mask extension
14314 assert(InVT.getVectorElementType() == MVT::i1);
14315 SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
14316 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14317 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
14318 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14319 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14320 MachinePointerInfo::getConstantPool(),
14321 false, false, false, Alignment);
14323 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, DL, ExtVT, In, Ld);
14324 if (VT.is512BitVector())
14325 return Brcst;
14326 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Brcst);
14327 }
14329 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14330 SelectionDAG &DAG) {
14331 if (Subtarget->hasFp256()) {
14332 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14333 if (Res.getNode())
14334 return Res;
14335 }
14337 return SDValue();
14338 }
14340 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14341 SelectionDAG &DAG) {
14342 SDLoc DL(Op);
14343 MVT VT = Op.getSimpleValueType();
14344 SDValue In = Op.getOperand(0);
14345 MVT SVT = In.getSimpleValueType();
14347 if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
14348 return LowerZERO_EXTEND_AVX512(Op, DAG);
14350 if (Subtarget->hasFp256()) {
14351 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14352 if (Res.getNode())
14353 return Res;
14354 }
14356 assert(!VT.is256BitVector() || !SVT.is128BitVector() ||
14357 VT.getVectorNumElements() != SVT.getVectorNumElements());
14358 return SDValue();
14359 }
14361 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
14362 SDLoc DL(Op);
14363 MVT VT = Op.getSimpleValueType();
14364 SDValue In = Op.getOperand(0);
14365 MVT InVT = In.getSimpleValueType();
14367 if (VT == MVT::i1) {
14368 assert((InVT.isInteger() && (InVT.getSizeInBits() <= 64)) &&
14369 "Invalid scalar TRUNCATE operation");
14370 if (InVT.getSizeInBits() >= 32)
14371 return SDValue();
14372 In = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, In);
14373 return DAG.getNode(ISD::TRUNCATE, DL, VT, In);
14374 }
14375 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
14376 "Invalid TRUNCATE operation");
14378 if (InVT.is512BitVector() || VT.getVectorElementType() == MVT::i1) {
14379 if (VT.getVectorElementType().getSizeInBits() >= 8)
14380 return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
14382 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
14383 unsigned NumElts = InVT.getVectorNumElements();
14384 assert ((NumElts == 8 || NumElts == 16) && "Unexpected vector type");
14385 if (InVT.getSizeInBits() < 512) {
14386 MVT ExtVT = (NumElts == 16)? MVT::v16i32 : MVT::v8i64;
14387 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
14388 InVT = ExtVT;
14389 }
14391 SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
14392 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14393 SDValue CP = DAG.getConstantPool(C, getPointerTy());
14394 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14395 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14396 MachinePointerInfo::getConstantPool(),
14397 false, false, false, Alignment);
14398 SDValue OneV = DAG.getNode(X86ISD::VBROADCAST, DL, InVT, Ld);
14399 SDValue And = DAG.getNode(ISD::AND, DL, InVT, OneV, In);
14400 return DAG.getNode(X86ISD::TESTM, DL, VT, And, And);
14401 }
14403 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
14404 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
14405 if (Subtarget->hasInt256()) {
14406 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
14407 In = DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, In);
14408 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32),
14409 ShufMask);
14410 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
14411 DAG.getIntPtrConstant(0));
14412 }
14414 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14415 DAG.getIntPtrConstant(0));
14416 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14417 DAG.getIntPtrConstant(2));
14418 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14419 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14420 static const int ShufMask[] = {0, 2, 4, 6};
14421 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
14422 }
14424 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
14425 // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
14426 if (Subtarget->hasInt256()) {
14427 In = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, In);
14429 SmallVector<SDValue,32> pshufbMask;
14430 for (unsigned i = 0; i < 2; ++i) {
14431 pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8));
14432 pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8));
14433 pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8));
14434 pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8));
14435 pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8));
14436 pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8));
14437 pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8));
14438 pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8));
14439 for (unsigned j = 0; j < 8; ++j)
14440 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
14441 }
14442 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
14443 In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
14444 In = DAG.getNode(ISD::BITCAST, DL, MVT::v4i64, In);
14446 static const int ShufMask[] = {0, 2, -1, -1};
14447 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
14448 ShufMask);
14449 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14450 DAG.getIntPtrConstant(0));
14451 return DAG.getNode(ISD::BITCAST, DL, VT, In);
14452 }
14454 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14455 DAG.getIntPtrConstant(0));
14457 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14458 DAG.getIntPtrConstant(4));
14460 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpLo);
14461 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpHi);
14463 // The PSHUFB mask:
14464 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
14465 -1, -1, -1, -1, -1, -1, -1, -1};
14467 SDValue Undef = DAG.getUNDEF(MVT::v16i8);
14468 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, Undef, ShufMask1);
14469 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, Undef, ShufMask1);
14471 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14472 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14474 // The MOVLHPS Mask:
14475 static const int ShufMask2[] = {0, 1, 4, 5};
14476 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
14477 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, res);
14478 }
14480 // Handle truncation of V256 to V128 using shuffles.
14481 if (!VT.is128BitVector() || !InVT.is256BitVector())
14482 return SDValue();
14484 assert(Subtarget->hasFp256() && "256-bit vector without AVX!");
14486 unsigned NumElems = VT.getVectorNumElements();
14487 MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
14489 SmallVector<int, 16> MaskVec(NumElems * 2, -1);
14490 // Prepare truncation shuffle mask
14491 for (unsigned i = 0; i != NumElems; ++i)
14492 MaskVec[i] = i * 2;
14493 SDValue V = DAG.getVectorShuffle(NVT, DL,
14494 DAG.getNode(ISD::BITCAST, DL, NVT, In),
14495 DAG.getUNDEF(NVT), &MaskVec[0]);
14496 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
14497 DAG.getIntPtrConstant(0));
14498 }
14500 SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
14501 SelectionDAG &DAG) const {
14502 assert(!Op.getSimpleValueType().isVector());
14504 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14505 /*IsSigned=*/ true, /*IsReplace=*/ false);
14506 SDValue FIST = Vals.first, StackSlot = Vals.second;
14507 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
14508 if (!FIST.getNode()) return Op;
14510 if (StackSlot.getNode())
14511 // Load the result.
14512 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14513 FIST, StackSlot, MachinePointerInfo(),
14514 false, false, false, 0);
14516 // The node is the result.
14517 return FIST;
14518 }
14520 SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
14521 SelectionDAG &DAG) const {
14522 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14523 /*IsSigned=*/ false, /*IsReplace=*/ false);
14524 SDValue FIST = Vals.first, StackSlot = Vals.second;
14525 assert(FIST.getNode() && "Unexpected failure");
14527 if (StackSlot.getNode())
14528 // Load the result.
14529 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14530 FIST, StackSlot, MachinePointerInfo(),
14531 false, false, false, 0);
14533 // The node is the result.
14534 return FIST;
14535 }
14537 static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
14538 SDLoc DL(Op);
14539 MVT VT = Op.getSimpleValueType();
14540 SDValue In = Op.getOperand(0);
14541 MVT SVT = In.getSimpleValueType();
14543 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
14545 return DAG.getNode(X86ISD::VFPEXT, DL, VT,
14546 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
14547 In, DAG.getUNDEF(SVT)));
14548 }
14550 /// The only differences between FABS and FNEG are the mask and the logic op.
14551 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
14552 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
14553 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
14554 "Wrong opcode for lowering FABS or FNEG.");
14556 bool IsFABS = (Op.getOpcode() == ISD::FABS);
14558 // If this is a FABS and it has an FNEG user, bail out to fold the combination
14559 // into an FNABS. We'll lower the FABS after that if it is still in use.
14560 if (IsFABS)
14561 for (SDNode *User : Op->uses())
14562 if (User->getOpcode() == ISD::FNEG)
14563 return Op;
14565 SDValue Op0 = Op.getOperand(0);
14566 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
14568 SDLoc dl(Op);
14569 MVT VT = Op.getSimpleValueType();
14570 // Assume scalar op for initialization; update for vector if needed.
14571 // Note that there are no scalar bitwise logical SSE/AVX instructions, so we
14572 // generate a 16-byte vector constant and logic op even for the scalar case.
14573 // Using a 16-byte mask allows folding the load of the mask with
14574 // the logic op, so it can save (~4 bytes) on code size.
14575 MVT EltVT = VT;
14576 unsigned NumElts = VT == MVT::f64 ? 2 : 4;
14577 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
14578 // decide if we should generate a 16-byte constant mask when we only need 4 or
14579 // 8 bytes for the scalar case.
14580 if (VT.isVector()) {
14581 EltVT = VT.getVectorElementType();
14582 NumElts = VT.getVectorNumElements();
14583 }
14585 unsigned EltBits = EltVT.getSizeInBits();
14586 LLVMContext *Context = DAG.getContext();
14587 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
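// For example (illustrative), with f32 elements:
//   FABS  -> AND with a 0x7fffffff splat (clear the sign bit)
//   FNEG  -> XOR with a 0x80000000 splat (flip the sign bit)
//   FNABS -> OR  with a 0x80000000 splat (set the sign bit)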
14588 APInt MaskElt =
14589 IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignBit(EltBits);
14590 Constant *C = ConstantInt::get(*Context, MaskElt);
14591 C = ConstantVector::getSplat(NumElts, C);
14592 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14593 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy());
14594 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
14595 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
14596 MachinePointerInfo::getConstantPool(),
14597 false, false, false, Alignment);
14599 if (VT.isVector()) {
14600 // For a vector, cast operands to a vector type, perform the logic op,
14601 // and cast the result back to the original value type.
14602 MVT VecVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
14603 SDValue MaskCasted = DAG.getNode(ISD::BITCAST, dl, VecVT, Mask);
14604 SDValue Operand = IsFNABS ?
14605 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0.getOperand(0)) :
14606 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0);
14607 unsigned BitOp = IsFABS ? ISD::AND : IsFNABS ? ISD::OR : ISD::XOR;
14608 return DAG.getNode(ISD::BITCAST, dl, VT,
14609 DAG.getNode(BitOp, dl, VecVT, Operand, MaskCasted));
14610 }
14612 // If not vector, then scalar.
14613 unsigned BitOp = IsFABS ? X86ISD::FAND : IsFNABS ? X86ISD::FOR : X86ISD::FXOR;
14614 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
14615 return DAG.getNode(BitOp, dl, VT, Operand, Mask);
14616 }
14618 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
14619 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14620 LLVMContext *Context = DAG.getContext();
14621 SDValue Op0 = Op.getOperand(0);
14622 SDValue Op1 = Op.getOperand(1);
14623 SDLoc dl(Op);
14624 MVT VT = Op.getSimpleValueType();
14625 MVT SrcVT = Op1.getSimpleValueType();
14627 // If second operand is smaller, extend it first.
14628 if (SrcVT.bitsLT(VT)) {
14629 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
14630 SrcVT = VT;
14631 }
14632 // And if it is bigger, shrink it first.
14633 if (SrcVT.bitsGT(VT)) {
14634 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1));
14635 SrcVT = VT;
14636 }
14638 // At this point the operands and the result should have the same
14639 // type, and that won't be f80 since that is not custom lowered.
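// The lowering below computes, illustratively for f64:
//   SignBit = Op1 & 0x8000000000000000   (keep only the sign of Op1)
//   Val     = Op0 & 0x7fffffffffffffff   (drop the sign of Op0)
//   result  = Val | SignBit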
14641 const fltSemantics &Sem =
14642 VT == MVT::f64 ? APFloat::IEEEdouble : APFloat::IEEEsingle;
14643 const unsigned SizeInBits = VT.getSizeInBits();
14645 SmallVector<Constant *, 4> CV(
14646 VT == MVT::f64 ? 2 : 4,
14647 ConstantFP::get(*Context, APFloat(Sem, APInt(SizeInBits, 0))));
14649 // First, clear all bits but the sign bit from the second operand (sign).
14650 CV[0] = ConstantFP::get(*Context,
14651 APFloat(Sem, APInt::getHighBitsSet(SizeInBits, 1)));
14652 Constant *C = ConstantVector::get(CV);
14653 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
14654 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
14655 MachinePointerInfo::getConstantPool(),
14656 false, false, false, 16);
14657 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);
14659 // Next, clear the sign bit from the first operand (magnitude).
14660 // If it's a constant, we can clear it here.
14661 if (ConstantFPSDNode *Op0CN = dyn_cast<ConstantFPSDNode>(Op0)) {
14662 APFloat APF = Op0CN->getValueAPF();
14663 // If the magnitude is a positive zero, the sign bit alone is enough.
14664 if (APF.isPosZero())
14665 return SignBit;
14666 APF.clearSign();
14667 CV[0] = ConstantFP::get(*Context, APF);
14668 } else {
14669 CV[0] = ConstantFP::get(
14670 *Context,
14671 APFloat(Sem, APInt::getLowBitsSet(SizeInBits, SizeInBits - 1)));
14672 }
14673 C = ConstantVector::get(CV);
14674 CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
14675 SDValue Val = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
14676 MachinePointerInfo::getConstantPool(),
14677 false, false, false, 16);
14678 // If the magnitude operand wasn't a constant, we need to AND out the sign.
14679 if (!isa<ConstantFPSDNode>(Op0))
14680 Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Val);
14682 // OR the magnitude value with the sign bit.
14683 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
14684 }
14686 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
14687 SDValue N0 = Op.getOperand(0);
14688 SDLoc dl(Op);
14689 MVT VT = Op.getSimpleValueType();
14691 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
14692 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
14693 DAG.getConstant(1, VT));
14694 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
14695 }
14697 // Check whether an OR'd tree is PTEST-able.
14698 static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
14699 SelectionDAG &DAG) {
14700 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
14702 if (!Subtarget->hasSSE41())
14703 return SDValue();
14705 if (!Op->hasOneUse())
14706 return SDValue();
14708 SDNode *N = Op.getNode();
14709 SDLoc DL(N);
14711 SmallVector<SDValue, 8> Opnds;
14712 DenseMap<SDValue, unsigned> VecInMap;
14713 SmallVector<SDValue, 8> VecIns;
14714 EVT VT = MVT::Other;
14716 // Recognize a special case where a vector is casted into wide integer to
14717 // test all 0s.
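// E.g. (illustrative): (or (extractelt v, 0), (or (extractelt v, 1), ...))
// compared against zero, where every lane of v is extracted exactly once,
// can be selected as a single PTEST of v against itself.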
14718 Opnds.push_back(N->getOperand(0));
14719 Opnds.push_back(N->getOperand(1));
14721 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
14722 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
14723 // BFS traverse all OR'd operands.
14724 if (I->getOpcode() == ISD::OR) {
14725 Opnds.push_back(I->getOperand(0));
14726 Opnds.push_back(I->getOperand(1));
14727 // Re-evaluate the number of nodes to be traversed.
14728 e += 2; // 2 more nodes (LHS and RHS) are pushed.
14729 continue;
14730 }
14732 // Quit if this is not an EXTRACT_VECTOR_ELT.
14733 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
14734 return SDValue();
14736 // Quit if the index is not a constant.
14737 SDValue Idx = I->getOperand(1);
14738 if (!isa<ConstantSDNode>(Idx))
14739 return SDValue();
14741 SDValue ExtractedFromVec = I->getOperand(0);
14742 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
14743 if (M == VecInMap.end()) {
14744 VT = ExtractedFromVec.getValueType();
14745 // Quit if not 128/256-bit vector.
14746 if (!VT.is128BitVector() && !VT.is256BitVector())
14747 return SDValue();
14748 // Quit if not the same type.
14749 if (VecInMap.begin() != VecInMap.end() &&
14750 VT != VecInMap.begin()->first.getValueType())
14751 return SDValue();
14752 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
14753 VecIns.push_back(ExtractedFromVec);
14754 }
14755 M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
14756 }
14758 assert((VT.is128BitVector() || VT.is256BitVector()) &&
14759 "Not extracted from 128-/256-bit vector.");
14761 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
14763 for (DenseMap<SDValue, unsigned>::const_iterator
14764 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
14765 // Quit if not all elements are used.
14766 if (I->second != FullMask)
14767 return SDValue();
14768 }
14770 EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
14772 // Cast all vectors into TestVT for PTEST.
14773 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
14774 VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]);
14776 // If more than one full vector is evaluated, OR them first before PTEST.
14777 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
14778 // Each iteration will OR 2 nodes and append the result until there is only
14779 // 1 node left, i.e. the final OR'd value of all vectors.
14780 SDValue LHS = VecIns[Slot];
14781 SDValue RHS = VecIns[Slot + 1];
14782 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
14783 }
14785 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
14786 VecIns.back(), VecIns.back());
14787 }
14789 /// \brief return true if \c Op has a use that doesn't just read flags.
14790 static bool hasNonFlagsUse(SDValue Op) {
14791 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
14792 ++UI) {
14793 SDNode *User = *UI;
14794 unsigned UOpNo = UI.getOperandNo();
14795 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
14796 // Look past the truncate.
14797 UOpNo = User->use_begin().getOperandNo();
14798 User = *User->use_begin();
14799 }
14801 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
14802 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
14803 return true;
14804 }
14805 return false;
14806 }
14808 /// Emit nodes that will be selected as "test Op0,Op0", or something
14809 /// equivalent.
14810 SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
14811 SelectionDAG &DAG) const {
14812 if (Op.getValueType() == MVT::i1)
14813 // KORTEST instruction should be selected
14814 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
14815 DAG.getConstant(0, Op.getValueType()));
14817 // CF and OF aren't always set the way we want. Determine which
14818 // of these we need.
14819 bool NeedCF = false;
14820 bool NeedOF = false;
14821 switch (X86CC) {
14822 default: break;
14823 case X86::COND_A: case X86::COND_AE:
14824 case X86::COND_B: case X86::COND_BE:
14825 NeedCF = true;
14826 break;
14827 case X86::COND_G: case X86::COND_GE:
14828 case X86::COND_L: case X86::COND_LE:
14829 case X86::COND_O: case X86::COND_NO: {
14830 // Check if we really need to set the
14831 // Overflow flag. If NoSignedWrap is present,
14832 // it is not actually needed.
14833 switch (Op->getOpcode()) {
14834 case ISD::ADD:
14835 case ISD::SUB:
14836 case ISD::MUL:
14837 case ISD::SHL: {
14838 const BinaryWithFlagsSDNode *BinNode =
14839 cast<BinaryWithFlagsSDNode>(Op.getNode());
14840 if (BinNode->hasNoSignedWrap())
14841 break;
14842 }
14843 default:
14844 NeedOF = true;
14845 break;
14846 }
14847 break;
14848 }
14849 }
14850 // See if we can use the EFLAGS value from the operand instead of
14851 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
14852 // we prove that the arithmetic won't overflow, we can't use OF or CF.
14853 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
14854 // Emit a CMP with 0, which is the TEST pattern.
14855 //if (Op.getValueType() == MVT::i1)
14856 // return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op,
14857 // DAG.getConstant(0, MVT::i1));
14858 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
14859 DAG.getConstant(0, Op.getValueType()));
14860 }
14861 unsigned Opcode = 0;
14862 unsigned NumOperands = 0;
14864 // Truncate operations may prevent the merge of the SETCC instruction
14865 // and the arithmetic instruction before it. Attempt to truncate the operands
14866 // of the arithmetic instruction and use a reduced bit-width instruction.
14867 bool NeedTruncation = false;
14868 SDValue ArithOp = Op;
14869 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
14870 SDValue Arith = Op->getOperand(0);
14871 // Both the trunc and the arithmetic op need to have one user each.
14872 if (Arith->hasOneUse())
14873 switch (Arith.getOpcode()) {
14874 default: break;
14875 case ISD::ADD:
14876 case ISD::SUB:
14877 case ISD::AND:
14878 case ISD::OR:
14879 case ISD::XOR: {
14880 NeedTruncation = true;
14881 ArithOp = Arith;
14882 }
14883 }
14884 }
14886 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
14887 // which may be the result of a CAST. We use the variable 'Op', which is the
14888 // non-casted variable when we check for possible users.
14889 switch (ArithOp.getOpcode()) {
14890 case ISD::ADD:
14891 // Due to an isel shortcoming, be conservative if this add is likely to be
14892 // selected as part of a load-modify-store instruction. When the root node
14893 // in a match is a store, isel doesn't know how to remap non-chain non-flag
14894 // uses of other nodes in the match, such as the ADD in this case. This
14895 // leads to the ADD being left around and reselected, with the result being
14896 // two adds in the output. Alas, even if none of our users are stores, that
14897 // doesn't prove we're O.K. Ergo, if we have any parents that aren't
14898 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
14899 // climbing the DAG back to the root, and it doesn't seem to be worth the
14900 // effort.
14901 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
14902 UE = Op.getNode()->use_end(); UI != UE; ++UI)
14903 if (UI->getOpcode() != ISD::CopyToReg &&
14904 UI->getOpcode() != ISD::SETCC &&
14905 UI->getOpcode() != ISD::STORE)
14906 goto default_case;
14908 if (ConstantSDNode *C =
14909 dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
14910 // An add of one will be selected as an INC.
14911 if (C->getAPIntValue() == 1 && !Subtarget->slowIncDec()) {
14912 Opcode = X86ISD::INC;
14913 NumOperands = 1;
14914 break;
14915 }
14917 // An add of negative one (subtract of one) will be selected as a DEC.
14918 if (C->getAPIntValue().isAllOnesValue() && !Subtarget->slowIncDec()) {
14919 Opcode = X86ISD::DEC;
14920 NumOperands = 1;
14921 break;
14922 }
14923 }
14925 // Otherwise use a regular EFLAGS-setting add.
14926 Opcode = X86ISD::ADD;
14927 NumOperands = 2;
14928 break;
14929 case ISD::SHL:
14930 case ISD::SRL:
14931 // If we have a constant logical shift that's only used in a comparison
14932 // against zero turn it into an equivalent AND. This allows turning it into
14933 // a TEST instruction later.
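// E.g. (illustrative, 32-bit): (srl X, 3) == 0 becomes
// (X & 0xfffffff8) == 0, and (shl X, 3) == 0 becomes (X & 0x1fffffff) == 0,
// either of which selects as a TEST with an immediate.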
14934 if ((X86CC == X86::COND_E || X86CC == X86::COND_NE) && Op->hasOneUse() &&
14935 isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
14936 EVT VT = Op.getValueType();
14937 unsigned BitWidth = VT.getSizeInBits();
14938 unsigned ShAmt = Op->getConstantOperandVal(1);
14939 if (ShAmt >= BitWidth) // Avoid undefined shifts.
14940 break;
14941 APInt Mask = ArithOp.getOpcode() == ISD::SRL
14942 ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
14943 : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
14944 if (!Mask.isSignedIntN(32)) // Avoid large immediates.
14945 break;
14946 SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
14947 DAG.getConstant(Mask, VT));
14948 DAG.ReplaceAllUsesWith(Op, New);
14949 Op = New;
14950 }
14951 break;
14953 case ISD::AND:
14954 // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
14955 // because a TEST instruction will be better.
14956 if (!hasNonFlagsUse(Op))
14957 break;
14958 // FALL THROUGH
14959 case ISD::SUB:
14960 case ISD::OR:
14961 case ISD::XOR:
14962 // Due to the ISEL shortcoming noted above, be conservative if this op is
14963 // likely to be selected as part of a load-modify-store instruction.
14964 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
14965 UE = Op.getNode()->use_end(); UI != UE; ++UI)
14966 if (UI->getOpcode() == ISD::STORE)
14967 goto default_case;
14969 // Otherwise use a regular EFLAGS-setting instruction.
14970 switch (ArithOp.getOpcode()) {
14971 default: llvm_unreachable("unexpected operator!");
14972 case ISD::SUB: Opcode = X86ISD::SUB; break;
14973 case ISD::XOR: Opcode = X86ISD::XOR; break;
14974 case ISD::AND: Opcode = X86ISD::AND; break;
14975 case ISD::OR: {
14976 if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
14977 SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG);
14978 if (EFLAGS.getNode())
14979 return EFLAGS;
14980 }
14981 Opcode = X86ISD::OR;
14982 break;
14983 }
14984 }
14986 NumOperands = 2;
14987 break;
14988 case X86ISD::ADD:
14989 case X86ISD::SUB:
14990 case X86ISD::INC:
14991 case X86ISD::DEC:
14992 case X86ISD::OR:
14993 case X86ISD::XOR:
14994 case X86ISD::AND:
14995 return SDValue(Op.getNode(), 1);
14996 default:
14997 default_case:
14998 break;
14999 }
15001 // If we found that truncation is beneficial, perform the truncation and
15002 // update the result.
15003 if (NeedTruncation) {
15004 EVT VT = Op.getValueType();
15005 SDValue WideVal = Op->getOperand(0);
15006 EVT WideVT = WideVal.getValueType();
15007 unsigned ConvertedOp = 0;
15008 // Use a target machine opcode to prevent further DAGCombine
15009 // optimizations that may separate the arithmetic operations
15010 // from the setcc node.
15011 switch (WideVal.getOpcode()) {
15012 default: break;
15013 case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
15014 case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
15015 case ISD::AND: ConvertedOp = X86ISD::AND; break;
15016 case ISD::OR: ConvertedOp = X86ISD::OR; break;
15017 case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
15018 }
15020 if (ConvertedOp) {
15021 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15022 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
15023 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
15024 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
15025 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1);
15026 }
15027 }
15028 }
15030 if (Opcode == 0)
15031 // Emit a CMP with 0, which is the TEST pattern.
15032 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15033 DAG.getConstant(0, Op.getValueType()));
15035 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
15036 SmallVector<SDValue, 4> Ops;
15037 for (unsigned i = 0; i != NumOperands; ++i)
15038 Ops.push_back(Op.getOperand(i));
15040 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
15041 DAG.ReplaceAllUsesWith(Op, New);
15042 return SDValue(New.getNode(), 1);
15043 }
15045 /// Emit nodes that will be selected as "cmp Op0,Op1", or something
15046 /// equivalent.
15047 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
15048 SDLoc dl, SelectionDAG &DAG) const {
15049 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) {
15050 if (C->getAPIntValue() == 0)
15051 return EmitTest(Op0, X86CC, dl, DAG);
15053 if (Op0.getValueType() == MVT::i1)
15054 llvm_unreachable("Unexpected comparison operation for MVT::i1 operands");
15055 }
15057 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
15058 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
15059 // Do the comparison at i32 if it's smaller, besides the Atom case.
15060 // This avoids subregister aliasing issues. Keep the smaller reference
15061 // if we're optimizing for size, however, as that'll allow better folding
15062 // of memory operations.
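// E.g. (illustrative): an i8 compare is widened to 'cmp eax, ecx' after
// extending both operands, instead of 'cmp al, cl', avoiding
// partial-register aliasing; under MinSize the narrow form is kept because
// it has a shorter encoding and can fold a memory operand.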
15063 if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
15064 !DAG.getMachineFunction().getFunction()->getAttributes().hasAttribute(
15065 AttributeSet::FunctionIndex, Attribute::MinSize) &&
15066 !Subtarget->isAtom()) {
15067 unsigned ExtendOp =
15068 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
15069 Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
15070 Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
15072 // Use SUB instead of CMP to enable CSE between SUB and CMP.
15073 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
15074 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs,
15075 Op0, Op1);
15076 return SDValue(Sub.getNode(), 1);
15077 }
15078 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
15079 }
15081 /// Convert a comparison if required by the subtarget.
15082 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
15083 SelectionDAG &DAG) const {
15084 // If the subtarget does not support the FUCOMI instruction, floating-point
15085 // comparisons have to be converted.
15086 if (Subtarget->hasCMov() ||
15087 Cmp.getOpcode() != X86ISD::CMP ||
15088 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
15089 !Cmp.getOperand(1).getValueType().isFloatingPoint())
15090 return Cmp;
15092 // The instruction selector will select an FUCOM instruction instead of
15093 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
15094 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
15095 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
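// In assembly the sequence is roughly (an illustrative sketch):
//   fucomp           ; compare, result lands in FPSW
//   fnstsw ax        ; copy FPSW into AX
//   sahf             ; load AH into EFLAGS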
15096 SDLoc dl(Cmp);
15097 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
15098 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
15099 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
15100 DAG.getConstant(8, MVT::i8));
15101 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
15102 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
15103 }
15105 /// The minimum architected relative accuracy is 2^-12. We need one
15106 /// Newton-Raphson step to have a good float result (24 bits of precision).
15107 SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
15108 DAGCombinerInfo &DCI,
15109 unsigned &RefinementSteps,
15110 bool &UseOneConstNR) const {
15111 // FIXME: We should use instruction latency models to calculate the cost of
15112 // each potential sequence, but this is very hard to do reliably because
15113 // at least Intel's Core* chips have variable timing based on the number of
15114 // significant digits in the divisor and/or sqrt operand.
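// The refinement step applied by the generic combiner to this estimate is,
// illustratively, one Newton-Raphson iteration:
//   x1 = x0 * (1.5 - 0.5 * a * x0 * x0)
// which roughly doubles the number of correct bits per step.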
15115 if (!Subtarget->useSqrtEst())
15116 return SDValue();
15118 EVT VT = Op.getValueType();
15120 // SSE1 has rsqrtss and rsqrtps.
15121 // TODO: Add support for AVX512 (v16f32).
15122 // It is likely not profitable to do this for f64 because a double-precision
15123 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
15124 // instructions: convert to single, rsqrtss, convert back to double, refine
15125 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
15126 // along with FMA, this could be a throughput win.
15127 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15128 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15129 RefinementSteps = 1;
15130 UseOneConstNR = false;
15131 return DCI.DAG.getNode(X86ISD::FRSQRT, SDLoc(Op), VT, Op);
15132 }
15133 return SDValue();
15134 }
15136 /// The minimum architected relative accuracy is 2^-12. We need one
15137 /// Newton-Raphson step to have a good float result (24 bits of precision).
15138 SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
15139 DAGCombinerInfo &DCI,
15140 unsigned &RefinementSteps) const {
15141 // FIXME: We should use instruction latency models to calculate the cost of
15142 // each potential sequence, but this is very hard to do reliably because
15143 // at least Intel's Core* chips have variable timing based on the number of
15144 // significant digits in the divisor.
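// The corresponding Newton-Raphson step for the reciprocal is,
// illustratively:
//   x1 = x0 * (2 - a * x0)
// applied ReciprocalEstimateRefinementSteps times to the rcpss/rcpps result.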
15145 if (!Subtarget->useReciprocalEst())
15146 return SDValue();
15148 EVT VT = Op.getValueType();
15150 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
15151 // TODO: Add support for AVX512 (v16f32).
15152 // It is likely not profitable to do this for f64 because a double-precision
15153 // reciprocal estimate with refinement on x86 prior to FMA requires
15154 // 15 instructions: convert to single, rcpss, convert back to double, refine
15155 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
15156 // along with FMA, this could be a throughput win.
15157 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15158 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15159 RefinementSteps = ReciprocalEstimateRefinementSteps;
15160 return DCI.DAG.getNode(X86ISD::FRCP, SDLoc(Op), VT, Op);
15161 }
15162 return SDValue();
15163 }
15165 static bool isAllOnes(SDValue V) {
15166 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
15167 return C && C->isAllOnesValue();
15168 }
15170 /// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
15171 /// if it's possible.
15172 SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
15173 SDLoc dl, SelectionDAG &DAG) const {
15174 SDValue Op0 = And.getOperand(0);
15175 SDValue Op1 = And.getOperand(1);
15176 if (Op0.getOpcode() == ISD::TRUNCATE)
15177 Op0 = Op0.getOperand(0);
15178 if (Op1.getOpcode() == ISD::TRUNCATE)
15179 Op1 = Op1.getOperand(0);
15181 SDValue LHS, RHS;
15182 if (Op1.getOpcode() == ISD::SHL)
15183 std::swap(Op0, Op1);
15184 if (Op0.getOpcode() == ISD::SHL) {
15185 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0)))
15186 if (And00C->getZExtValue() == 1) {
15187 // If we looked past a truncate, check that it's only truncating away
15188 // known zeros.
15189 unsigned BitWidth = Op0.getValueSizeInBits();
15190 unsigned AndBitWidth = And.getValueSizeInBits();
15191 if (BitWidth > AndBitWidth) {
15192 APInt Zeros, Ones;
15193 DAG.computeKnownBits(Op0, Zeros, Ones);
15194 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
15195 return SDValue();
15196 }
15197 LHS = Op1;
15198 RHS = Op0.getOperand(1);
15199 }
15200 } else if (Op1.getOpcode() == ISD::Constant) {
15201 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
15202 uint64_t AndRHSVal = AndRHS->getZExtValue();
15203 SDValue AndLHS = Op0;
15205 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
15206 LHS = AndLHS.getOperand(0);
15207 RHS = AndLHS.getOperand(1);
15208 }
15210 // Use BT if the immediate can't be encoded in a TEST instruction.
15211 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
15212 LHS = AndLHS;
15213 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType());
15214 }
15215 }
15217 if (LHS.getNode()) {
15218 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
15219 // instruction. Since the shift amount is in-range-or-undefined, we know
15220 // that doing a bittest on the i32 value is ok. We extend to i32 because
15221 // the encoding for the i16 version is larger than the i32 version.
15222 // Also promote i16 to i32 for performance / code size reason.
15223 if (LHS.getValueType() == MVT::i8 ||
15224 LHS.getValueType() == MVT::i16)
15225 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
15227 // If the operand types disagree, extend the shift amount to match. Since
15228 // BT ignores high bits (like shifts) we can use anyextend.
15229 if (LHS.getValueType() != RHS.getValueType())
15230 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
15232 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
15233 X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
15234 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15235 DAG.getConstant(Cond, MVT::i8), BT);
15236 }
15238 return SDValue();
15239 }
15241 /// \brief Turns an ISD::CondCode into a value suitable for SSE floating point
15242 /// mask CMPs.
15243 static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
15244 SDValue &Op1) {
15245 unsigned SSECC;
15246 bool Swap = false;
15248 // SSE Condition code mapping:
15249 // 0 - EQ
15250 // 1 - LT
15251 // 2 - LE
15252 // 3 - UNORD
15253 // 4 - NEQ
15254 // 5 - NLT
15255 // 6 - NLE
15256 // 7 - ORD
15257 switch (SetCCOpcode) {
15258 default: llvm_unreachable("Unexpected SETCC condition");
15259 case ISD::SETOEQ:
15260 case ISD::SETEQ: SSECC = 0; break;
15261 case ISD::SETOGT:
15262 case ISD::SETGT: Swap = true; // Fallthrough
15263 case ISD::SETLT:
15264 case ISD::SETOLT: SSECC = 1; break;
15265 case ISD::SETOGE:
15266 case ISD::SETGE: Swap = true; // Fallthrough
15267 case ISD::SETLE:
15268 case ISD::SETOLE: SSECC = 2; break;
15269 case ISD::SETUO: SSECC = 3; break;
15270 case ISD::SETUNE:
15271 case ISD::SETNE: SSECC = 4; break;
15272 case ISD::SETULE: Swap = true; // Fallthrough
15273 case ISD::SETUGE: SSECC = 5; break;
15274 case ISD::SETULT: Swap = true; // Fallthrough
15275 case ISD::SETUGT: SSECC = 6; break;
15276 case ISD::SETO: SSECC = 7; break;
15277 case ISD::SETUEQ:
15278 case ISD::SETONE: SSECC = 8; break;
15279 }
15280 if (Swap)
15281 std::swap(Op0, Op1);
15283 return SSECC;
15284 }
15286 // Lower256IntVSETCC - Break a 256-bit integer VSETCC into two new 128-bit
15287 // ones, and then concatenate the result back.
15288 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
15289 MVT VT = Op.getSimpleValueType();
15291 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
15292 "Unsupported value type for operation");
15294 unsigned NumElems = VT.getVectorNumElements();
15295 SDLoc dl(Op);
15296 SDValue CC = Op.getOperand(2);
15298 // Extract the LHS vectors
15299 SDValue LHS = Op.getOperand(0);
15300 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
15301 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
15303 // Extract the RHS vectors
15304 SDValue RHS = Op.getOperand(1);
15305 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
15306 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
15308 // Issue the operation on the smaller types and concatenate the result back
15309 MVT EltVT = VT.getVectorElementType();
15310 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
15311 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
15312 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
15313 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
15314 }
15316 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
15317 const X86Subtarget *Subtarget) {
15318 SDValue Op0 = Op.getOperand(0);
15319 SDValue Op1 = Op.getOperand(1);
15320 SDValue CC = Op.getOperand(2);
15321 MVT VT = Op.getSimpleValueType();
15322 SDLoc dl(Op);
15324 assert(Op0.getValueType().getVectorElementType().getSizeInBits() >= 8 &&
15325 Op.getValueType().getScalarType() == MVT::i1 &&
15326 "Cannot set masked compare for this operation");
15328 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15329 unsigned Opc = 0;
15330 bool Unsigned = false;
15331 bool Swap = false;
15332 unsigned SSECC = 0;
15333 switch (SetCCOpcode) {
15334 default: llvm_unreachable("Unexpected SETCC condition");
15335 case ISD::SETNE: SSECC = 4; break;
15336 case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
15337 case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
15338 case ISD::SETLT: Swap = true; //fall-through
15339 case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
15340 case ISD::SETULT: SSECC = 1; Unsigned = true; break;
15341 case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
15342 case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
15343 case ISD::SETULE: Unsigned = true; //fall-through
15344 case ISD::SETLE: SSECC = 2; break;
15345 }
15347 if (Swap)
15348 std::swap(Op0, Op1);
15349 if (Opc)
15350 return DAG.getNode(Opc, dl, VT, Op0, Op1);
15351 Opc = Unsigned ? X86ISD::CMPMU : X86ISD::CMPM;
15352 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15353 DAG.getConstant(SSECC, MVT::i8));
15354 }
15356 /// \brief Try to turn a VSETULT into a VSETULE by modifying its second
15357 /// operand \p Op1. If non-trivial (for example because it's not constant)
15358 /// return an empty value.
15359 static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
15360 {
15361 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
15362 if (!BV)
15363 return SDValue();
15365 MVT VT = Op1.getSimpleValueType();
15366 MVT EVT = VT.getVectorElementType();
15367 unsigned n = VT.getVectorNumElements();
15368 SmallVector<SDValue, 8> ULTOp1;
15370 for (unsigned i = 0; i < n; ++i) {
15371 ConstantSDNode *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
15372 if (!Elt || Elt->isOpaque() || Elt->getValueType(0) != EVT)
15373 return SDValue();
15375 // Avoid underflow.
15376 APInt Val = Elt->getAPIntValue();
15377 if (Val == 0)
15378 return SDValue();
15380 ULTOp1.push_back(DAG.getConstant(Val - 1, EVT));
15381 }
15383 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
15384 }
15386 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
15387 SelectionDAG &DAG) {
15388 SDValue Op0 = Op.getOperand(0);
15389 SDValue Op1 = Op.getOperand(1);
15390 SDValue CC = Op.getOperand(2);
15391 MVT VT = Op.getSimpleValueType();
15392 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15393 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
15394 SDLoc dl(Op);
15396 if (isFP) {
15397 #ifndef NDEBUG
15398 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
15399 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
15400 #endif
15402 unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
15403 unsigned Opc = X86ISD::CMPP;
15404 if (Subtarget->hasAVX512() && VT.getVectorElementType() == MVT::i1) {
15405 assert(VT.getVectorNumElements() <= 16);
15406 Opc = X86ISD::CMPM;
15407 }
15408 // In the two special cases we can't handle, emit two comparisons.
15409 if (SSECC == 8) {
15410 unsigned CC0, CC1;
15411 unsigned CombineOpc;
15412 if (SetCCOpcode == ISD::SETUEQ) {
15413 CC0 = 3; CC1 = 0; CombineOpc = ISD::OR;
15414 } else {
15415 assert(SetCCOpcode == ISD::SETONE);
15416 CC0 = 7; CC1 = 4; CombineOpc = ISD::AND;
15417 }
15419 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15420 DAG.getConstant(CC0, MVT::i8));
15421 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15422 DAG.getConstant(CC1, MVT::i8));
15423 return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
15424 }
15425 // Handle all other FP comparisons here.
15426 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15427 DAG.getConstant(SSECC, MVT::i8));
15428 }
15430 // Break 256-bit integer vector compare into smaller ones.
15431 if (VT.is256BitVector() && !Subtarget->hasInt256())
15432 return Lower256IntVSETCC(Op, DAG);
15434 bool MaskResult = (VT.getVectorElementType() == MVT::i1);
15435 EVT OpVT = Op1.getValueType();
15436 if (Subtarget->hasAVX512()) {
15437 if (Op1.getValueType().is512BitVector() ||
15438 (Subtarget->hasBWI() && Subtarget->hasVLX()) ||
15439 (MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32))
15440 return LowerIntVSETCC_AVX512(Op, DAG, Subtarget);
15442 // In the AVX-512 architecture setcc returns a mask with i1 elements,
15443 // but there is no compare instruction for i8 and i16 elements in KNL.
15444 // We are not talking about 512-bit operands in this case, these
15445 // types are illegal.
15446 if (MaskResult &&
15447 (OpVT.getVectorElementType().getSizeInBits() < 32 &&
15448 OpVT.getVectorElementType().getSizeInBits() >= 8))
15449 return DAG.getNode(ISD::TRUNCATE, dl, VT,
15450 DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
15451 }
15453 // We are handling one of the integer comparisons here. Since SSE only has
15454 // GT and EQ comparisons for integer, swapping operands and multiple
15455 // operations may be required for some comparisons.
15456 unsigned Opc;
15457 bool Swap = false, Invert = false, FlipSigns = false, MinMax = false;
15458 bool Subus = false;
15460 switch (SetCCOpcode) {
15461 default: llvm_unreachable("Unexpected SETCC condition");
15462 case ISD::SETNE: Invert = true;
15463 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
15464 case ISD::SETLT: Swap = true;
15465 case ISD::SETGT: Opc = X86ISD::PCMPGT; break;
15466 case ISD::SETGE: Swap = true;
15467 case ISD::SETLE: Opc = X86ISD::PCMPGT;
15468 Invert = true; break;
15469 case ISD::SETULT: Swap = true;
15470 case ISD::SETUGT: Opc = X86ISD::PCMPGT;
15471 FlipSigns = true; break;
15472 case ISD::SETUGE: Swap = true;
15473 case ISD::SETULE: Opc = X86ISD::PCMPGT;
15474 FlipSigns = true; Invert = true; break;
15477 // Special case: Use min/max operations for SETULE/SETUGE
15478 MVT VET = VT.getVectorElementType();
15479 bool hasMinMax =
15480 (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
15481 || (Subtarget->hasSSE2() && (VET == MVT::i8));
15483 if (hasMinMax) {
15484 switch (SetCCOpcode) {
15485 default: break;
15486 case ISD::SETULE: Opc = X86ISD::UMIN; MinMax = true; break;
15487 case ISD::SETUGE: Opc = X86ISD::UMAX; MinMax = true; break;
15488 }
15490 if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
15491 }
15493 bool hasSubus = Subtarget->hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
15494 if (!MinMax && hasSubus) {
15495 // As another special case, use PSUBUS[BW] when it's profitable. E.g. for
15496 // Op0 u<= Op1:
15497 // t = psubus Op0, Op1
15498 // pcmpeq t, <0..0>
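// (Unsigned saturating subtraction yields 0 exactly when Op0 u<= Op1, so
// comparing the PSUBUS result against zero produces the desired mask.)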
15499 switch (SetCCOpcode) {
15500 default: break;
15501 case ISD::SETULT: {
15502 // If the comparison is against a constant we can turn this into a
15503 // setule. With psubus, setule does not require a swap. This is
15504 // beneficial because the constant in the register is no longer
15505 // clobbered as the destination, so it can be hoisted out of a loop.
15506 // Only do this pre-AVX since vpcmp* is no longer destructive.
15507 if (Subtarget->hasAVX())
15508 break;
15509 SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
15510 if (ULEOp1.getNode()) {
15511 Op1 = ULEOp1;
15512 Subus = true; Invert = false; Swap = false;
15513 }
15514 break;
15515 }
15516 // Psubus is better than flip-sign because it requires no inversion.
15517 case ISD::SETUGE: Subus = true; Invert = false; Swap = true; break;
15518 case ISD::SETULE: Subus = true; Invert = false; Swap = false; break;
15519 }
15521 if (Subus) {
15522 Opc = X86ISD::SUBUS;
15523 FlipSigns = false;
15524 MinMax = false;
15525 }
15526 }
15527 if (Swap)
15528 std::swap(Op0, Op1);
15530 // Check that the operation in question is available (most are plain SSE2,
15531 // but PCMPGTQ and PCMPEQQ have different requirements).
15532 if (VT == MVT::v2i64) {
15533 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) {
15534 assert(Subtarget->hasSSE2() && "Don't know how to lower!");
15536 // First cast everything to the right type.
15537 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15538 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15540 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15541 // bits of the inputs before performing those operations. The lower
15542 // compare is always unsigned.
15543 SDValue SB;
15544 if (FlipSigns) {
15545 SB = DAG.getConstant(0x80000000U, MVT::v4i32);
15546 } else {
15547 SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32);
15548 SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32);
15549 SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
15550 Sign, Zero, Sign, Zero);
15551 }
15552 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
15553 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
15555 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
15556 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
15557 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
15559 // Create masks for only the low parts/high parts of the 64 bit integers.
15560 static const int MaskHi[] = { 1, 1, 3, 3 };
15561 static const int MaskLo[] = { 0, 0, 2, 2 };
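// (Illustrative: MaskHi replicates each 64-bit lane's high dword into both
// dword slots of the lane and MaskLo does the same for the low dword, so
// the 32-bit compare results can be combined lane-wise below.)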
15562 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
15563 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
15564 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
15566 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
15567 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
15569 if (Invert)
15570 Result = DAG.getNOT(dl, Result, MVT::v4i32);
15572 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
15573 }
15575 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) {
15576 // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
15577 // pcmpeqd + pshufd + pand.
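// Roughly the following sequence (illustrative, AT&T syntax):
//   pcmpeqd %xmm1, %xmm0        ; compare as four i32 lanes
//   pshufd  $0xb1, %xmm0, %xmm1 ; swap lo/hi lane results within each i64
//   pand    %xmm1, %xmm0        ; both halves must have compared equal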
15578 assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!");
15580 // First cast everything to the right type.
15581 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15582 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15584 // Do the compare.
15585 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
15587 // Make sure the lower and upper halves are both all-ones.
15588 static const int Mask[] = { 1, 0, 3, 2 };
15589 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
15590 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
15592 if (Invert)
15593 Result = DAG.getNOT(dl, Result, MVT::v4i32);
15595 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
15596 }
15597 }
15599 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15600 // bits of the inputs before performing those operations.
15601 if (FlipSigns) {
15602 EVT EltVT = VT.getVectorElementType();
15603 SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT);
15604 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB);
15605 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB);
15606 }
15608 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
15610 // If the logical-not of the result is required, perform that now.
15611 if (Invert)
15612 Result = DAG.getNOT(dl, Result, VT);
15614 if (MinMax)
15615 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
15617 if (Subus)
15618 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
15619 getZeroVector(VT, Subtarget, DAG, dl));
15621 return Result;
15622 }
15624 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
15626 MVT VT = Op.getSimpleValueType();
15628 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
15630 assert(((!Subtarget->hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
15631 && "SetCC type must be 8-bit or 1-bit integer");
15632 SDValue Op0 = Op.getOperand(0);
15633 SDValue Op1 = Op.getOperand(1);
15634 SDLoc dl(Op);
15635 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
15637 // Optimize to BT if possible.
15638 // Lower (X & (1 << N)) == 0 to BT(X, N).
15639 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
15640 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
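// For example (sketch): with CC == SETEQ, (and %x, (shl 1, %n)) == 0 can be
// emitted as "bt %n, %x" + SETAE (CF clear == bit clear); the SETNE forms
// use SETB instead.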
15641 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
15642 Op1.getOpcode() == ISD::Constant &&
15643 cast<ConstantSDNode>(Op1)->isNullValue() &&
15644 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15645 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
15646 if (NewSetCC.getNode()) {
15647 if (VT == MVT::i1)
15648 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
15649 return NewSetCC;
15650 }
15651 }
15653 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
15654 // these.
15655 if (Op1.getOpcode() == ISD::Constant &&
15656 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
15657 cast<ConstantSDNode>(Op1)->isNullValue()) &&
15658 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15660 // If the input is a setcc, then reuse the input setcc or use a new one with
15661 // the inverted condition.
15662 if (Op0.getOpcode() == X86ISD::SETCC) {
15663 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
15664 bool Invert = (CC == ISD::SETNE) ^
15665 cast<ConstantSDNode>(Op1)->isNullValue();
15666 if (!Invert)
15667 return Op0;
15669 CCode = X86::GetOppositeBranchCondition(CCode);
15670 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15671 DAG.getConstant(CCode, MVT::i8),
15672 Op0.getOperand(1));
15673 if (VT == MVT::i1)
15674 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
15675 return SetCC;
15676 }
15677 }
15678 if ((Op0.getValueType() == MVT::i1) && (Op1.getOpcode() == ISD::Constant) &&
15679 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1) &&
15680 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15682 ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
15683 return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, MVT::i1), NewCC);
15684 }
15686 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
15687 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
15688 if (X86CC == X86::COND_INVALID)
15689 return SDValue();
15691 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
15692 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
15693 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15694 DAG.getConstant(X86CC, MVT::i8), EFLAGS);
15695 if (VT == MVT::i1)
15696 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
15697 return SetCC;
15698 }
15700 // isX86LogicalCmp - Return true if opcode is an X86 logical comparison.
15701 static bool isX86LogicalCmp(SDValue Op) {
15702 unsigned Opc = Op.getNode()->getOpcode();
15703 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
15704 Opc == X86ISD::SAHF)
15705 return true;
15706 if (Op.getResNo() == 1 &&
15707 (Opc == X86ISD::ADD ||
15708 Opc == X86ISD::SUB ||
15709 Opc == X86ISD::ADC ||
15710 Opc == X86ISD::SBB ||
15711 Opc == X86ISD::SMUL ||
15712 Opc == X86ISD::UMUL ||
15713 Opc == X86ISD::INC ||
15714 Opc == X86ISD::DEC ||
15715 Opc == X86ISD::OR ||
15716 Opc == X86ISD::XOR ||
15717 Opc == X86ISD::AND))
15718 return true;
15720 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
15721 return true;
15723 return false;
15724 }
15726 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
15727 if (V.getOpcode() != ISD::TRUNCATE)
15728 return false;
15730 SDValue VOp0 = V.getOperand(0);
15731 unsigned InBits = VOp0.getValueSizeInBits();
15732 unsigned Bits = V.getValueSizeInBits();
15733 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
15734 }
15736 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
15737 bool addTest = true;
15738 SDValue Cond = Op.getOperand(0);
15739 SDValue Op1 = Op.getOperand(1);
15740 SDValue Op2 = Op.getOperand(2);
15741 SDLoc DL(Op);
15742 EVT VT = Op1.getValueType();
15743 SDValue CC;
15745 // Lower fp selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
15746 // are available. Otherwise fp cmovs get lowered into a less efficient branch
15747 // sequence later on.
15748 if (Cond.getOpcode() == ISD::SETCC &&
15749 ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
15750 (Subtarget->hasSSE1() && VT == MVT::f32)) &&
15751 VT == Cond.getOperand(0).getValueType() && Cond->hasOneUse()) {
15752 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
15753 int SSECC = translateX86FSETCC(
15754 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
15756 if (SSECC != 8) {
15757 if (Subtarget->hasAVX512()) {
15758 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
15759 DAG.getConstant(SSECC, MVT::i8));
15760 return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
15761 }
15762 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
15763 DAG.getConstant(SSECC, MVT::i8));
15764 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
15765 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
15766 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
15767 }
15768 }
15770 if (Cond.getOpcode() == ISD::SETCC) {
15771 SDValue NewCond = LowerSETCC(Cond, DAG);
15772 if (NewCond.getNode())
15773 Cond = NewCond;
15774 }
15776 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
15777 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
15778 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
15779 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
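// Worked example for the first form, with i32 operands (sketch):
//   cmp $1, %x          ; CF = (x == 0), i.e. x u< 1
//   sbb %t, %t          ; t = CF ? -1 : 0
//   or  %y, %t          ; t = -1 when x == 0, else y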
15780 if (Cond.getOpcode() == X86ISD::SETCC &&
15781 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
15782 isZero(Cond.getOperand(1).getOperand(1))) {
15783 SDValue Cmp = Cond.getOperand(1);
15785 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
15787 if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
15788 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
15789 SDValue Y = isAllOnes(Op2) ? Op1 : Op2;
15791 SDValue CmpOp0 = Cmp.getOperand(0);
15792 // Apply further optimizations for special cases
15793 // (select (x != 0), -1, 0) -> neg & sbb
15794 // (select (x == 0), 0, -1) -> neg & sbb
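// e.g. roughly (sketch):
//   neg x          ; CF = (x != 0)
//   sbb %r, %r     ; r = CF ? -1 : 0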
15795 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y))
15796 if (YC->isNullValue() &&
15797 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) {
15798 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
15799 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
15800 DAG.getConstant(0, CmpOp0.getValueType()),
15801 CmpOp0);
15802 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
15803 DAG.getConstant(X86::COND_B, MVT::i8),
15804 SDValue(Neg.getNode(), 1));
15805 return Res;
15806 }
15808 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
15809 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
15810 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
15812 SDValue Res = // Res = 0 or -1.
15813 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
15814 DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
15816 if (isAllOnes(Op1) != (CondCode == X86::COND_E))
15817 Res = DAG.getNOT(DL, Res, Res.getValueType());
15819 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
15820 if (!N2C || !N2C->isNullValue())
15821 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
15822 return Res;
15823 }
15824 }
15826 // Look past (and (setcc_carry (cmp ...)), 1).
15827 if (Cond.getOpcode() == ISD::AND &&
15828 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
15829 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
15830 if (C && C->getAPIntValue() == 1)
15831 Cond = Cond.getOperand(0);
15832 }
15834 // If condition flag is set by an X86ISD::CMP, then use it as the condition
15835 // setting operand in place of the X86ISD::SETCC.
15836 unsigned CondOpcode = Cond.getOpcode();
15837 if (CondOpcode == X86ISD::SETCC ||
15838 CondOpcode == X86ISD::SETCC_CARRY) {
15839 CC = Cond.getOperand(0);
15841 SDValue Cmp = Cond.getOperand(1);
15842 unsigned Opc = Cmp.getOpcode();
15843 MVT VT = Op.getSimpleValueType();
15845 bool IllegalFPCMov = false;
15846 if (VT.isFloatingPoint() && !VT.isVector() &&
15847 !isScalarFPTypeInSSEReg(VT)) // FPStack?
15848 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
15850 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
15851 Opc == X86ISD::BT) { // FIXME
15852 Cond = Cmp;
15853 addTest = false;
15854 }
15855 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
15856 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
15857 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
15858 Cond.getOperand(0).getValueType() != MVT::i8)) {
15859 SDValue LHS = Cond.getOperand(0);
15860 SDValue RHS = Cond.getOperand(1);
15861 unsigned X86Opcode;
15862 unsigned X86Cond;
15863 SDVTList VTs;
15864 switch (CondOpcode) {
15865 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
15866 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
15867 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
15868 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
15869 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
15870 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
15871 default: llvm_unreachable("unexpected overflowing operator");
15872 }
15873 if (CondOpcode == ISD::UMULO)
15874 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
15875 MVT::i32);
15876 else
15877 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
15879 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
15881 if (CondOpcode == ISD::UMULO)
15882 Cond = X86Op.getValue(2);
15883 else
15884 Cond = X86Op.getValue(1);
15886 CC = DAG.getConstant(X86Cond, MVT::i8);
15887 addTest = false;
15888 }
15890 if (addTest) {
15891 // Look past the truncate if the high bits are known zero.
15892 if (isTruncWithZeroHighBitsInput(Cond, DAG))
15893 Cond = Cond.getOperand(0);
15895 // We know the result of AND is compared against zero. Try to match
15896 // it to BT.
15897 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
15898 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG);
15899 if (NewSetCC.getNode()) {
15900 CC = NewSetCC.getOperand(0);
15901 Cond = NewSetCC.getOperand(1);
15902 addTest = false;
15903 }
15904 }
15905 }
15907 if (addTest) {
15908 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
15909 Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
15910 }
15912 // a < b ? -1 : 0 -> RES = ~setcc_carry
15913 // a < b ? 0 : -1 -> RES = setcc_carry
15914 // a >= b ? -1 : 0 -> RES = setcc_carry
15915 // a >= b ? 0 : -1 -> RES = ~setcc_carry
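// Sketch for the unsigned case: the X86ISD::SUB below leaves CF = (a u< b),
// so "sbb %r, %r" materializes -1/0 directly, and a NOT handles the two
// inverted rows of the table above.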
15916 if (Cond.getOpcode() == X86ISD::SUB) {
15917 Cond = ConvertCmpIfNecessary(Cond, DAG);
15918 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
15920 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
15921 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
15922 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
15923 DAG.getConstant(X86::COND_B, MVT::i8), Cond);
15924 if (isAllOnes(Op1) != (CondCode == X86::COND_B))
15925 return DAG.getNOT(DL, Res, Res.getValueType());
15926 return Res;
15927 }
15928 }
15930 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate
15931 // widen the cmov and push the truncate through. This avoids introducing a new
15932 // branch during isel and doesn't add any extensions.
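// e.g. (sketch):
//   (select c, (trunc i32 %a to i8), (trunc i32 %b to i8))
//     ==> (trunc (cmov c, i32 %a, i32 %b) to i8)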
15933 if (Op.getValueType() == MVT::i8 &&
15934 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
15935 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
15936 if (T1.getValueType() == T2.getValueType() &&
15937 // Blacklist CopyFromReg to avoid partial register stalls.
15938 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
15939 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
15940 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond);
15941 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
15942 }
15943 }
15945 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
15946 // condition is true.
15947 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
15948 SDValue Ops[] = { Op2, Op1, CC, Cond };
15949 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops);
15950 }
15952 static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, const X86Subtarget *Subtarget,
15953 SelectionDAG &DAG) {
15954 MVT VT = Op->getSimpleValueType(0);
15955 SDValue In = Op->getOperand(0);
15956 MVT InVT = In.getSimpleValueType();
15957 MVT VTElt = VT.getVectorElementType();
15958 MVT InVTElt = InVT.getVectorElementType();
15959 SDLoc dl(Op);
15961 // SKX processor
15962 if ((InVTElt == MVT::i1) &&
15963 (((Subtarget->hasBWI() && Subtarget->hasVLX() &&
15964 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() <= 16)) ||
15966 ((Subtarget->hasBWI() && VT.is512BitVector() &&
15967 VTElt.getSizeInBits() <= 16)) ||
15969 ((Subtarget->hasDQI() && Subtarget->hasVLX() &&
15970 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) ||
15972 ((Subtarget->hasDQI() && VT.is512BitVector() &&
15973 VTElt.getSizeInBits() >= 32))))
15974 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
15976 unsigned int NumElts = VT.getVectorNumElements();
15978 if (NumElts != 8 && NumElts != 16)
15979 return SDValue();
15981 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) {
15982 if (In.getOpcode() == X86ISD::VSEXT || In.getOpcode() == X86ISD::VZEXT)
15983 return DAG.getNode(In.getOpcode(), dl, VT, In.getOperand(0));
15984 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
15985 }
15987 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15988 assert (InVT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
15990 MVT ExtVT = (NumElts == 8) ? MVT::v8i64 : MVT::v16i32;
15991 Constant *C = ConstantInt::get(*DAG.getContext(),
15992 APInt::getAllOnesValue(ExtVT.getScalarType().getSizeInBits()));
15994 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
15995 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
15996 SDValue Ld = DAG.getLoad(ExtVT.getScalarType(), dl, DAG.getEntryNode(), CP,
15997 MachinePointerInfo::getConstantPool(),
15998 false, false, false, Alignment);
15999 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, dl, ExtVT, In, Ld);
16000 if (VT.is512BitVector())
16001 return Brcst;
16002 return DAG.getNode(X86ISD::VTRUNC, dl, VT, Brcst);
16003 }
16005 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
16006 SelectionDAG &DAG) {
16007 MVT VT = Op->getSimpleValueType(0);
16008 SDValue In = Op->getOperand(0);
16009 MVT InVT = In.getSimpleValueType();
16010 SDLoc dl(Op);
16012 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
16013 return LowerSIGN_EXTEND_AVX512(Op, Subtarget, DAG);
16015 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) &&
16016 (VT != MVT::v8i32 || InVT != MVT::v8i16) &&
16017 (VT != MVT::v16i16 || InVT != MVT::v16i8))
16018 return SDValue();
16020 if (Subtarget->hasInt256())
16021 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16023 // Optimize vectors in AVX mode
16024 // Sign extend v8i16 to v8i32 and
16025 // v4i32 to v4i64
16027 // Divide input vector into two parts
16028 // for v4i32 the shuffle mask will be { 0, 1, -1, -1} {2, 3, -1, -1}
16029 // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32
16030 // concat the vectors to original VT
16032 unsigned NumElems = InVT.getVectorNumElements();
16033 SDValue Undef = DAG.getUNDEF(InVT);
16035 SmallVector<int,8> ShufMask1(NumElems, -1);
16036 for (unsigned i = 0; i != NumElems/2; ++i)
16037 ShufMask1[i] = i;
16039 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]);
16041 SmallVector<int,8> ShufMask2(NumElems, -1);
16042 for (unsigned i = 0; i != NumElems/2; ++i)
16043 ShufMask2[i] = i + NumElems/2;
16045 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]);
16047 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(),
16048 VT.getVectorNumElements()/2);
16050 OpLo = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpLo);
16051 OpHi = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpHi);
16053 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
16054 }
16056 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
16057 // may emit an illegal shuffle but the expansion is still better than scalar
16058 // code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
16059 // we'll emit a shuffle and an arithmetic shift.
16060 // TODO: It is possible to support ZExt by zeroing the undef values during
16061 // the shuffle phase or after the shuffle.
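// For example (illustrative): a v4i8 -> v4i32 sextload without SSE4.1 can
// be emitted as a 32-bit scalar load, a shuffle placing each byte in the
// top byte of its i32 lane, then "psrad $24" to replicate the sign bits.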
16062 static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
16063 SelectionDAG &DAG) {
16064 MVT RegVT = Op.getSimpleValueType();
16065 assert(RegVT.isVector() && "We only custom lower vector sext loads.");
16066 assert(RegVT.isInteger() &&
16067 "We only custom lower integer vector sext loads.");
16069 // Nothing useful we can do without SSE2 shuffles.
16070 assert(Subtarget->hasSSE2() && "We only custom lower sext loads with SSE2.");
16072 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
16073 SDLoc dl(Ld);
16074 EVT MemVT = Ld->getMemoryVT();
16075 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16076 unsigned RegSz = RegVT.getSizeInBits();
16078 ISD::LoadExtType Ext = Ld->getExtensionType();
16080 assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
16081 && "Only anyext and sext are currently implemented.");
16082 assert(MemVT != RegVT && "Cannot extend to the same type");
16083 assert(MemVT.isVector() && "Must load a vector from memory");
16085 unsigned NumElems = RegVT.getVectorNumElements();
16086 unsigned MemSz = MemVT.getSizeInBits();
16087 assert(RegSz > MemSz && "Register size must be greater than the mem size");
16089 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256()) {
16090 // The only way in which we have a legal 256-bit vector result but not the
16091 // integer 256-bit operations needed to directly lower a sextload is if we
16092 // have AVX1 but not AVX2. In that case, we can always emit a sextload to
16093 // a 128-bit vector and a normal sign_extend to 256-bits that should get
16094 // correctly legalized. We do this late to allow the canonical form of
16095 // sextload to persist throughout the rest of the DAG combiner -- it wants
16096 // to fold together any extensions it can, and so will fuse a sign_extend
16097 // of an sextload into a sextload targeting a wider value.
16098 SDValue Load;
16099 if (MemSz == 128) {
16100 // Just switch this to a normal load.
16101 assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
16102 "it must be a legal 128-bit vector "
16103 "type!");
16104 Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
16105 Ld->getPointerInfo(), Ld->isVolatile(), Ld->isNonTemporal(),
16106 Ld->isInvariant(), Ld->getAlignment());
16107 } else {
16108 assert(MemSz < 128 &&
16109 "Can't extend a type wider than 128 bits to a 256 bit vector!");
16110 // Do an sext load to a 128-bit vector type. We want to use the same
16111 // number of elements, but elements half as wide. This will end up being
16112 // recursively lowered by this routine, but will succeed as we definitely
16113 // have all the necessary features if we're using AVX1.
16114 EVT HalfEltVT =
16115 EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
16116 EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
16117 Load =
16118 DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
16119 Ld->getPointerInfo(), MemVT, Ld->isVolatile(),
16120 Ld->isNonTemporal(), Ld->isInvariant(),
16121 Ld->getAlignment());
16122 }
16124 // Replace chain users with the new chain.
16125 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
16126 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
16128 // Finally, do a normal sign-extend to the desired register.
16129 return DAG.getSExtOrTrunc(Load, dl, RegVT);
16130 }
16132 // All sizes must be a power of two.
16133 assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
16134 "Non-power-of-two elements are not custom lowered!");
16136 // Attempt to load the original value using scalar loads.
16137 // Find the largest scalar type that divides the total loaded size.
16138 MVT SclrLoadTy = MVT::i8;
16139 for (MVT Tp : MVT::integer_valuetypes()) {
16140 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
16141 SclrLoadTy = Tp;
16142 }
16143 }
16145 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64.
16146 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
16147 (64 <= MemSz))
16148 SclrLoadTy = MVT::f64;
16150 // Calculate the number of scalar loads that we need to perform
16151 // in order to load our vector from memory.
16152 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
16154 assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
16155 "Can only lower sext loads with a single scalar load!");
16157 unsigned loadRegZize = RegSz;
16158 if (Ext == ISD::SEXTLOAD && RegSz == 256)
16159 loadRegZize = 128;
16161 // Represent our vector as a sequence of elements which are the
16162 // largest scalar that we can load.
16163 EVT LoadUnitVecVT = EVT::getVectorVT(
16164 *DAG.getContext(), SclrLoadTy, loadRegZize / SclrLoadTy.getSizeInBits());
16166 // Represent the data using the same element type that is stored in
16167 // memory. In practice, we ''widen'' MemVT.
16168 EVT WideVecVT =
16169 EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
16170 loadRegZize / MemVT.getScalarType().getSizeInBits());
16172 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
16173 "Invalid vector type");
16175 // We can't shuffle using an illegal type.
16176 assert(TLI.isTypeLegal(WideVecVT) &&
16177 "We only lower types that form legal widened vector types");
16179 SmallVector<SDValue, 8> Chains;
16180 SDValue Ptr = Ld->getBasePtr();
16181 SDValue Increment =
16182 DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, TLI.getPointerTy());
16183 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
16185 for (unsigned i = 0; i < NumLoads; ++i) {
16186 // Perform a single load.
16187 SDValue ScalarLoad =
16188 DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
16189 Ld->isVolatile(), Ld->isNonTemporal(), Ld->isInvariant(),
16190 Ld->getAlignment());
16191 Chains.push_back(ScalarLoad.getValue(1));
16192 // Create the first element type using SCALAR_TO_VECTOR in order to avoid
16193 // another round of DAGCombining.
16194 if (i == 0)
16195 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
16196 else
16197 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
16198 ScalarLoad, DAG.getIntPtrConstant(i));
16200 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
16201 }
16203 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
16205 // Bitcast the loaded value to a vector of the original element type, in
16206 // the size of the target vector type.
16207 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res);
16208 unsigned SizeRatio = RegSz / MemSz;
16210 if (Ext == ISD::SEXTLOAD) {
16211 // If we have SSE4.1, we can directly emit a VSEXT node.
16212 if (Subtarget->hasSSE41()) {
16213 SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec);
16214 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16215 return Sext;
16216 }
16218 // Otherwise we'll shuffle the small elements in the high bits of the
16219 // larger type and perform an arithmetic shift. If the shift is not legal
16220 // it's better to scalarize.
16221 assert(TLI.isOperationLegalOrCustom(ISD::SRA, RegVT) &&
16222 "We can't implement a sext load without an arithmetic right shift!");
16224 // Redistribute the loaded elements into the different locations.
16225 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16226 for (unsigned i = 0; i != NumElems; ++i)
16227 ShuffleVec[i * SizeRatio + SizeRatio - 1] = i;
16229 SDValue Shuff = DAG.getVectorShuffle(
16230 WideVecVT, dl, SlicedVec, DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16232 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16234 // Build the arithmetic shift.
16235 unsigned Amt = RegVT.getVectorElementType().getSizeInBits() -
16236 MemVT.getVectorElementType().getSizeInBits();
16237 Shuff =
16238 DAG.getNode(ISD::SRA, dl, RegVT, Shuff, DAG.getConstant(Amt, RegVT));
16240 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16241 return Shuff;
16242 }
16244 // Redistribute the loaded elements into the different locations.
16245 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16246 for (unsigned i = 0; i != NumElems; ++i)
16247 ShuffleVec[i * SizeRatio] = i;
16249 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
16250 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16252 // Bitcast to the requested type.
16253 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16254 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16255 return Shuff;
16256 }
16258 // isAndOrOfSingleUseSetCCs - Return true if node is an ISD::AND or
16259 // ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart
16260 // from the AND / OR.
16261 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
16262 Opc = Op.getOpcode();
16263 if (Opc != ISD::OR && Opc != ISD::AND)
16264 return false;
16265 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16266 Op.getOperand(0).hasOneUse() &&
16267 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
16268 Op.getOperand(1).hasOneUse());
16269 }
16271 // isXor1OfSetCC - Return true if node is an ISD::XOR of a X86ISD::SETCC and
16272 // 1 and that the SETCC node has a single use.
16273 static bool isXor1OfSetCC(SDValue Op) {
16274 if (Op.getOpcode() != ISD::XOR)
16275 return false;
16276 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
16277 if (N1C && N1C->getAPIntValue() == 1) {
16278 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16279 Op.getOperand(0).hasOneUse();
16280 }
16281 return false;
16282 }
16284 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
16285 bool addTest = true;
16286 SDValue Chain = Op.getOperand(0);
16287 SDValue Cond = Op.getOperand(1);
16288 SDValue Dest = Op.getOperand(2);
16289 SDLoc dl(Op);
16290 SDValue CC;
16291 bool Inverted = false;
16293 if (Cond.getOpcode() == ISD::SETCC) {
16294 // Check for setcc([su]{add,sub,mul}o == 0).
16295 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
16296 isa<ConstantSDNode>(Cond.getOperand(1)) &&
16297 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() &&
16298 Cond.getOperand(0).getResNo() == 1 &&
16299 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
16300 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
16301 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
16302 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
16303 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
16304 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
16305 Inverted = true;
16306 Cond = Cond.getOperand(0);
16307 } else {
16308 SDValue NewCond = LowerSETCC(Cond, DAG);
16309 if (NewCond.getNode())
16310 Cond = NewCond;
16311 }
16312 }
16313 #if 0
16314 // FIXME: LowerXALUO doesn't handle these!!
16315 else if (Cond.getOpcode() == X86ISD::ADD ||
16316 Cond.getOpcode() == X86ISD::SUB ||
16317 Cond.getOpcode() == X86ISD::SMUL ||
16318 Cond.getOpcode() == X86ISD::UMUL)
16319 Cond = LowerXALUO(Cond, DAG);
16320 #endif
16322 // Look past (and (setcc_carry (cmp ...)), 1).
16323 if (Cond.getOpcode() == ISD::AND &&
16324 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16325 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16326 if (C && C->getAPIntValue() == 1)
16327 Cond = Cond.getOperand(0);
16328 }
16330 // If condition flag is set by an X86ISD::CMP, then use it as the condition
16331 // setting operand in place of the X86ISD::SETCC.
16332 unsigned CondOpcode = Cond.getOpcode();
16333 if (CondOpcode == X86ISD::SETCC ||
16334 CondOpcode == X86ISD::SETCC_CARRY) {
16335 CC = Cond.getOperand(0);
16337 SDValue Cmp = Cond.getOperand(1);
16338 unsigned Opc = Cmp.getOpcode();
16339 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
16340 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
16341 Cond = Cmp;
16342 addTest = false;
16343 } else {
16344 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
16345 default: break;
16346 case X86::COND_O:
16347 case X86::COND_B:
16348 // These can only come from an arithmetic instruction with overflow,
16349 // e.g. SADDO, UADDO.
16350 Cond = Cond.getNode()->getOperand(1);
16351 addTest = false;
16352 break;
16353 }
16354 }
16355 }
16356 CondOpcode = Cond.getOpcode();
16357 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16358 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16359 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16360 Cond.getOperand(0).getValueType() != MVT::i8)) {
16361 SDValue LHS = Cond.getOperand(0);
16362 SDValue RHS = Cond.getOperand(1);
16363 unsigned X86Opcode;
16364 unsigned X86Cond;
16365 SDVTList VTs;
16366 // Keep this in sync with LowerXALUO, otherwise we might create redundant
16367 // instructions that can't be removed afterwards (i.e. X86ISD::ADD and
16368 // X86ISD::INC).
16369 switch (CondOpcode) {
16370 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16371 case ISD::SADDO:
16372 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16373 if (C->isOne()) {
16374 X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
16375 break;
16376 }
16377 X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16378 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16379 case ISD::SSUBO:
16380 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16381 if (C->isOne()) {
16382 X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
16383 break;
16384 }
16385 X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16386 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16387 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16388 default: llvm_unreachable("unexpected overflowing operator");
16389 }
16390 if (Inverted)
16391 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
16392 if (CondOpcode == ISD::UMULO)
16393 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16394 MVT::i32);
16395 else
16396 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16398 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);
16400 if (CondOpcode == ISD::UMULO)
16401 Cond = X86Op.getValue(2);
16402 else
16403 Cond = X86Op.getValue(1);
16405 CC = DAG.getConstant(X86Cond, MVT::i8);
16406 addTest = false;
16407 } else {
16408 unsigned CondOpc;
16409 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
16410 SDValue Cmp = Cond.getOperand(0).getOperand(1);
16411 if (CondOpc == ISD::OR) {
16412 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
16413 // two branches instead of an explicit OR instruction with a
16414 // separate test.
16415 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16416 isX86LogicalCmp(Cmp)) {
16417 CC = Cond.getOperand(0).getOperand(0);
16418 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16419 Chain, Dest, CC, Cmp);
16420 CC = Cond.getOperand(1).getOperand(0);
16421 Cond = Cmp;
16422 addTest = false;
16423 }
16424 } else { // ISD::AND
16425 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
16426 // two branches instead of an explicit AND instruction with a
16427 // separate test. However, we only do this if this block doesn't
16428 // have a fall-through edge, because this requires an explicit
16429 // jmp when the condition is false.
16430 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16431 isX86LogicalCmp(Cmp) &&
16432 Op.getNode()->hasOneUse()) {
16433 X86::CondCode CCode =
16434 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16435 CCode = X86::GetOppositeBranchCondition(CCode);
16436 CC = DAG.getConstant(CCode, MVT::i8);
16437 SDNode *User = *Op.getNode()->use_begin();
16438 // Look for an unconditional branch following this conditional branch.
16439 // We need this because we need to reverse the successors in order
16440 // to implement FCMP_OEQ.
16441 if (User->getOpcode() == ISD::BR) {
16442 SDValue FalseBB = User->getOperand(1);
16443 SDNode *NewBR =
16444 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16445 assert(NewBR == User);
16446 (void)NewBR;
16447 Dest = FalseBB;
16449 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16450 Chain, Dest, CC, Cmp);
16451 X86::CondCode CCode =
16452 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
16453 CCode = X86::GetOppositeBranchCondition(CCode);
16454 CC = DAG.getConstant(CCode, MVT::i8);
16455 Cond = Cmp;
16456 addTest = false;
16457 }
16458 }
16459 }
16460 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
16461 // Recognize for xorb (setcc), 1 patterns. The xor inverts the condition.
16462 // It should be transformed during dag combiner except when the condition
16463 // is set by an arithmetic-with-overflow node.
16464 X86::CondCode CCode =
16465 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16466 CCode = X86::GetOppositeBranchCondition(CCode);
16467 CC = DAG.getConstant(CCode, MVT::i8);
16468 Cond = Cond.getOperand(0).getOperand(1);
16469 addTest = false;
16470 } else if (Cond.getOpcode() == ISD::SETCC &&
16471 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
16472 // For FCMP_OEQ, we can emit
16473 // two branches instead of an explicit AND instruction with a
16474 // separate test. However, we only do this if this block doesn't
16475 // have a fall-through edge, because this requires an explicit
16476 // jmp when the condition is false.
16477 if (Op.getNode()->hasOneUse()) {
16478 SDNode *User = *Op.getNode()->use_begin();
16479 // Look for an unconditional branch following this conditional branch.
16480 // We need this because we need to reverse the successors in order
16481 // to implement FCMP_OEQ.
16482 if (User->getOpcode() == ISD::BR) {
16483 SDValue FalseBB = User->getOperand(1);
16484 SDNode *NewBR =
16485 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16486 assert(NewBR == User);
16487 (void)NewBR;
16488 Dest = FalseBB;
16490 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16491 Cond.getOperand(0), Cond.getOperand(1));
16492 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16493 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16494 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16495 Chain, Dest, CC, Cmp);
16496 CC = DAG.getConstant(X86::COND_P, MVT::i8);
16497 Cond = Cmp;
16498 addTest = false;
16499 }
16500 }
16501 } else if (Cond.getOpcode() == ISD::SETCC &&
16502 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
16503 // For FCMP_UNE, we can emit
16504 // two branches instead of an explicit AND instruction with a
16505 // separate test. However, we only do this if this block doesn't
16506 // have a fall-through edge, because this requires an explicit
16507 // jmp when the condition is false.
16508 if (Op.getNode()->hasOneUse()) {
16509 SDNode *User = *Op.getNode()->use_begin();
16510 // Look for an unconditional branch following this conditional branch.
16511 // We need this because we need to reverse the successors in order
16512 // to implement FCMP_UNE.
16513 if (User->getOpcode() == ISD::BR) {
16514 SDValue FalseBB = User->getOperand(1);
16515 SDNode *NewBR =
16516 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16517 assert(NewBR == User);
16518 (void)NewBR;
16520 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16521 Cond.getOperand(0), Cond.getOperand(1));
16522 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16523 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16524 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16525 Chain, Dest, CC, Cmp);
16526 CC = DAG.getConstant(X86::COND_NP, MVT::i8);
16527 Cond = Cmp;
16528 addTest = false;
16529 Dest = FalseBB;
16530 }
16531 }
16532 }
16534 if (addTest) {
16536 // Look past the truncate if the high bits are known zero.
16537 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16538 Cond = Cond.getOperand(0);
16540 // We know the result of AND is compared against zero. Try to match
16541 // it to BT.
16542 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16543 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
16544 if (NewSetCC.getNode()) {
16545 CC = NewSetCC.getOperand(0);
16546 Cond = NewSetCC.getOperand(1);
16547 addTest = false;
16548 }
16549 }
16550 }
16552 if (addTest) {
16553 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
16554 CC = DAG.getConstant(X86Cond, MVT::i8);
16555 Cond = EmitTest(Cond, X86Cond, dl, DAG);
16556 }
16557 Cond = ConvertCmpIfNecessary(Cond, DAG);
16558 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16559 Chain, Dest, CC, Cond);
16560 }
16562 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
16563 // Calls to _alloca are needed to probe the stack when allocating more than 4k
16564 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
16565 // that the guard pages used by the OS virtual memory manager are allocated in
16566 // correct sequence.
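// For example (illustrative): a dynamic allocation of 16K on Windows is
// routed through the stack probe helper (e.g. _alloca/__chkstk; the exact
// symbol varies by environment), which touches SP-4K, SP-8K, ... in order.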
16567 SDValue
16568 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
16569 SelectionDAG &DAG) const {
16570 MachineFunction &MF = DAG.getMachineFunction();
16571 bool SplitStack = MF.shouldSplitStack();
16572 bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMachO()) ||
16573 SplitStack;
16574 SDLoc dl(Op);
16576 if (!Lower) {
16577 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16578 SDNode* Node = Op.getNode();
16580 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
16581 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
16582 " not tell us which reg is the stack pointer!");
16583 EVT VT = Node->getValueType(0);
16584 SDValue Tmp1 = SDValue(Node, 0);
16585 SDValue Tmp2 = SDValue(Node, 1);
16586 SDValue Tmp3 = Node->getOperand(2);
16587 SDValue Chain = Tmp1.getOperand(0);
16589 // Chain the dynamic stack allocation so that it doesn't modify the stack
16590 // pointer when other instructions are using the stack.
16591 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true),
16592 SDLoc(Node));
16594 SDValue Size = Tmp2.getOperand(1);
16595 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
16596 Chain = SP.getValue(1);
16597 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
16598 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
16599 unsigned StackAlign = TFI.getStackAlignment();
16600 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
16601 if (Align > StackAlign)
16602 Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
16603 DAG.getConstant(-(uint64_t)Align, VT));
16604 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
16606 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
16607 DAG.getIntPtrConstant(0, true), SDValue(),
16608 SDLoc(Node));
16610 SDValue Ops[2] = { Tmp1, Tmp2 };
16611 return DAG.getMergeValues(Ops, dl);
16612 }
16614 // Get the inputs.
16615 SDValue Chain = Op.getOperand(0);
16616 SDValue Size = Op.getOperand(1);
16617 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
16618 EVT VT = Op.getNode()->getValueType(0);
16620 bool Is64Bit = Subtarget->is64Bit();
16621 EVT SPTy = getPointerTy();
16623 if (SplitStack) {
16624 MachineRegisterInfo &MRI = MF.getRegInfo();
16626 if (Is64Bit) {
16627 // The 64 bit implementation of segmented stacks needs to clobber both r10
16628 // r11. This makes it impossible to use it along with nested parameters.
16629 const Function *F = MF.getFunction();
16631 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
16632 I != E; ++I)
16633 if (I->hasNestAttr())
16634 report_fatal_error("Cannot use segmented stacks with functions that "
16635 "have nested arguments.");
16636 }
16638 const TargetRegisterClass *AddrRegClass =
16639 getRegClassFor(getPointerTy());
16640 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
16641 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
16642 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
16643 DAG.getRegister(Vreg, SPTy));
16644 SDValue Ops1[2] = { Value, Chain };
16645 return DAG.getMergeValues(Ops1, dl);
16646 } else {
16647 SDValue Flag;
16648 const unsigned Reg = (Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX);
16650 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
16651 Flag = Chain.getValue(1);
16652 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
16654 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
16656 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
16657 unsigned SPReg = RegInfo->getStackRegister();
16658 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
16659 Chain = SP.getValue(1);
16661 if (Align) {
16662 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
16663 DAG.getConstant(-(uint64_t)Align, VT));
16664 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
16665 }
16667 SDValue Ops1[2] = { SP, Chain };
16668 return DAG.getMergeValues(Ops1, dl);
16669 }
16670 }
16672 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
16673 MachineFunction &MF = DAG.getMachineFunction();
16674 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
16676 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
16677 SDLoc DL(Op);
16679 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
16680 // vastart just stores the address of the VarArgsFrameIndex slot into the
16681 // memory location argument.
16682 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
16683 getPointerTy());
16684 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
16685 MachinePointerInfo(SV), false, false, 0);
16686 }
16688 // __va_list_tag:
16689 // gp_offset (0 - 6 * 8)
16690 // fp_offset (48 - 48 + 8 * 16)
16691 // overflow_arg_area (point to parameters coming in memory).
16692 // reg_save_area
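// Equivalent C view of the struct initialized below (sketch, SysV AMD64):
//   struct __va_list_tag {
//     unsigned gp_offset;       // written at FIN + 0
//     unsigned fp_offset;       // written at FIN + 4
//     void *overflow_arg_area;  // written at FIN + 8
//     void *reg_save_area;      // written at FIN + 16
//   };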
16693 SmallVector<SDValue, 8> MemOps;
16694 SDValue FIN = Op.getOperand(1);
16695 // Store gp_offset
16696 SDValue Store = DAG.getStore(Op.getOperand(0), DL,
16697 DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
16698 MVT::i32),
16699 FIN, MachinePointerInfo(SV), false, false, 0);
16700 MemOps.push_back(Store);
16702 // Store fp_offset
16703 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
16704 FIN, DAG.getIntPtrConstant(4));
16705 Store = DAG.getStore(Op.getOperand(0), DL,
16706 DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
16707 MVT::i32),
16708 FIN, MachinePointerInfo(SV, 4), false, false, 0);
16709 MemOps.push_back(Store);
16711 // Store ptr to overflow_arg_area
16712 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
16713 FIN, DAG.getIntPtrConstant(4));
16714 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
16715 getPointerTy());
16716 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
16717 MachinePointerInfo(SV, 8),
16718 false, false, 0);
16719 MemOps.push_back(Store);
16721 // Store ptr to reg_save_area.
16722 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
16723 FIN, DAG.getIntPtrConstant(8));
16724 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
16725 getPointerTy());
16726 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN,
16727 MachinePointerInfo(SV, 16), false, false, 0);
16728 MemOps.push_back(Store);
16729 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
16730 }
16732 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
16733 assert(Subtarget->is64Bit() &&
16734 "LowerVAARG only handles 64-bit va_arg!");
16735 assert((Subtarget->isTargetLinux() ||
16736 Subtarget->isTargetDarwin()) &&
16737 "Unhandled target in LowerVAARG");
16738 assert(Op.getNode()->getNumOperands() == 4);
16739 SDValue Chain = Op.getOperand(0);
16740 SDValue SrcPtr = Op.getOperand(1);
16741 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
16742 unsigned Align = Op.getConstantOperandVal(3);
16743 SDLoc dl(Op);
16745 EVT ArgVT = Op.getNode()->getValueType(0);
16746 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
16747 uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy);
16748 uint8_t ArgMode;
16750 // Decide which area this value should be read from.
16751 // TODO: Implement the AMD64 ABI in its entirety. This simple
16752 // selection mechanism works only for the basic types.
16753 if (ArgVT == MVT::f80) {
16754 llvm_unreachable("va_arg for f80 not yet implemented");
16755 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
16756 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
16757 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
16758 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
16759 } else {
16760 llvm_unreachable("Unhandled argument type in LowerVAARG");
16761 }
16763 if (ArgMode == 2) {
16764 // Sanity Check: Make sure using fp_offset makes sense.
16765 assert(!DAG.getTarget().Options.UseSoftFloat &&
16766 !(DAG.getMachineFunction()
16767 .getFunction()->getAttributes()
16768 .hasAttribute(AttributeSet::FunctionIndex,
16769 Attribute::NoImplicitFloat)) &&
16770 Subtarget->hasSSE1());
16771 }
16773 // Insert VAARG_64 node into the DAG
16774 // VAARG_64 returns two values: Variable Argument Address, Chain
16775 SmallVector<SDValue, 11> InstOps;
16776 InstOps.push_back(Chain);
16777 InstOps.push_back(SrcPtr);
16778 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32));
16779 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8));
16780 InstOps.push_back(DAG.getConstant(Align, MVT::i32));
16781 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
16782 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
16783 VTs, InstOps, MVT::i64,
16784 MachinePointerInfo(SV),
16785 /*Align=*/0,
16786 /*Volatile=*/false,
16787 /*ReadMem=*/true,
16788 /*WriteMem=*/true);
16789 Chain = VAARG.getValue(1);
16791 // Load the next argument and return it
16792 return DAG.getLoad(ArgVT, dl,
16793 Chain,
16794 VAARG,
16795 MachinePointerInfo(),
16796 false, false, false, 0);
16797 }
16799 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
16800 SelectionDAG &DAG) {
16801 // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
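// That is 4 + 4 + 8 + 8 == 24 bytes in total, which is why a fixed
// 24-byte, 8-byte-aligned memcpy suffices below.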
16802 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
16803 SDValue Chain = Op.getOperand(0);
16804 SDValue DstPtr = Op.getOperand(1);
16805 SDValue SrcPtr = Op.getOperand(2);
16806 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
16807 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
16808 SDLoc DL(Op);
16810 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
16811 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
16812 false,
16813 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
16814 }
16816 // getTargetVShiftByConstNode - Handle vector element shifts where the shift
16817 // amount is a constant. Takes immediate version of shift as input.
16818 static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
16819 SDValue SrcOp, uint64_t ShiftAmt,
16820 SelectionDAG &DAG) {
16821 MVT ElementType = VT.getVectorElementType();
16823 // Fold this packed shift into its first operand if ShiftAmt is 0.
16824 if (ShiftAmt == 0)
16825 return SrcOp;
16827 // Check for ShiftAmt >= element width
16828 if (ShiftAmt >= ElementType.getSizeInBits()) {
16829 if (Opc == X86ISD::VSRAI)
16830 ShiftAmt = ElementType.getSizeInBits() - 1;
16831 else
16832 return DAG.getConstant(0, VT);
16833 }
16835 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
16836 && "Unknown target vector shift-by-constant node");
16838 // Fold this packed vector shift into a build vector if SrcOp is a
16839 // vector of Constants or UNDEFs, and SrcOp valuetype is the same as VT.
16840 if (VT == SrcOp.getSimpleValueType() &&
16841 ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
16842 SmallVector<SDValue, 8> Elts;
16843 unsigned NumElts = SrcOp->getNumOperands();
16844 ConstantSDNode *ND;
16846 switch (Opc) {
16847 default: llvm_unreachable(nullptr);
16848 case X86ISD::VSHLI:
16849 for (unsigned i=0; i!=NumElts; ++i) {
16850 SDValue CurrentOp = SrcOp->getOperand(i);
16851 if (CurrentOp->getOpcode() == ISD::UNDEF) {
16852 Elts.push_back(CurrentOp);
16853 continue;
16854 }
16855 ND = cast<ConstantSDNode>(CurrentOp);
16856 const APInt &C = ND->getAPIntValue();
16857 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), ElementType));
16858 }
16859 break;
16860 case X86ISD::VSRLI:
16861 for (unsigned i=0; i!=NumElts; ++i) {
16862 SDValue CurrentOp = SrcOp->getOperand(i);
16863 if (CurrentOp->getOpcode() == ISD::UNDEF) {
16864 Elts.push_back(CurrentOp);
16865 continue;
16866 }
16867 ND = cast<ConstantSDNode>(CurrentOp);
16868 const APInt &C = ND->getAPIntValue();
16869 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), ElementType));
16870 }
16871 break;
16872 case X86ISD::VSRAI:
16873 for (unsigned i=0; i!=NumElts; ++i) {
16874 SDValue CurrentOp = SrcOp->getOperand(i);
16875 if (CurrentOp->getOpcode() == ISD::UNDEF) {
16876 Elts.push_back(CurrentOp);
16877 continue;
16878 }
16879 ND = cast<ConstantSDNode>(CurrentOp);
16880 const APInt &C = ND->getAPIntValue();
16881 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), ElementType));
16882 }
16883 break;
16884 }
16886 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
16887 }
16889 return DAG.getNode(Opc, dl, VT, SrcOp, DAG.getConstant(ShiftAmt, MVT::i8));
16890 }
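// Usage sketch (hypothetical operands): a constant-splat shift such as
//   getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v4i32,
//                              <1, 2, 3, 4>, 2, DAG)
// folds straight to the BUILD_VECTOR <4, 8, 12, 16> with no shift emitted.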
16892 // getTargetVShiftNode - Handle vector element shifts where the shift amount
16893 // may or may not be a constant. Takes immediate version of shift as input.
16894 static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
16895 SDValue SrcOp, SDValue ShAmt,
16896 SelectionDAG &DAG) {
16897 MVT SVT = ShAmt.getSimpleValueType();
16898 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
16900 // Catch shift-by-constant.
16901 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
16902 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
16903 CShAmt->getZExtValue(), DAG);
16905 // Change opcode to non-immediate version.
16906 switch (Opc) {
16907 default: llvm_unreachable("Unknown target vector shift node");
16908 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
16909 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
16910 case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
16911 }
16913 const X86Subtarget &Subtarget =
16914 static_cast<const X86Subtarget &>(DAG.getSubtarget());
16915 if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
16916 ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
16917 // Let the shuffle legalizer expand this shift amount node.
16918 SDValue Op0 = ShAmt.getOperand(0);
16919 Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
16920 ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, &Subtarget, DAG);
16921 } else {
16922 // Need to build a vector containing the shift amount.
16923 // SSE/AVX packed shifts only use the lower 64-bit of the shift count.
16924 SmallVector<SDValue, 4> ShOps;
16925 ShOps.push_back(ShAmt);
16926 if (SVT == MVT::i32) {
16927 ShOps.push_back(DAG.getConstant(0, SVT));
16928 ShOps.push_back(DAG.getUNDEF(SVT));
16929 }
16930 ShOps.push_back(DAG.getUNDEF(SVT));
16932 MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
16933 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
16934 }
16936 // The return type has to be a 128-bit type with the same element
16937 // type as the input type.
16938 MVT EltVT = VT.getVectorElementType();
16939 EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());
16941 ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt);
16942 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
16943 }
16945 /// \brief Return (and \p Op, \p Mask) for compare instructions or
16946 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
16947 /// necessary casting for \p Mask when lowering masking intrinsics.
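/// For example (sketch): a masked AVX-512 compare lowers to
///   (and (pcmpeqm %a, %b), %mask)
/// whereas a masked arithmetic op lowers to
///   (vselect %mask, (op %a, %b), %passthru).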
16948 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
16949 SDValue PreservedSrc,
16950 const X86Subtarget *Subtarget,
16951 SelectionDAG &DAG) {
16952 EVT VT = Op.getValueType();
16953 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(),
16954 MVT::i1, VT.getVectorNumElements());
16955 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
16956 Mask.getValueType().getSizeInBits());
16957 SDLoc dl(Op);
16959 assert(MaskVT.isSimple() && "invalid mask type");
16961 if (isAllOnes(Mask))
16962 return Op;
16964 // When MaskVT equals v2i1 or v4i1, the low 2 or 4 elements
16965 // are extracted by EXTRACT_SUBVECTOR.
16966 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
16967 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
16968 DAG.getIntPtrConstant(0));
16970 switch (Op.getOpcode()) {
16971 default: break;
16972 case X86ISD::PCMPEQM:
16973 case X86ISD::PCMPGTM:
16974 case X86ISD::CMPM:
16975 case X86ISD::CMPMU:
16976 return DAG.getNode(ISD::AND, dl, VT, Op, VMask);
16977 }
16978 if (PreservedSrc.getOpcode() == ISD::UNDEF)
16979 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
16980 return DAG.getNode(ISD::VSELECT, dl, VT, VMask, Op, PreservedSrc);
16981 }
16983 /// \brief Creates an SDNode for a predicated scalar operation.
16984 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
16985 /// The mask is coming as MVT::i8 and it should be truncated
16986 /// to MVT::i1 while lowering masking intrinsics.
16987 /// The main difference between ScalarMaskingNode and VectorMaskingNode is using
16988 /// "X86select" instead of "vselect". We just can't create the "vselect" node for
16989 /// a scalar instruction.
16990 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
16991 SDValue PreservedSrc,
16992 const X86Subtarget *Subtarget,
16993 SelectionDAG &DAG) {
16994 if (isAllOnes(Mask))
16995 return Op;
16997 EVT VT = Op.getValueType();
16998 SDLoc dl(Op);
16999 // The mask should be of type MVT::i1
17000 SDValue IMask = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Mask);
17002 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17003 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17004 return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc);
17005 }
17007 static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17008 SelectionDAG &DAG) {
17009 SDLoc dl(Op);
17010 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17011 EVT VT = Op.getValueType();
17012 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
17013 if (IntrData) {
17014 switch(IntrData->Type) {
17015 case INTR_TYPE_1OP:
17016 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
17017 case INTR_TYPE_2OP:
17018 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17020 case INTR_TYPE_3OP:
17021 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17022 Op.getOperand(2), Op.getOperand(3));
17023 case INTR_TYPE_1OP_MASK_RM: {
17024 SDValue Src = Op.getOperand(1);
17025 SDValue Src0 = Op.getOperand(2);
17026 SDValue Mask = Op.getOperand(3);
17027 SDValue RoundingMode = Op.getOperand(4);
17028 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
17029 RoundingMode),
17030 Mask, Src0, Subtarget, DAG);
17031 }
17032 case INTR_TYPE_SCALAR_MASK_RM: {
17033 SDValue Src1 = Op.getOperand(1);
17034 SDValue Src2 = Op.getOperand(2);
17035 SDValue Src0 = Op.getOperand(3);
17036 SDValue Mask = Op.getOperand(4);
17037 SDValue RoundingMode = Op.getOperand(5);
17038 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
17039 RoundingMode),
17040 Mask, Src0, Subtarget, DAG);
17041 }
17042 case INTR_TYPE_2OP_MASK: {
17043 SDValue Mask = Op.getOperand(4);
17044 SDValue PassThru = Op.getOperand(3);
17045 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17046 if (IntrWithRoundingModeOpcode != 0) {
17047 unsigned Round = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
17048 if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION) {
17049 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17050 dl, Op.getValueType(),
17051 Op.getOperand(1), Op.getOperand(2),
17052 Op.getOperand(3), Op.getOperand(5)),
17053 Mask, PassThru, Subtarget, DAG);
17054 }
17055 }
17056 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
17057 Op.getOperand(1),
17058 Op.getOperand(2)),
17059 Mask, PassThru, Subtarget, DAG);
17060 }
17061 case FMA_OP_MASK: {
17062 SDValue Src1 = Op.getOperand(1);
17063 SDValue Src2 = Op.getOperand(2);
17064 SDValue Src3 = Op.getOperand(3);
17065 SDValue Mask = Op.getOperand(4);
17066 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17067 if (IntrWithRoundingModeOpcode != 0) {
17068 SDValue Rnd = Op.getOperand(5);
17069 if (cast<ConstantSDNode>(Rnd)->getZExtValue() !=
17070 X86::STATIC_ROUNDING::CUR_DIRECTION)
17071 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17072 dl, Op.getValueType(),
17073 Src1, Src2, Src3, Rnd),
17074 Mask, Src1, Subtarget, DAG);
17075 }
17076 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
17077 dl, Op.getValueType(),
17078 Src1, Src2, Src3),
17079 Mask, Src1, Subtarget, DAG);
17080 }
17081 case CMP_MASK:
17082 case CMP_MASK_CC: {
17083 // Comparison intrinsics with masks.
17084 // Example of transformation:
17085 // (i8 (int_x86_avx512_mask_pcmpeq_q_128
17086 // (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
17088 // (v8i1 (insert_subvector undef,
17089 // (v2i1 (and (PCMPEQM %a, %b),
17090 // (extract_subvector
17091 // (v8i1 (bitcast %mask)), 0))), 0))))
17092 EVT VT = Op.getOperand(1).getValueType();
17093 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17094 VT.getVectorNumElements());
17095 SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3);
17096 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17097 Mask.getValueType().getSizeInBits());
17098 SDValue Cmp;
17099 if (IntrData->Type == CMP_MASK_CC) {
17100 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17101 Op.getOperand(2), Op.getOperand(3));
17102 } else {
17103 assert(IntrData->Type == CMP_MASK && "Unexpected intrinsic type!");
17104 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17105 Op.getOperand(2));
17106 }
17107 SDValue CmpMask = getVectorMaskingNode(Cmp, Mask,
17108 DAG.getTargetConstant(0, MaskVT),
17109 Subtarget, DAG);
17110 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
17111 DAG.getUNDEF(BitcastVT), CmpMask,
17112 DAG.getIntPtrConstant(0));
17113 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
  case COMI: { // Comparison intrinsics
    ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
    SDValue LHS = Op.getOperand(1);
    SDValue RHS = Op.getOperand(2);
    unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
    assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
    SDValue Cond = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
    SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                                DAG.getConstant(X86CC, MVT::i8), Cond);
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }
  case VSHIFT:
    return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
                               Op.getOperand(1), Op.getOperand(2), DAG);
  case VSHIFT_MASK:
    return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl,
                                                    Op.getSimpleValueType(),
                                                    Op.getOperand(1),
                                                    Op.getOperand(2), DAG),
                                Op.getOperand(4), Op.getOperand(3), Subtarget,
                                DAG);
  case COMPRESS_EXPAND_IN_REG: {
    SDValue Mask = Op.getOperand(3);
    SDValue DataToCompress = Op.getOperand(1);
    SDValue PassThru = Op.getOperand(2);
    if (isAllOnes(Mask)) // return data as is
      return Op.getOperand(1);
    EVT VT = Op.getValueType();
    EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                  VT.getVectorNumElements());
    EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                     Mask.getValueType().getSizeInBits());

    SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
                                DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
                                DAG.getIntPtrConstant(0));

    return DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToCompress,
                       PassThru);
  }
  case BLEND: {
    SDValue Mask = Op.getOperand(3);
    EVT VT = Op.getValueType();
    EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                  VT.getVectorNumElements());
    EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                     Mask.getValueType().getSizeInBits());

    SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
                                DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
                                DAG.getIntPtrConstant(0));
    return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1),
                       Op.getOperand(2));
  }
  default:
    break;
  }
  }

  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.

  case Intrinsic::x86_avx512_mask_valign_q_512:
  case Intrinsic::x86_avx512_mask_valign_d_512:
    // Vector source operands are swapped.
    return getVectorMaskingNode(DAG.getNode(X86ISD::VALIGN, dl,
                                            Op.getValueType(), Op.getOperand(2),
                                            Op.getOperand(1),
                                            Op.getOperand(3)),
                                Op.getOperand(5), Op.getOperand(4),
                                Subtarget, DAG);
  // ptest and testp intrinsics. The intrinsics these come from are designed
  // to return an integer value, not just an instruction, so lower them to the
  // ptest or testp pattern and a setcc for the result.
  case Intrinsic::x86_sse41_ptestz:
  case Intrinsic::x86_sse41_ptestc:
  case Intrinsic::x86_sse41_ptestnzc:
  case Intrinsic::x86_avx_ptestz_256:
  case Intrinsic::x86_avx_ptestc_256:
  case Intrinsic::x86_avx_ptestnzc_256:
  case Intrinsic::x86_avx_vtestz_ps:
  case Intrinsic::x86_avx_vtestc_ps:
  case Intrinsic::x86_avx_vtestnzc_ps:
  case Intrinsic::x86_avx_vtestz_pd:
  case Intrinsic::x86_avx_vtestc_pd:
  case Intrinsic::x86_avx_vtestnzc_pd:
  case Intrinsic::x86_avx_vtestz_ps_256:
  case Intrinsic::x86_avx_vtestc_ps_256:
  case Intrinsic::x86_avx_vtestnzc_ps_256:
  case Intrinsic::x86_avx_vtestz_pd_256:
  case Intrinsic::x86_avx_vtestc_pd_256:
  case Intrinsic::x86_avx_vtestnzc_pd_256: {
    bool IsTestPacked = false;
    unsigned X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
    case Intrinsic::x86_avx_vtestz_ps:
    case Intrinsic::x86_avx_vtestz_pd:
    case Intrinsic::x86_avx_vtestz_ps_256:
    case Intrinsic::x86_avx_vtestz_pd_256:
      IsTestPacked = true; // Fallthrough
    case Intrinsic::x86_sse41_ptestz:
    case Intrinsic::x86_avx_ptestz_256:
      // ZF = 1
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_avx_vtestc_ps:
    case Intrinsic::x86_avx_vtestc_pd:
    case Intrinsic::x86_avx_vtestc_ps_256:
    case Intrinsic::x86_avx_vtestc_pd_256:
      IsTestPacked = true; // Fallthrough
    case Intrinsic::x86_sse41_ptestc:
    case Intrinsic::x86_avx_ptestc_256:
      // CF = 1
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_avx_vtestnzc_ps:
    case Intrinsic::x86_avx_vtestnzc_pd:
    case Intrinsic::x86_avx_vtestnzc_ps_256:
    case Intrinsic::x86_avx_vtestnzc_pd_256:
      IsTestPacked = true; // Fallthrough
    case Intrinsic::x86_sse41_ptestnzc:
    case Intrinsic::x86_avx_ptestnzc_256:
      // ZF and CF = 0
      X86CC = X86::COND_A;
      break;
    }

    SDValue LHS = Op.getOperand(1);
    SDValue RHS = Op.getOperand(2);
    unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
    SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
    SDValue CC = DAG.getConstant(X86CC, MVT::i8);
    SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }
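  // For reference: PTEST sets ZF = ((LHS & RHS) == 0) and CF =
  // ((~LHS & RHS) == 0), while TESTPS/TESTPD compute the same flags from only
  // the sign bit of each element. COND_E, COND_B and COND_A above therefore
  // select exactly the ptestz, ptestc and ptestnzc results.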
  case Intrinsic::x86_avx512_kortestz_w:
  case Intrinsic::x86_avx512_kortestc_w: {
    unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz_w) ?
                     X86::COND_E : X86::COND_B;
    SDValue LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(1));
    SDValue RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(2));
    SDValue CC = DAG.getConstant(X86CC, MVT::i8);
    SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
    SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test);
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }
  case Intrinsic::x86_sse42_pcmpistria128:
  case Intrinsic::x86_sse42_pcmpestria128:
  case Intrinsic::x86_sse42_pcmpistric128:
  case Intrinsic::x86_sse42_pcmpestric128:
  case Intrinsic::x86_sse42_pcmpistrio128:
  case Intrinsic::x86_sse42_pcmpestrio128:
  case Intrinsic::x86_sse42_pcmpistris128:
  case Intrinsic::x86_sse42_pcmpestris128:
  case Intrinsic::x86_sse42_pcmpistriz128:
  case Intrinsic::x86_sse42_pcmpestriz128: {
    unsigned Opcode;
    unsigned X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
    case Intrinsic::x86_sse42_pcmpistria128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpestria128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpistric128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpestric128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpistrio128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpestrio128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpistris128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpestris128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpistriz128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_sse42_pcmpestriz128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_E;
      break;
    }
    SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
    SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps);
    SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                                DAG.getConstant(X86CC, MVT::i8),
                                SDValue(PCMP.getNode(), 1));
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }
  case Intrinsic::x86_sse42_pcmpistri128:
  case Intrinsic::x86_sse42_pcmpestri128: {
    unsigned Opcode;
    if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
      Opcode = X86ISD::PCMPISTRI;
    else
      Opcode = X86ISD::PCMPESTRI;

    SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
    return DAG.getNode(Opcode, dl, VTs, NewOps);
  }
  }
}
static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
                             SDValue Src, SDValue Mask, SDValue Base,
                             SDValue Index, SDValue ScaleOp, SDValue Chain,
                             const X86Subtarget * Subtarget) {
  SDLoc dl(Op);
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
  assert(C && "Invalid scale type");
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
  EVT MaskVT = MVT::getVectorVT(MVT::i1,
                             Index.getSimpleValueType().getVectorNumElements());
  SDValue MaskInReg;
  ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
  if (MaskC)
    MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
  else
    MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
  SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
  SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
  SDValue Segment = DAG.getRegister(0, MVT::i32);
  if (Src.getOpcode() == ISD::UNDEF)
    Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
  SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
  SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
  SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
  return DAG.getMergeValues(RetOps, dl);
}
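// The operand order built above follows the standard X86 memory-operand
// 5-tuple (Base, Scale, Index, Disp, Segment): lane i of the gather accesses
// Base + Index[i] * Scale + Disp, and the zero Segment register means no
// segment override.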
static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
                              SDValue Src, SDValue Mask, SDValue Base,
                              SDValue Index, SDValue ScaleOp, SDValue Chain) {
  SDLoc dl(Op);
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
  assert(C && "Invalid scale type");
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
  SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
  SDValue Segment = DAG.getRegister(0, MVT::i32);
  EVT MaskVT = MVT::getVectorVT(MVT::i1,
                             Index.getSimpleValueType().getVectorNumElements());
  SDValue MaskInReg;
  ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
  if (MaskC)
    MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
  else
    MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
  SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
  SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
  SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
  return SDValue(Res, 1);
}
static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
                               SDValue Mask, SDValue Base, SDValue Index,
                               SDValue ScaleOp, SDValue Chain) {
  SDLoc dl(Op);
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
  assert(C && "Invalid scale type");
  SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
  SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
  SDValue Segment = DAG.getRegister(0, MVT::i32);
  EVT MaskVT =
   MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
  SDValue MaskInReg;
  ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
  if (MaskC)
    MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
  else
    MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
  //SDVTList VTs = DAG.getVTList(MVT::Other);
  SDValue Ops[] = {MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
  SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
  return SDValue(Res, 0);
}
// getReadPerformanceCounter - Handles the lowering of builtin intrinsics that
// read performance monitor counters (x86_rdpmc).
static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
                              SelectionDAG &DAG, const X86Subtarget *Subtarget,
                              SmallVectorImpl<SDValue> &Results) {
  assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue LO, HI;

  // The ECX register is used to select the index of the performance counter
  // to read.
  SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
                                   N->getOperand(2));
  SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);

  // Reads the content of a 64-bit performance counter and returns it in the
  // registers EDX:EAX.
  if (Subtarget->is64Bit()) {
    LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
                            LO.getValue(2));
  } else {
    LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
                            LO.getValue(2));
  }
  Chain = HI.getValue(1);

  if (Subtarget->is64Bit()) {
    // The EAX register is loaded with the low-order 32 bits. The EDX register
    // is loaded with the supported high-order bits of the counter.
    SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
                              DAG.getConstant(32, MVT::i8));
    Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
    Results.push_back(Chain);
    return;
  }

  // Use a buildpair to merge the two 32-bit values into a 64-bit one.
  SDValue Ops[] = { LO, HI };
  SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
  Results.push_back(Pair);
  Results.push_back(Chain);
}
// getReadTimeStampCounter - Handles the lowering of builtin intrinsics that
// read the time stamp counter (x86_rdtsc and x86_rdtscp). This function is
// also used to custom lower READCYCLECOUNTER nodes.
static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
                              SelectionDAG &DAG, const X86Subtarget *Subtarget,
                              SmallVectorImpl<SDValue> &Results) {
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
  SDValue LO, HI;

  // The processor's time-stamp counter (a 64-bit MSR) is stored into the
  // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
  // and the EAX register is loaded with the low-order 32 bits.
  if (Subtarget->is64Bit()) {
    LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
                            LO.getValue(2));
  } else {
    LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
                            LO.getValue(2));
  }
  SDValue Chain = HI.getValue(1);

  if (Opcode == X86ISD::RDTSCP_DAG) {
    assert(N->getNumOperands() == 3 && "Unexpected number of operands!");

    // Instruction RDTSCP loads the IA32:TSC_AUX_MSR (address C000_0103H) into
    // the ECX register. Add 'ecx' explicitly to the chain.
    SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
                                     HI.getValue(2));
    // Explicitly store the content of ECX at the location passed in input
    // to the 'rdtscp' intrinsic.
    Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
                         MachinePointerInfo(), false, false, 0);
  }

  if (Subtarget->is64Bit()) {
    // The EDX register is loaded with the high-order 32 bits of the MSR, and
    // the EAX register is loaded with the low-order 32 bits.
    SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
                              DAG.getConstant(32, MVT::i8));
    Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
    Results.push_back(Chain);
    return;
  }

  // Use a buildpair to merge the two 32-bit values into a 64-bit one.
  SDValue Ops[] = { LO, HI };
  SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
  Results.push_back(Pair);
  Results.push_back(Chain);
}
static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
                                     SelectionDAG &DAG) {
  SmallVector<SDValue, 2> Results;
  SDLoc DL(Op);
  getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
                          Results);
  return DAG.getMergeValues(Results, DL);
}
static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
                                      SelectionDAG &DAG) {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();

  const IntrinsicData* IntrData = getIntrinsicWithChain(IntNo);
  if (!IntrData)
    return SDValue();

  SDLoc dl(Op);
  switch(IntrData->Type) {
  default:
    llvm_unreachable("Unknown Intrinsic Type");
    break;
  case RDSEED:
  case RDRAND: {
    // Emit the node with the right value type.
    SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
    SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));

    // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
    // Otherwise return the value from Rand, which is always 0, cast to i32.
    SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
                      DAG.getConstant(1, Op->getValueType(1)),
                      DAG.getConstant(X86::COND_B, MVT::i32),
                      SDValue(Result.getNode(), 1) };
    SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
                                  DAG.getVTList(Op->getValueType(1), MVT::Glue),
                                  Ops);

    // Return { result, isValid, chain }.
    return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
                       SDValue(Result.getNode(), 2));
  }
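  // In C terms, the RDRAND/RDSEED case above implements e.g.
  // _rdrand32_step(&r): the CMOV on COND_B yields 1 when the instruction set
  // CF (entropy was available) and otherwise the instruction's result, which
  // is defined to be 0 in that case. (Illustrative mapping; the exact builtin
  // depends on the front end.)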
  case GATHER: {
    //gather(v1, mask, index, base, scale);
    SDValue Chain = Op.getOperand(0);
    SDValue Src   = Op.getOperand(2);
    SDValue Base  = Op.getOperand(3);
    SDValue Index = Op.getOperand(4);
    SDValue Mask  = Op.getOperand(5);
    SDValue Scale = Op.getOperand(6);
    return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
                         Scale, Chain, Subtarget);
  }
  case SCATTER: {
    //scatter(base, mask, index, v1, scale);
    SDValue Chain = Op.getOperand(0);
    SDValue Base  = Op.getOperand(2);
    SDValue Mask  = Op.getOperand(3);
    SDValue Index = Op.getOperand(4);
    SDValue Src   = Op.getOperand(5);
    SDValue Scale = Op.getOperand(6);
    return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
                          Scale, Chain);
  }
  case PREFETCH: {
    SDValue Hint = Op.getOperand(6);
    unsigned HintVal;
    if (dyn_cast<ConstantSDNode> (Hint) == nullptr ||
        (HintVal = dyn_cast<ConstantSDNode> (Hint)->getZExtValue()) > 1)
      llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
    unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
    SDValue Chain = Op.getOperand(0);
    SDValue Mask  = Op.getOperand(2);
    SDValue Index = Op.getOperand(3);
    SDValue Base  = Op.getOperand(4);
    SDValue Scale = Op.getOperand(5);
    return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain);
  }
  // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
  case RDTSC: {
    SmallVector<SDValue, 2> Results;
    getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget,
                            Results);
    return DAG.getMergeValues(Results, dl);
  }
  // Read Performance Monitoring Counters.
  case RDPMC: {
    SmallVector<SDValue, 2> Results;
    getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
    return DAG.getMergeValues(Results, dl);
  }
  // XTEST intrinsics.
  case XTEST: {
    SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
    SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
    SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                                DAG.getConstant(X86::COND_NE, MVT::i8),
                                InTrans);
    SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
    return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
                       Ret, SDValue(InTrans.getNode(), 1));
  }
  case ADX: {
    SmallVector<SDValue, 2> Results;
    SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
    SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
    SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
                                DAG.getConstant(-1, MVT::i8));
    SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
                              Op.getOperand(4), GenCF.getValue(1));
    SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
                                 Op.getOperand(5), MachinePointerInfo(),
                                 false, false, 0);
    SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                                DAG.getConstant(X86::COND_B, MVT::i8),
                                Res.getValue(1));
    Results.push_back(SetCC);
    Results.push_back(Store);
    return DAG.getMergeValues(Results, dl);
  }
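  // The i8 addition with -1 above re-materializes the caller's carry flag:
  // for a carry-in byte c in {0,1}, c + 0xFF carries out of 8 bits exactly
  // when c == 1, so the subsequent add-with-carry consumes CF == c.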
  case COMPRESS_TO_MEM: {
    SDLoc dl(Op);
    SDValue Mask = Op.getOperand(4);
    SDValue DataToCompress = Op.getOperand(3);
    SDValue Addr = Op.getOperand(2);
    SDValue Chain = Op.getOperand(0);

    if (isAllOnes(Mask)) // return just a store
      return DAG.getStore(Chain, dl, DataToCompress, Addr,
                          MachinePointerInfo(), false, false, 0);

    EVT VT = DataToCompress.getValueType();
    EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                  VT.getVectorNumElements());
    EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                     Mask.getValueType().getSizeInBits());
    SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
                                DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
                                DAG.getIntPtrConstant(0));

    SDValue Compressed = DAG.getNode(IntrData->Opc0, dl, VT, VMask,
                                     DataToCompress, DAG.getUNDEF(VT));
    return DAG.getStore(Chain, dl, Compressed, Addr,
                        MachinePointerInfo(), false, false, 0);
  }
  case EXPAND_FROM_MEM: {
    SDLoc dl(Op);
    SDValue Mask = Op.getOperand(4);
    SDValue PassThru = Op.getOperand(3);
    SDValue Addr = Op.getOperand(2);
    SDValue Chain = Op.getOperand(0);
    EVT VT = Op.getValueType();

    if (isAllOnes(Mask)) // return just a load
      return DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(), false,
                         false, false, 0);
    EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                  VT.getVectorNumElements());
    EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                     Mask.getValueType().getSizeInBits());
    SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
                                DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
                                DAG.getIntPtrConstant(0));

    SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr,
                                       MachinePointerInfo(),
                                       false, false, false, 0);

    SmallVector<SDValue, 2> Results;
    Results.push_back(DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToExpand,
                                  PassThru));
    Results.push_back(Chain);
    return DAG.getMergeValues(Results, dl);
  }
  }
}
SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
                                           SelectionDAG &DAG) const {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  MFI->setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc dl(Op);
  EVT PtrVT = getPointerTy();

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
    SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
    return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, PtrVT,
                                   FrameAddr, Offset),
                       MachinePointerInfo(), false, false, false, 0);
  }

  // Just load the return address.
  SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
  return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
                     RetAddrFI, MachinePointerInfo(), false, false, false, 0);
}
SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  MFI->setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);  // FIXME probably not meaningful
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  unsigned FrameReg = RegInfo->getPtrSizedFrameRegister(
      DAG.getMachineFunction());
  assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
          (FrameReg == X86::EBP && VT == MVT::i32)) &&
         "Invalid Frame Register!");
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo(),
                            false, false, false, 0);
  return FrameAddr;
}
// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
unsigned X86TargetLowering::getRegisterByName(const char* RegName,
                                              EVT VT) const {
  unsigned Reg = StringSwitch<unsigned>(RegName)
                       .Case("esp", X86::ESP)
                       .Case("rsp", X86::RSP)
                       .Default(0);
  if (Reg)
    return Reg;

  report_fatal_error("Invalid register name global variable");
}
SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
                                                     SelectionDAG &DAG) const {
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
}
SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain   = Op.getOperand(0);
  SDValue Offset  = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);

  EVT PtrVT = getPointerTy();
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
  assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
          (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
         "Invalid Frame Register!");
  SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
  unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;

  SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
                                 DAG.getIntPtrConstant(RegInfo->getSlotSize()));
  StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
  Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
                       false, false, 0);
  Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);

  return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
                     DAG.getRegister(StoreAddrReg, PtrVT));
}
SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
                     DAG.getVTList(MVT::i32, MVT::Other),
                     Op.getOperand(0), Op.getOperand(1));
}

SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
                     Op.getOperand(0), Op.getOperand(1));
}

static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
  return Op.getOperand(0);
}
SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDValue Root = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  SDLoc dl(Op);

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();

  if (Subtarget->is64Bit()) {
    SDValue OutChains[6];

    // Large code-model.
    const unsigned char JMP64r  = 0xFF; // 64-bit jmp through register opcode.
    const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.

    const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
    const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;

    const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix

    // Load the pointer to the nested function into R11.
    unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
    SDValue Addr = Trmp;
    OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr),
                                false, false, 0);

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(2, MVT::i64));
    OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
                                MachinePointerInfo(TrmpAddr, 2),
                                false, false, 2);

    // Load the 'nest' parameter value into R10.
    // R10 is specified in X86CallingConv.td
    OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(10, MVT::i64));
    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr, 10),
                                false, false, 0);

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(12, MVT::i64));
    OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
                                MachinePointerInfo(TrmpAddr, 12),
                                false, false, 2);

    // Jump to the nested function.
    OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(20, MVT::i64));
    OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr, 20),
                                false, false, 0);

    unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
                       DAG.getConstant(22, MVT::i64));
    OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
                                MachinePointerInfo(TrmpAddr, 22),
                                false, false, 0);

    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  } else {
    const Function *Func =
      cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
    CallingConv::ID CC = Func->getCallingConv();
    unsigned NestReg;

    switch (CC) {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::X86_StdCall: {
      // Pass 'nest' parameter in ECX.
      // Must be kept in sync with X86CallingConv.td
      NestReg = X86::ECX;

      // Check that ECX wasn't needed by an 'inreg' parameter.
      FunctionType *FTy = Func->getFunctionType();
      const AttributeSet &Attrs = Func->getAttributes();

      if (!Attrs.isEmpty() && !Func->isVarArg()) {
        unsigned InRegCount = 0;
        unsigned Idx = 1;

        for (FunctionType::param_iterator I = FTy->param_begin(),
             E = FTy->param_end(); I != E; ++I, ++Idx)
          if (Attrs.hasAttribute(Idx, Attribute::InReg))
            // FIXME: should only count parameters that are lowered to integers.
            InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;

        if (InRegCount > 2) {
          report_fatal_error("Nest register in use - reduce number of inreg"
                             " parameters!");
        }
      }
      break;
    }
    case CallingConv::X86_FastCall:
    case CallingConv::X86_ThisCall:
    case CallingConv::Fast:
      // Pass 'nest' parameter in EAX.
      // Must be kept in sync with X86CallingConv.td
      NestReg = X86::EAX;
      break;
    }

    SDValue OutChains[4];
    SDValue Addr, Disp;

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(10, MVT::i32));
    Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);

    // This is storing the opcode for MOV32ri.
    const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
    const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
    OutChains[0] = DAG.getStore(Root, dl,
                                DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
                                Trmp, MachinePointerInfo(TrmpAddr),
                                false, false, 0);

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(1, MVT::i32));
    OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
                                MachinePointerInfo(TrmpAddr, 1),
                                false, false, 1);

    const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(5, MVT::i32));
    OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
                                MachinePointerInfo(TrmpAddr, 5),
                                false, false, 1);

    Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                       DAG.getConstant(6, MVT::i32));
    OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
                                MachinePointerInfo(TrmpAddr, 6),
                                false, false, 1);

    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  }
}
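// For reference, the 64-bit trampoline emitted above is, byte for byte:
//   49 BB <8-byte fptr>    movabsq $fptr, %r11
//   49 BA <8-byte nest>    movabsq $nest, %r10
//   49 FF E3               jmpq *%r11
// where 0x49 is the REX_WB prefix computed above and 0xBB/0xBA are MOV64ri
// with the low three register-encoding bits of r11/r10 folded in.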
SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
                                            SelectionDAG &DAG) const {
  /*
   The rounding mode is in bits 11:10 of FPSR, and has the following
   settings:
     00 Round to nearest
     01 Round to -inf
     10 Round to +inf
     11 Round to 0

  FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

  To perform the conversion, we do:
    (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
  */

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
  unsigned StackAlignment = TFI.getStackAlignment();
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);

  // Save FP Control Word to stack slot
  int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());

  MachineMemOperand *MMO =
   MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                           MachineMemOperand::MOStore, 2, 2);

  SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
  SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
                                          DAG.getVTList(MVT::Other),
                                          Ops, MVT::i16, MMO);

  // Load FP Control Word from stack slot
  SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
                            MachinePointerInfo(), false, false, false, 0);

  // Transform as necessary
  SDValue CWD1 =
    DAG.getNode(ISD::SRL, DL, MVT::i16,
                DAG.getNode(ISD::AND, DL, MVT::i16,
                            CWD, DAG.getConstant(0x800, MVT::i16)),
                DAG.getConstant(11, MVT::i8));
  SDValue CWD2 =
    DAG.getNode(ISD::SRL, DL, MVT::i16,
                DAG.getNode(ISD::AND, DL, MVT::i16,
                            CWD, DAG.getConstant(0x400, MVT::i16)),
                DAG.getConstant(9, MVT::i8));

  SDValue RetVal =
    DAG.getNode(ISD::AND, DL, MVT::i16,
                DAG.getNode(ISD::ADD, DL, MVT::i16,
                            DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
                            DAG.getConstant(1, MVT::i16)),
                DAG.getConstant(3, MVT::i16));

  return DAG.getNode((VT.getSizeInBits() < 16 ?
                      ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
}
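// Worked example of the conversion in LowerFLT_ROUNDS_: if the control-word
// rounding bits 11:10 are 01 (round to -inf), then FPSR & 0x800 == 0 and
// (FPSR & 0x400) >> 9 == 2, so ((0 | 2) + 1) & 3 == 3, which is FLT_ROUNDS'
// encoding of round to -inf.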
static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  EVT OpVT = VT;
  unsigned NumBits = VT.getSizeInBits();
  SDLoc dl(Op);

  Op = Op.getOperand(0);
  if (VT == MVT::i8) {
    // Zero extend to i32 since there is not an i8 bsr.
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
  }

  // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);

  // If src is zero (i.e. bsr sets ZF), returns NumBits.
  SDValue Ops[] = {
    Op,
    DAG.getConstant(NumBits+NumBits-1, OpVT),
    DAG.getConstant(X86::COND_E, MVT::i8),
    Op.getValue(1)
  };
  Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);

  // Finally xor with NumBits-1.
  Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));

  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
  return Op;
}
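// Example for the CTLZ lowering above: for i32 %x = 0x00F00000, BSR returns
// 23 and 23 ^ 31 == 8, the number of leading zeros. For %x == 0 the CMOV
// substitutes 2*NumBits-1, and (2*NumBits-1) ^ (NumBits-1) == NumBits, the
// value CTLZ defines for a zero input.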
static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  EVT OpVT = VT;
  unsigned NumBits = VT.getSizeInBits();
  SDLoc dl(Op);

  Op = Op.getOperand(0);
  if (VT == MVT::i8) {
    // Zero extend to i32 since there is not an i8 bsr.
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
  }

  // Issue a bsr (scan bits in reverse).
  SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);

  // And xor with NumBits-1.
  Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));

  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
  return Op;
}
static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  unsigned NumBits = VT.getSizeInBits();
  SDLoc dl(Op);
  Op = Op.getOperand(0);

  // Issue a bsf (scan bits forward) which also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(VT, MVT::i32);
  Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);

  // If src is zero (i.e. bsf sets ZF), returns NumBits.
  SDValue Ops[] = {
    Op,
    DAG.getConstant(NumBits, VT),
    DAG.getConstant(X86::COND_E, MVT::i8),
    Op.getValue(1)
  };
  return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
}
// Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
// ones, and then concatenate the result back.
static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  assert(VT.is256BitVector() && VT.isInteger() &&
         "Unsupported value type for operation");

  unsigned NumElems = VT.getVectorNumElements();
  SDLoc dl(Op);

  // Extract the LHS vectors
  SDValue LHS = Op.getOperand(0);
  SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
  SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);

  // Extract the RHS vectors
  SDValue RHS = Op.getOperand(1);
  SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
  SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);

  MVT EltVT = VT.getVectorElementType();
  MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);

  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
}
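// For example, a (v8i32 add A, B) on a target without AVX2 integer ops is
// rewritten by Lower256IntArith as
//   (concat_vectors (add A[0..3], B[0..3]), (add A[4..7], B[4..7])),
// where each half maps onto an ordinary 128-bit instruction.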
static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) {
  assert(Op.getSimpleValueType().is256BitVector() &&
         Op.getSimpleValueType().isInteger() &&
         "Only handle AVX 256-bit vector integer operation");
  return Lower256IntArith(Op, DAG);
}

static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
  assert(Op.getSimpleValueType().is256BitVector() &&
         Op.getSimpleValueType().isInteger() &&
         "Only handle AVX 256-bit vector integer operation");
  return Lower256IntArith(Op, DAG);
}
static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
                        SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();

  // Decompose 256-bit ops into smaller 128-bit ops.
  if (VT.is256BitVector() && !Subtarget->hasInt256())
    return Lower256IntArith(Op, DAG);

  SDValue A = Op.getOperand(0);
  SDValue B = Op.getOperand(1);

  // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
  if (VT == MVT::v4i32) {
    assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() &&
           "Should not custom lower when pmuldq is available!");

    // Extract the odd parts.
    static const int UnpackMask[] = { 1, -1, 3, -1 };
    SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
    SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);

    // Multiply the even parts.
    SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B);
    // Now multiply odd parts.
    SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds);

    Evens = DAG.getNode(ISD::BITCAST, dl, VT, Evens);
    Odds = DAG.getNode(ISD::BITCAST, dl, VT, Odds);

    // Merge the two vectors back together with a shuffle. This expands into 2
    // shuffles.
    static const int ShufMask[] = { 0, 4, 2, 6 };
    return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
  }
  assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
         "Only know how to lower V2I64/V4I64/V8I64 multiply");

  //  Ahi = psrlqi(a, 32);
  //  Bhi = psrlqi(b, 32);
  //
  //  AloBlo = pmuludq(a, b);
  //  AloBhi = pmuludq(a, Bhi);
  //  AhiBlo = pmuludq(Ahi, b);
  //
  //  AloBhi = psllqi(AloBhi, 32);
  //  AhiBlo = psllqi(AhiBlo, 32);
  //  return AloBlo + AloBhi + AhiBlo;

  SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
  SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);

  // Bit cast to 32-bit vectors for MULUDQ
  EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 :
              (VT == MVT::v4i64) ? MVT::v8i32 : MVT::v16i32;
  A = DAG.getNode(ISD::BITCAST, dl, MulVT, A);
  B = DAG.getNode(ISD::BITCAST, dl, MulVT, B);
  Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi);
  Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi);

  SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
  SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
  SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);

  AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG);
  AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG);

  SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
  return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
}
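// Note on the decomposition in LowerMUL above: writing a = Ahi*2^32 + Alo and
// b = Bhi*2^32 + Blo, the product modulo 2^64 is
//   Alo*Blo + ((Alo*Bhi + Ahi*Blo) << 32);
// the Ahi*Bhi term is shifted left by 64 bits and vanishes, which is why only
// three PMULUDQs are needed.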
SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op,
                                             SelectionDAG &DAG) const {
  assert(Subtarget->isTargetWin64() && "Unexpected target");
  EVT VT = Op.getValueType();
  assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
         "Unexpected return type for lowering");

  RTLIB::Libcall LC;
  bool isSigned;
  switch (Op->getOpcode()) {
  default: llvm_unreachable("Unexpected request for libcall!");
  case ISD::SDIV:    isSigned = true;  LC = RTLIB::SDIV_I128;    break;
  case ISD::UDIV:    isSigned = false; LC = RTLIB::UDIV_I128;    break;
  case ISD::SREM:    isSigned = true;  LC = RTLIB::SREM_I128;    break;
  case ISD::UREM:    isSigned = false; LC = RTLIB::UREM_I128;    break;
  case ISD::SDIVREM: isSigned = true;  LC = RTLIB::SDIVREM_I128; break;
  case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
  }

  SDLoc dl(Op);
  SDValue InChain = DAG.getEntryNode();

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;
  for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
    EVT ArgVT = Op->getOperand(i).getValueType();
    assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
           "Unexpected argument type for lowering");
    SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
    Entry.Node = StackPtr;
    InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr,
                           MachinePointerInfo(), false, false, 16);
    Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
    Entry.Ty = PointerType::get(ArgTy,0);
    Entry.isSExt = false;
    Entry.isZExt = false;
    Args.push_back(Entry);
  }

  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy());

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(InChain)
    .setCallee(getLibcallCallingConv(LC),
               static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
               Callee, std::move(Args), 0)
    .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);

  std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
  return DAG.getNode(ISD::BITCAST, dl, VT, CallInfo.first);
}
static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
                             SelectionDAG &DAG) {
  SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
  EVT VT = Op0.getValueType();
  SDLoc dl(Op);

  assert((VT == MVT::v4i32 && Subtarget->hasSSE2()) ||
         (VT == MVT::v8i32 && Subtarget->hasInt256()));

  // PMULxD operations multiply each even value (starting at 0) of LHS with
  // the related value of RHS and produce a widened result.
  // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
  // => <2 x i64> <ae|cg>
  //
  // In other words, to have all the results, we need to perform two PMULxD:
  // 1. one with the even values.
  // 2. one with the odd values.
  // To achieve #2, we need to place the odd values at an even position.
  //
  // Place the odd value at an even position (basically, shift all values 1
  // step to the left):
  const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
  // <a|b|c|d> => <b|undef|d|undef>
  SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0, Mask);
  // <e|f|g|h> => <f|undef|h|undef>
  SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1, Mask);

  // Emit two multiplies, one for the lower 2 ints and one for the higher 2
  // ints.
  MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
  bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
  unsigned Opcode =
    (!IsSigned || !Subtarget->hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
  // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
  // => <2 x i64> <ae|cg>
  SDValue Mul1 = DAG.getNode(ISD::BITCAST, dl, VT,
                             DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
  // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
  // => <2 x i64> <bf|dh>
  SDValue Mul2 = DAG.getNode(ISD::BITCAST, dl, VT,
                             DAG.getNode(Opcode, dl, MulVT, Odd0, Odd1));

  // Shuffle it back into the right order.
  SDValue Highs, Lows;
  if (VT == MVT::v8i32) {
    const int HighMask[] = {1, 9, 3, 11, 5, 13, 7, 15};
    Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
    const int LowMask[] = {0, 8, 2, 10, 4, 12, 6, 14};
    Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
  } else {
    const int HighMask[] = {1, 5, 3, 7};
    Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
    const int LowMask[] = {0, 4, 2, 6};
    Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
  }

  // If we have a signed multiply but no PMULDQ, fix up the high parts of an
  // unsigned multiply.
  if (IsSigned && !Subtarget->hasSSE41()) {
    SDValue ShAmt =
      DAG.getConstant(31, DAG.getTargetLoweringInfo().getShiftAmountTy(VT));
    SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
                             DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
    SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
                             DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0);

    SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
    Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup);
  }

  // The first result of MUL_LOHI is actually the low value, followed by the
  // high value.
  SDValue Ops[] = {Lows, Highs};
  return DAG.getMergeValues(Ops, dl);
}
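// The fixup in LowerMUL_LOHI relies on the identity (per 32-bit lane)
//   (a *s b)_hi = (a *u b)_hi - (a < 0 ? b : 0) - (b < 0 ? a : 0),
// where (x sra 31) & y computes (x < 0 ? y : 0) without a branch.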
static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
                                         const X86Subtarget *Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);

  // Optimize shl/srl/sra with constant shift amount.
  if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
    if (auto *ShiftConst = BVAmt->getConstantSplatNode()) {
      uint64_t ShiftAmt = ShiftConst->getZExtValue();

      if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
          (Subtarget->hasInt256() &&
           (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16)) ||
          (Subtarget->hasAVX512() &&
           (VT == MVT::v8i64 || VT == MVT::v16i32))) {
        if (Op.getOpcode() == ISD::SHL)
          return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
                                            DAG);
        if (Op.getOpcode() == ISD::SRL)
          return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
                                            DAG);
        if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64)
          return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
                                            DAG);
      }

      if (VT == MVT::v16i8) {
        if (Op.getOpcode() == ISD::SHL) {
          // Make a large shift.
          SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
                                                   MVT::v8i16, R, ShiftAmt,
                                                   DAG);
          SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
          // Zero out the rightmost bits.
          SmallVector<SDValue, 16> V(16,
                                     DAG.getConstant(uint8_t(-1U << ShiftAmt),
                                                     MVT::i8));
          return DAG.getNode(ISD::AND, dl, VT, SHL,
                             DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
        }
        if (Op.getOpcode() == ISD::SRL) {
          // Make a large shift.
          SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
                                                   MVT::v8i16, R, ShiftAmt,
                                                   DAG);
          SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
          // Zero out the leftmost bits.
          SmallVector<SDValue, 16> V(16,
                                     DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
                                                     MVT::i8));
          return DAG.getNode(ISD::AND, dl, VT, SRL,
                             DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
        }
        if (Op.getOpcode() == ISD::SRA) {
          if (ShiftAmt == 7) {
            // R s>> 7  ===  R s< 0
            SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
            return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
          }

          // R s>> a === ((R u>> a) ^ m) - m
          SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
          SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt,
                                                         MVT::i8));
          SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
          Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
          Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
          return Res;
        }
        llvm_unreachable("Unknown shift opcode.");
      }

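      // Worked example of the sign-extension identity above: for a == 2 and
      // the byte R = 0xF0 (-16), R u>> 2 == 0x3C and m == 128 >> 2 == 0x20,
      // so (0x3C ^ 0x20) - 0x20 == 0x1C - 0x20 == 0xFC == -4, matching
      // -16 s>> 2. The same identity is reused for v32i8 below.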
      if (Subtarget->hasInt256() && VT == MVT::v32i8) {
        if (Op.getOpcode() == ISD::SHL) {
          // Make a large shift.
          SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
                                                   MVT::v16i16, R, ShiftAmt,
                                                   DAG);
          SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
          // Zero out the rightmost bits.
          SmallVector<SDValue, 32> V(32,
                                     DAG.getConstant(uint8_t(-1U << ShiftAmt),
                                                     MVT::i8));
          return DAG.getNode(ISD::AND, dl, VT, SHL,
                             DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
        }
        if (Op.getOpcode() == ISD::SRL) {
          // Make a large shift.
          SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
                                                   MVT::v16i16, R, ShiftAmt,
                                                   DAG);
          SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
          // Zero out the leftmost bits.
          SmallVector<SDValue, 32> V(32,
                                     DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
                                                     MVT::i8));
          return DAG.getNode(ISD::AND, dl, VT, SRL,
                             DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
        }
        if (Op.getOpcode() == ISD::SRA) {
          if (ShiftAmt == 7) {
            // R s>> 7  ===  R s< 0
            SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
            return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
          }

          // R s>> a === ((R u>> a) ^ m) - m
          SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
          SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt,
                                                         MVT::i8));
          SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
          Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
          Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
          return Res;
        }
        llvm_unreachable("Unknown shift opcode.");
      }
    }
  }
  // Special case in 32-bit mode, where i64 is expanded into high and low parts.
  if (!Subtarget->is64Bit() &&
      (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) &&
      Amt.getOpcode() == ISD::BITCAST &&
      Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
    Amt = Amt.getOperand(0);
    unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
                     VT.getVectorNumElements();
    unsigned RatioInLog2 = Log2_32_Ceil(Ratio);
    uint64_t ShiftAmt = 0;
    for (unsigned i = 0; i != Ratio; ++i) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i));
      if (!C)
        return SDValue();
      // 6 == Log2(64)
      ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2)));
    }
    // Check remaining shift amounts.
    for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
      uint64_t ShAmt = 0;
      for (unsigned j = 0; j != Ratio; ++j) {
        ConstantSDNode *C =
          dyn_cast<ConstantSDNode>(Amt.getOperand(i + j));
        if (!C)
          return SDValue();
        // 6 == Log2(64)
        ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2)));
      }
      if (ShAmt != ShiftAmt)
        return SDValue();
    }
    switch (Op.getOpcode()) {
    default:
      llvm_unreachable("Unknown shift opcode!");
    case ISD::SHL:
      return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
                                        DAG);
    case ISD::SRL:
      return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
                                        DAG);
    case ISD::SRA:
      return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
                                        DAG);
    }
  }

  return SDValue();
}
static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
                                        const X86Subtarget* Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);

  if ((VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) ||
      VT == MVT::v4i32 || VT == MVT::v8i16 ||
      (Subtarget->hasInt256() &&
       ((VT == MVT::v4i64 && Op.getOpcode() != ISD::SRA) ||
        VT == MVT::v8i32 || VT == MVT::v16i16)) ||
      (Subtarget->hasAVX512() && (VT == MVT::v8i64 || VT == MVT::v16i32))) {
    SDValue BaseShAmt;
    EVT EltVT = VT.getVectorElementType();

    if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Amt)) {
      // Check if this build_vector node is doing a splat.
      // If so, then set BaseShAmt equal to the splat value.
      BaseShAmt = BV->getSplatValue();
      if (BaseShAmt && BaseShAmt.getOpcode() == ISD::UNDEF)
        BaseShAmt = SDValue();
    } else {
      if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
        Amt = Amt.getOperand(0);

      ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt);
      if (SVN && SVN->isSplat()) {
        unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
        SDValue InVec = Amt.getOperand(0);
        if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
          assert((SplatIdx < InVec.getValueType().getVectorNumElements()) &&
                 "Unexpected shuffle index found!");
          BaseShAmt = InVec.getOperand(SplatIdx);
        } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
          if (ConstantSDNode *C =
              dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
            if (C->getZExtValue() == SplatIdx)
              BaseShAmt = InVec.getOperand(1);
          }
        }

        if (!BaseShAmt)
          // Avoid introducing an extract element from a shuffle.
          BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec,
                                  DAG.getIntPtrConstant(SplatIdx));
      }
    }

    if (BaseShAmt.getNode()) {
      assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
      if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
        BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
      else if (EltVT.bitsLT(MVT::i32))
        BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);

      switch (Op.getOpcode()) {
      default:
        llvm_unreachable("Unknown shift opcode!");
      case ISD::SHL:
        switch (VT.SimpleTy) {
        default: return SDValue();
        case MVT::v2i64:
        case MVT::v4i32:
        case MVT::v8i16:
        case MVT::v4i64:
        case MVT::v8i32:
        case MVT::v16i16:
        case MVT::v16i32:
        case MVT::v8i64:
          return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG);
        }
      case ISD::SRA:
        switch (VT.SimpleTy) {
        default: return SDValue();
        case MVT::v4i32:
        case MVT::v8i16:
        case MVT::v8i32:
        case MVT::v16i16:
        case MVT::v16i32:
        case MVT::v8i64:
          return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG);
        }
      case ISD::SRL:
        switch (VT.SimpleTy) {
        default: return SDValue();
        case MVT::v2i64:
        case MVT::v4i32:
        case MVT::v8i16:
        case MVT::v4i64:
        case MVT::v8i32:
        case MVT::v16i16:
        case MVT::v16i32:
        case MVT::v8i64:
          return getTargetVShiftNode(X86ISD::VSRLI, dl, VT, R, BaseShAmt, DAG);
        }
      }
    }
  }
  // Special case in 32-bit mode, where i64 is expanded into high and low parts.
  if (!Subtarget->is64Bit() &&
      (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64) ||
       (Subtarget->hasAVX512() && VT == MVT::v8i64)) &&
      Amt.getOpcode() == ISD::BITCAST &&
      Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
    Amt = Amt.getOperand(0);
    unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
                     VT.getVectorNumElements();
    std::vector<SDValue> Vals(Ratio);
    for (unsigned i = 0; i != Ratio; ++i)
      Vals[i] = Amt.getOperand(i);
    for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
      for (unsigned j = 0; j != Ratio; ++j)
        if (Vals[j] != Amt.getOperand(i + j))
          return SDValue();
    }
    switch (Op.getOpcode()) {
    default:
      llvm_unreachable("Unknown shift opcode!");
    case ISD::SHL:
      return DAG.getNode(X86ISD::VSHL, dl, VT, R, Op.getOperand(1));
    case ISD::SRL:
      return DAG.getNode(X86ISD::VSRL, dl, VT, R, Op.getOperand(1));
    case ISD::SRA:
      return DAG.getNode(X86ISD::VSRA, dl, VT, R, Op.getOperand(1));
    }
  }

  return SDValue();
}
static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
                          SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);
  SDValue V;

  assert(VT.isVector() && "Custom lowering only for vector shifts!");
  assert(Subtarget->hasSSE2() && "Only custom lower when we have SSE2!");

  V = LowerScalarImmediateShift(Op, DAG, Subtarget);
  if (V.getNode())
    return V;

  V = LowerScalarVariableShift(Op, DAG, Subtarget);
  if (V.getNode())
    return V;

  if (Subtarget->hasAVX512() && (VT == MVT::v16i32 || VT == MVT::v8i64))
    return Op;
  // AVX2 has VPSLLV/VPSRAV/VPSRLV.
  if (Subtarget->hasInt256()) {
    if (Op.getOpcode() == ISD::SRL &&
        (VT == MVT::v2i64 || VT == MVT::v4i32 ||
         VT == MVT::v4i64 || VT == MVT::v8i32))
      return Op;
    if (Op.getOpcode() == ISD::SHL &&
        (VT == MVT::v2i64 || VT == MVT::v4i32 ||
         VT == MVT::v4i64 || VT == MVT::v8i32))
      return Op;
    if (Op.getOpcode() == ISD::SRA && (VT == MVT::v4i32 || VT == MVT::v8i32))
      return Op;
  }

  // If possible, lower this packed shift into a vector multiply instead of
  // expanding it into a sequence of scalar shifts.
  // Do this only if the vector shift count is a constant build_vector.
  if (Op.getOpcode() == ISD::SHL &&
      (VT == MVT::v8i16 || VT == MVT::v4i32 ||
       (Subtarget->hasInt256() && VT == MVT::v16i16)) &&
      ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
    SmallVector<SDValue, 8> Elts;
    EVT SVT = VT.getScalarType();
    unsigned SVTBits = SVT.getSizeInBits();
    const APInt &One = APInt(SVTBits, 1);
    unsigned NumElems = VT.getVectorNumElements();

    for (unsigned i=0; i !=NumElems; ++i) {
      SDValue Op = Amt->getOperand(i);
      if (Op->getOpcode() == ISD::UNDEF) {
        Elts.push_back(Op);
        continue;
      }

      ConstantSDNode *ND = cast<ConstantSDNode>(Op);
      const APInt &C = APInt(SVTBits, ND->getAPIntValue().getZExtValue());
      uint64_t ShAmt = C.getZExtValue();
      if (ShAmt >= SVTBits) {
        Elts.push_back(DAG.getUNDEF(SVT));
        continue;
      }
      Elts.push_back(DAG.getConstant(One.shl(ShAmt), SVT));
    }
    SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
    return DAG.getNode(ISD::MUL, dl, VT, R, BV);
  }

  // Lower SHL with variable shift amount.
  if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
    Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT));

    Op = DAG.getNode(ISD::ADD, dl, VT, Op, DAG.getConstant(0x3f800000U, VT));
    Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
    Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
    return DAG.getNode(ISD::MUL, dl, VT, Op, R);
  }
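  // The v4i32 variable-SHL lowering above materializes 2^Amt through the
  // float representation: (Amt << 23) + 0x3f800000 places the shift count in
  // the exponent field, giving a float of value 2.0^Amt for in-range counts;
  // converting back to integer and multiplying by R then performs the shift
  // with one immediate shift, an add, a conversion and an integer multiply.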
18723 // If possible, lower this shift as a sequence of two shifts by
18724 // constant plus a MOVSS/MOVSD instead of scalarizing it.
18726 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
18728 // Could be rewritten as:
18729 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
18731 // The advantage is that the two shifts from the example would be
18732 // lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
18733 // the vector shift into four scalar shifts plus four pairs of vector
18734 // insert/extract.
18735 if ((VT == MVT::v8i16 || VT == MVT::v4i32) &&
18736 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
18737 unsigned TargetOpcode = X86ISD::MOVSS;
18738 bool CanBeSimplified;
18739 // The splat value for the first packed shift (the 'X' from the example).
18740 SDValue Amt1 = Amt->getOperand(0);
18741 // The splat value for the second packed shift (the 'Y' from the example).
18742 SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) :
18743 Amt->getOperand(2);
18745 // See if it is possible to replace this node with a sequence of
18746 // two shifts followed by a MOVSS/MOVSD
18747 if (VT == MVT::v4i32) {
18748 // Check if it is legal to use a MOVSS.
18749 CanBeSimplified = Amt2 == Amt->getOperand(2) &&
18750 Amt2 == Amt->getOperand(3);
18751 if (!CanBeSimplified) {
18752 // Otherwise, check if we can still simplify this node using a MOVSD.
18753 CanBeSimplified = Amt1 == Amt->getOperand(1) &&
18754 Amt->getOperand(2) == Amt->getOperand(3);
18755 TargetOpcode = X86ISD::MOVSD;
18756 Amt2 = Amt->getOperand(2);
18757 }
18758 } else {
18759 // Do similar checks for the case where the machine value type
18760 // is MVT::v8i16.
18761 CanBeSimplified = Amt1 == Amt->getOperand(1);
18762 for (unsigned i=3; i != 8 && CanBeSimplified; ++i)
18763 CanBeSimplified = Amt2 == Amt->getOperand(i);
18765 if (!CanBeSimplified) {
18766 TargetOpcode = X86ISD::MOVSD;
18767 CanBeSimplified = true;
18768 Amt2 = Amt->getOperand(4);
18769 for (unsigned i=0; i != 4 && CanBeSimplified; ++i)
18770 CanBeSimplified = Amt1 == Amt->getOperand(i);
18771 for (unsigned j=4; j != 8 && CanBeSimplified; ++j)
18772 CanBeSimplified = Amt2 == Amt->getOperand(j);
18773 }
18774 }
18776 if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
18777 isa<ConstantSDNode>(Amt2)) {
18778 // Replace this node with two shifts followed by a MOVSS/MOVSD.
18779 EVT CastVT = MVT::v4i32;
18780 SDValue Splat1 =
18781 DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), VT);
18782 SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
18783 SDValue Splat2 =
18784 DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), VT);
18785 SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
18786 if (TargetOpcode == X86ISD::MOVSD)
18787 CastVT = MVT::v2i64;
18788 SDValue BitCast1 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift1);
18789 SDValue BitCast2 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift2);
18790 SDValue Result = getTargetShuffleNode(TargetOpcode, dl, CastVT, BitCast2,
18791 BitCast1, DAG);
18792 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
18793 }
18794 }
18796 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
18797 assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq.");
18800 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, VT));
18801 Op = DAG.getNode(ISD::BITCAST, dl, VT, Op);
18803 // Turn 'a' into a mask suitable for VSELECT
18804 SDValue VSelM = DAG.getConstant(0x80, VT);
18805 SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
18806 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
18808 SDValue CM1 = DAG.getConstant(0x0f, VT);
18809 SDValue CM2 = DAG.getConstant(0x3f, VT);
18811 // r = VSELECT(r, psllw(r & (char16)15, 4), a);
18812 SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1);
18813 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 4, DAG);
18814 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
18815 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
18818 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
18819 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
18820 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
18822 // r = VSELECT(r, psllw(r & (char16)63, 2), a);
18823 M = DAG.getNode(ISD::AND, dl, VT, R, CM2);
18824 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 2, DAG);
18825 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
18826 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
18829 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
18830 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
18831 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
18833 // return VSELECT(r, r+r, a);
18834 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel,
18835 DAG.getNode(ISD::ADD, dl, VT, R, R), R);
18836 return R;
18837 }
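// Editor's note: SSE has no per-byte variable shift, so the block above
// builds one from the bits of the amount. After 'Amt << 5' the current
// amount bit sits in each byte's MSB; PCMPEQ turns it into a lane mask and
// each VSELECT round conditionally shifts by 4, then 2, then 1 (via r+r).
// Any amount 0..7 is applied through its binary digits, e.g. 5 = 4 + 1
// takes rounds one and three.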
18839 // It's worth extending once and using the v8i32 shifts for 16-bit types, but
18840 // the extra overheads to get from v16i8 to v8i32 make the existing SSE
18841 // solution better.
18842 if (Subtarget->hasInt256() && VT == MVT::v8i16) {
18843 MVT NewVT = VT == MVT::v8i16 ? MVT::v8i32 : MVT::v16i16;
18844 unsigned ExtOpc =
18845 Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
18846 R = DAG.getNode(ExtOpc, dl, NewVT, R);
18847 Amt = DAG.getNode(ISD::ANY_EXTEND, dl, NewVT, Amt);
18848 return DAG.getNode(ISD::TRUNCATE, dl, VT,
18849 DAG.getNode(Op.getOpcode(), dl, NewVT, R, Amt));
18850 }
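// Editor's note - worked example, not from the original source: on AVX2,
// (v8i16 (srl %R, %Amt)) becomes a ZERO_EXTEND of %R to v8i32, an
// ANY_EXTEND of %Amt, a single variable shift (VPSRLVD), and a TRUNCATE
// back to v8i16, instead of eight scalar shifts.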
18852 // Decompose 256-bit shifts into smaller 128-bit shifts.
18853 if (VT.is256BitVector()) {
18854 unsigned NumElems = VT.getVectorNumElements();
18855 MVT EltVT = VT.getVectorElementType();
18856 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
18858 // Extract the two vectors
18859 SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
18860 SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);
18862 // Recreate the shift amount vectors
18863 SDValue Amt1, Amt2;
18864 if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
18865 // Constant shift amount
18866 SmallVector<SDValue, 4> Amt1Csts;
18867 SmallVector<SDValue, 4> Amt2Csts;
18868 for (unsigned i = 0; i != NumElems/2; ++i)
18869 Amt1Csts.push_back(Amt->getOperand(i));
18870 for (unsigned i = NumElems/2; i != NumElems; ++i)
18871 Amt2Csts.push_back(Amt->getOperand(i));
18873 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
18874 Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt2Csts);
18875 } else {
18876 // Variable shift amount
18877 Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
18878 Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
18879 }
18881 // Issue new vector shifts for the smaller types
18882 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
18883 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);
18885 // Concatenate the result back
18886 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
18887 }
18889 return SDValue();
18890 }
18892 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
18893 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
18894 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
18895 // looks for this combo and may remove the "setcc" instruction if the "setcc"
18896 // has only one use.
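// Editor's note - illustrative example, not from the original source:
//   %t = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
// becomes an X86ISD::ADD that also produces EFLAGS plus a SETCC on
// X86::COND_O; a brcond user can later fold that SETCC into a single JO.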
18897 SDNode *N = Op.getNode();
18898 SDValue LHS = N->getOperand(0);
18899 SDValue RHS = N->getOperand(1);
18900 unsigned BaseOp = 0;
18901 unsigned Cond = 0;
18902 SDLoc DL(Op);
18903 switch (Op.getOpcode()) {
18904 default: llvm_unreachable("Unknown ovf instruction!");
18905 case ISD::SADDO:
18906 // An add of one will be selected as an INC. Note that INC doesn't
18907 // set CF, so we can't do this for UADDO.
18908 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
18909 if (C->isOne()) {
18910 BaseOp = X86ISD::INC;
18911 Cond = X86::COND_O;
18912 break;
18913 }
18914 BaseOp = X86ISD::ADD;
18915 Cond = X86::COND_O;
18916 break;
18917 case ISD::UADDO:
18918 BaseOp = X86ISD::ADD;
18919 Cond = X86::COND_B;
18920 break;
18921 case ISD::SSUBO:
18922 // A subtract of one will be selected as a DEC. Note that DEC doesn't
18923 // set CF, so we can't do this for USUBO.
18924 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
18925 if (C->isOne()) {
18926 BaseOp = X86ISD::DEC;
18927 Cond = X86::COND_O;
18928 break;
18929 }
18930 BaseOp = X86ISD::SUB;
18931 Cond = X86::COND_O;
18932 break;
18933 case ISD::USUBO:
18934 BaseOp = X86ISD::SUB;
18935 Cond = X86::COND_B;
18936 break;
18937 case ISD::SMULO:
18938 BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL;
18939 Cond = X86::COND_O;
18940 break;
18941 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
18942 if (N->getValueType(0) == MVT::i8) {
18943 BaseOp = X86ISD::UMUL8;
18944 Cond = X86::COND_O;
18945 break;
18946 }
18947 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
18948 MVT::i32);
18949 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);
18951 SDValue SetCC =
18952 DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
18953 DAG.getConstant(X86::COND_O, MVT::i32),
18954 SDValue(Sum.getNode(), 2));
18956 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
18957 }
18958 }
18960 // Also sets EFLAGS.
18961 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
18962 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
18964 SDValue SetCC =
18965 DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
18966 DAG.getConstant(Cond, MVT::i32),
18967 SDValue(Sum.getNode(), 1));
18969 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
18970 }
18972 // Sign extension of the low part of vector elements. This may be used either
18973 // when sign extend instructions are not available or if the vector element
18974 // sizes already match the sign-extended size. If the vector elements are in
18975 // their pre-extended size and sign extend instructions are available, that will
18976 // be handled by LowerSIGN_EXTEND.
18977 SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
18978 SelectionDAG &DAG) const {
18979 SDLoc dl(Op);
18980 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
18981 MVT VT = Op.getSimpleValueType();
18983 if (!Subtarget->hasSSE2() || !VT.isVector())
18984 return SDValue();
18986 unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
18987 ExtraVT.getScalarType().getSizeInBits();
18989 switch (VT.SimpleTy) {
18990 default: return SDValue();
18991 case MVT::v8i32:
18992 case MVT::v16i16:
18993 if (!Subtarget->hasFp256())
18994 return SDValue();
18995 if (!Subtarget->hasInt256()) {
18996 // needs to be split
18997 unsigned NumElems = VT.getVectorNumElements();
18999 // Extract the LHS vectors
19000 SDValue LHS = Op.getOperand(0);
19001 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
19002 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
19004 MVT EltVT = VT.getVectorElementType();
19005 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19007 EVT ExtraEltVT = ExtraVT.getVectorElementType();
19008 unsigned ExtraNumElems = ExtraVT.getVectorNumElements();
19009 ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT,
19010 ExtraNumElems/2);
19011 SDValue Extra = DAG.getValueType(ExtraVT);
19013 LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra);
19014 LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra);
19016 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);
19017 }
19018 // fall through
19019 case MVT::v4i32:
19020 case MVT::v8i16: {
19021 SDValue Op0 = Op.getOperand(0);
19023 // This is a sign extension of some low part of vector elements without
19024 // changing the size of the vector elements themselves:
19025 // Shift-Left + Shift-Right-Algebraic.
19026 SDValue Shl = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Op0,
19027 BitsDiff, DAG);
19028 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Shl, BitsDiff,
19029 DAG);
19030 }
19031 }
19032 }
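// Editor's note - worked example, not from the original source: for a
// v4i32 sign_extend_inreg from i16, BitsDiff = 32 - 16 = 16, so each lane
// computes (sra (shl x, 16), 16), replicating bit 15 across bits 31:16.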
19034 /// Returns true if the operand type is exactly twice the native width, and
19035 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
19036 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
19037 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
19038 bool X86TargetLowering::needsCmpXchgNb(const Type *MemType) const {
19039 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
19041 if (OpWidth == 64)
19042 return !Subtarget->is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
19043 else if (OpWidth == 128)
19044 return Subtarget->hasCmpxchg16b();
19046 return false;
19047 }
19049 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
19050 return needsCmpXchgNb(SI->getValueOperand()->getType());
19051 }
19053 // Note: this turns large loads into lock cmpxchg8b/16b.
19054 // FIXME: On 32-bit x86, fild/movq might be faster than lock cmpxchg8b.
19055 bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
19056 auto PTy = cast<PointerType>(LI->getPointerOperand()->getType());
19057 return needsCmpXchgNb(PTy->getElementType());
19058 }
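// Editor's note: e.g. an atomic i128 operation on x86-64 (native width 64)
// is exactly twice the native width, so it is lowered with cmpxchg16b when
// the subtarget has it and left as a __sync_* libcall otherwise.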
19060 bool X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
19061 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19062 const Type *MemType = AI->getType();
19064 // If the operand is too big, we must see if cmpxchg8/16b is available
19065 // and default to library calls otherwise.
19066 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19067 return needsCmpXchgNb(MemType);
19069 AtomicRMWInst::BinOp Op = AI->getOperation();
19070 switch (Op) {
19071 default:
19072 llvm_unreachable("Unknown atomic operation");
19073 case AtomicRMWInst::Xchg:
19074 case AtomicRMWInst::Add:
19075 case AtomicRMWInst::Sub:
19076 // It's better to use xadd, xsub or xchg for these in all cases.
19077 return false;
19078 case AtomicRMWInst::Or:
19079 case AtomicRMWInst::And:
19080 case AtomicRMWInst::Xor:
19081 // If the atomicrmw's result isn't actually used, we can just add a "lock"
19082 // prefix to a normal instruction for these operations.
19083 return !AI->use_empty();
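// Editor's note - illustrative example, not from the original source: an
// unused "atomicrmw or i32* %p, i32 1 seq_cst" can be selected as
// "lock orl $1, (%rdi)", but when the old value is needed there is no
// xadd-style form of OR, so a cmpxchg loop is required.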
19084 case AtomicRMWInst::Nand:
19085 case AtomicRMWInst::Max:
19086 case AtomicRMWInst::Min:
19087 case AtomicRMWInst::UMax:
19088 case AtomicRMWInst::UMin:
19089 // These always require a non-trivial set of data operations on x86. We must
19090 // use a cmpxchg loop.
19091 return true;
19092 }
19093 }
19095 static bool hasMFENCE(const X86Subtarget& Subtarget) {
19096 // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
19097 // no-sse2). There isn't any reason to disable it if the target processor
19098 // supports it.
19099 return Subtarget.hasSSE2() || Subtarget.is64Bit();
19100 }
19102 LoadInst *
19103 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
19104 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19105 const Type *MemType = AI->getType();
19106 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
19107 // there is no benefit in turning such RMWs into loads, and it is actually
19108 // harmful as it introduces a mfence.
19109 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19110 return nullptr;
19112 auto Builder = IRBuilder<>(AI);
19113 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
19114 auto SynchScope = AI->getSynchScope();
19115 // We must restrict the ordering to avoid generating loads with Release or
19116 // ReleaseAcquire orderings.
19117 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
19118 auto Ptr = AI->getPointerOperand();
19120 // Before the load we need a fence. Here is an example lifted from
19121 // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
19122 // is required:
19123 // Thread 0:
19124 //  x.store(1, relaxed);
19125 //  r1 = y.fetch_add(0, release);
19126 // Thread 1:
19127 //  y.fetch_add(42, acquire);
19128 //  r2 = x.load(relaxed);
19129 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
19130 // lowered to just a load without a fence. A mfence flushes the store buffer,
19131 // making the optimization clearly correct.
19132 // FIXME: it is required if isAtLeastRelease(Order) but it is not clear
19133 // otherwise, we might be able to be more aggressive on relaxed idempotent
19134 // rmw. In practice, they do not look useful, so we don't try to be
19135 // especially clever.
19136 if (SynchScope == SingleThread) {
19137 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
19138 // the IR level, so we must wrap it in an intrinsic.
19139 return nullptr;
19140 } else if (hasMFENCE(*Subtarget)) {
19141 Function *MFence = llvm::Intrinsic::getDeclaration(M,
19142 Intrinsic::x86_sse2_mfence);
19143 Builder.CreateCall(MFence);
19144 } else {
19145 // FIXME: it might make sense to use a locked operation here but on a
19146 // different cache-line to prevent cache-line bouncing. In practice it
19147 // is probably a small win, and x86 processors without mfence are rare
19148 // enough that we do not bother.
19149 return nullptr;
19150 }
19152 // Finally we can emit the atomic load.
19153 LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
19154 AI->getType()->getPrimitiveSizeInBits());
19155 Loaded->setAtomic(Order, SynchScope);
19156 AI->replaceAllUsesWith(Loaded);
19157 AI->eraseFromParent();
19159 return Loaded;
19160 }
19161 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
19162 SelectionDAG &DAG) {
19163 SDLoc dl(Op);
19164 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
19165 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
19166 SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
19167 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
19169 // The only fence that needs an instruction is a sequentially-consistent
19170 // cross-thread fence.
19171 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
19172 if (hasMFENCE(*Subtarget))
19173 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
19175 SDValue Chain = Op.getOperand(0);
19176 SDValue Zero = DAG.getConstant(0, MVT::i32);
19177 SDValue Ops[] = {
19178 DAG.getRegister(X86::ESP, MVT::i32), // Base
19179 DAG.getTargetConstant(1, MVT::i8),   // Scale
19180 DAG.getRegister(0, MVT::i32),        // Index
19181 DAG.getTargetConstant(0, MVT::i32),  // Disp
19182 DAG.getRegister(0, MVT::i32),        // Segment.
19183 Zero,
19184 Chain
19185 };
19186 SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
19187 return SDValue(Res, 0);
19188 }
19190 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
19191 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
19194 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
19195 SelectionDAG &DAG) {
19196 MVT T = Op.getSimpleValueType();
19197 SDLoc DL(Op);
19198 unsigned Reg = 0;
19199 unsigned size = 0;
19200 switch(T.SimpleTy) {
19201 default: llvm_unreachable("Invalid value type!");
19202 case MVT::i8:  Reg = X86::AL;  size = 1; break;
19203 case MVT::i16: Reg = X86::AX;  size = 2; break;
19204 case MVT::i32: Reg = X86::EAX; size = 4; break;
19205 case MVT::i64:
19206 assert(Subtarget->is64Bit() && "Node not type legal!");
19207 Reg = X86::RAX; size = 8;
19208 break;
19209 }
19210 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
19211 Op.getOperand(2), SDValue());
19212 SDValue Ops[] = { cpIn.getValue(0),
19213 Op.getOperand(1),
19214 Op.getOperand(3),
19215 DAG.getTargetConstant(size, MVT::i8),
19216 cpIn.getValue(1) };
19217 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
19218 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
19219 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
19220 Ops, T, MMO);
19222 SDValue cpOut =
19223 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
19224 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
19225 MVT::i32, cpOut.getValue(2));
19226 SDValue Success = DAG.getNode(X86ISD::SETCC, DL, Op->getValueType(1),
19227 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
19229 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
19230 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
19231 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
19232 return SDValue();
19233 }
19235 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
19236 SelectionDAG &DAG) {
19237 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
19238 MVT DstVT = Op.getSimpleValueType();
19240 if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) {
19241 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
19242 if (DstVT != MVT::f64)
19243 // This conversion needs to be expanded.
19244 return SDValue();
19246 SDValue InVec = Op->getOperand(0);
19247 SDLoc dl(Op);
19248 unsigned NumElts = SrcVT.getVectorNumElements();
19249 EVT SVT = SrcVT.getVectorElementType();
19251 // Widen the vector in input in the case of MVT::v2i32.
19252 // Example: from MVT::v2i32 to MVT::v4i32.
19253 SmallVector<SDValue, 16> Elts;
19254 for (unsigned i = 0, e = NumElts; i != e; ++i)
19255 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
19256 DAG.getIntPtrConstant(i)));
19258 // Explicitly mark the extra elements as Undef.
19259 SDValue Undef = DAG.getUNDEF(SVT);
19260 for (unsigned i = NumElts, e = NumElts * 2; i != e; ++i)
19261 Elts.push_back(Undef);
19263 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
19264 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
19265 SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
19266 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
19267 DAG.getIntPtrConstant(0));
19268 }
19270 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
19271 Subtarget->hasMMX() && "Unexpected custom BITCAST");
19272 assert((DstVT == MVT::i64 ||
19273 (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
19274 "Unexpected custom BITCAST");
19275 // i64 <=> MMX conversions are Legal.
19276 if (SrcVT==MVT::i64 && DstVT.isVector())
19277 return Op;
19278 if (DstVT==MVT::i64 && SrcVT.isVector())
19279 return Op;
19280 // MMX <=> MMX conversions are Legal.
19281 if (SrcVT.isVector() && DstVT.isVector())
19282 return Op;
19283 // All other conversions need to be expanded.
19284 return SDValue();
19285 }
19287 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
19288 SelectionDAG &DAG) {
19289 SDNode *Node = Op.getNode();
19290 SDLoc dl(Node);
19292 Op = Op.getOperand(0);
19293 EVT VT = Op.getValueType();
19294 assert((VT.is128BitVector() || VT.is256BitVector()) &&
19295 "CTPOP lowering only implemented for 128/256-bit wide vector types");
19297 unsigned NumElts = VT.getVectorNumElements();
19298 EVT EltVT = VT.getVectorElementType();
19299 unsigned Len = EltVT.getSizeInBits();
19301 // This is the vectorized version of the "best" algorithm from
19302 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
19303 // with a minor tweak to use a series of adds + shifts instead of vector
19304 // multiplications. Implemented for the v2i64, v4i64, v4i32, v8i32 types:
19306 // v2i64, v4i64, v4i32 => Only profitable w/ popcnt disabled
19307 // v8i32 => Always profitable
19309 // FIXME: There are a couple of possible improvements:
19311 // 1) Support for i8 and i16 vectors (needs measurements if popcnt enabled).
19312 // 2) Use strategies from http://wm.ite.pl/articles/sse-popcount.html
19314 assert(EltVT.isInteger() && (Len == 32 || Len == 64) && Len % 8 == 0 &&
19315 "CTPOP not implemented for this vector element type.");
19317 // X86 canonicalizes ANDs to vXi64; generate the appropriate bitcasts to avoid
19318 // extra legalization.
19319 bool NeedsBitcast = EltVT == MVT::i32;
19320 MVT BitcastVT = VT.is256BitVector() ? MVT::v4i64 : MVT::v2i64;
19322 SDValue Cst55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), EltVT);
19323 SDValue Cst33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), EltVT);
19324 SDValue Cst0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), EltVT);
19326 // v = v - ((v >> 1) & 0x55555555...)
19327 SmallVector<SDValue, 8> Ones(NumElts, DAG.getConstant(1, EltVT));
19328 SDValue OnesV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ones);
19329 SDValue Srl = DAG.getNode(ISD::SRL, dl, VT, Op, OnesV);
19330 if (NeedsBitcast)
19331 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19333 SmallVector<SDValue, 8> Mask55(NumElts, Cst55);
19334 SDValue M55 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask55);
19335 if (NeedsBitcast)
19336 M55 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M55);
19338 SDValue And = DAG.getNode(ISD::AND, dl, Srl.getValueType(), Srl, M55);
19339 if (VT != And.getValueType())
19340 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19341 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Op, And);
19343 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
19344 SmallVector<SDValue, 8> Mask33(NumElts, Cst33);
19345 SDValue M33 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask33);
19346 SmallVector<SDValue, 8> Twos(NumElts, DAG.getConstant(2, EltVT));
19347 SDValue TwosV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Twos);
19349 Srl = DAG.getNode(ISD::SRL, dl, VT, Sub, TwosV);
19350 if (NeedsBitcast) {
19351 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19352 M33 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M33);
19353 Sub = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Sub);
19354 }
19356 SDValue AndRHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Srl, M33);
19357 SDValue AndLHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Sub, M33);
19358 if (VT != AndRHS.getValueType()) {
19359 AndRHS = DAG.getNode(ISD::BITCAST, dl, VT, AndRHS);
19360 AndLHS = DAG.getNode(ISD::BITCAST, dl, VT, AndLHS);
19361 }
19362 SDValue Add = DAG.getNode(ISD::ADD, dl, VT, AndLHS, AndRHS);
19364 // v = (v + (v >> 4)) & 0x0F0F0F0F...
19365 SmallVector<SDValue, 8> Fours(NumElts, DAG.getConstant(4, EltVT));
19366 SDValue FoursV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Fours);
19367 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, FoursV);
19368 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19370 SmallVector<SDValue, 8> Mask0F(NumElts, Cst0F);
19371 SDValue M0F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask0F);
19372 if (NeedsBitcast) {
19373 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19374 M0F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M0F);
19375 }
19376 And = DAG.getNode(ISD::AND, dl, M0F.getValueType(), Add, M0F);
19377 if (VT != And.getValueType())
19378 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19380 // The algorithm mentioned above uses:
19381 // v = (v * 0x01010101...) >> (Len - 8)
19383 // Change it to use vector adds + vector shifts which yield faster results on
19384 // Haswell than using vector integer multiplication.
19386 // For i32 elements:
19387 // v = v + (v >> 8)
19388 // v = v + (v >> 16)
19390 // For i64 elements:
19391 // v = v + (v >> 8)
19392 // v = v + (v >> 16)
19393 // v = v + (v >> 32)
19395 Add = And;
19396 SmallVector<SDValue, 8> Csts;
19397 for (unsigned i = 8; i <= Len/2; i *= 2) {
19398 Csts.assign(NumElts, DAG.getConstant(i, EltVT));
19399 SDValue CstsV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Csts);
19400 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, CstsV);
19401 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19402 }
19405 // The result is in the least significant 6 bits for i32 and 7 bits for i64.
19406 SDValue Cst3F = DAG.getConstant(APInt(Len, Len == 32 ? 0x3F : 0x7F), EltVT);
19407 SmallVector<SDValue, 8> Cst3FV(NumElts, Cst3F);
19408 SDValue M3F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Cst3FV);
19409 if (NeedsBitcast) {
19410 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19411 M3F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M3F);
19412 }
19413 And = DAG.getNode(ISD::AND, dl, M3F.getValueType(), Add, M3F);
19414 if (VT != And.getValueType())
19415 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19417 return And;
19418 }
19420 static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
19421 SDNode *Node = Op.getNode();
19422 SDLoc dl(Node);
19423 EVT T = Node->getValueType(0);
19424 SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
19425 DAG.getConstant(0, T), Node->getOperand(2));
19426 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
19427 cast<AtomicSDNode>(Node)->getMemoryVT(),
19428 Node->getOperand(0),
19429 Node->getOperand(1), negOp,
19430 cast<AtomicSDNode>(Node)->getMemOperand(),
19431 cast<AtomicSDNode>(Node)->getOrdering(),
19432 cast<AtomicSDNode>(Node)->getSynchScope());
19433 }
19435 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
19436 SDNode *Node = Op.getNode();
19437 SDLoc dl(Node);
19438 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
19440 // Convert seq_cst store -> xchg
19441 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
19442 // FIXME: On 32-bit, store -> fist or movq would be more efficient
19443 // (The only way to get a 16-byte store is cmpxchg16b)
19444 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
19445 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
19446 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
19447 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
19448 cast<AtomicSDNode>(Node)->getMemoryVT(),
19449 Node->getOperand(0),
19450 Node->getOperand(1), Node->getOperand(2),
19451 cast<AtomicSDNode>(Node)->getMemOperand(),
19452 cast<AtomicSDNode>(Node)->getOrdering(),
19453 cast<AtomicSDNode>(Node)->getSynchScope());
19454 return Swap.getValue(1);
19455 }
19456 // Other atomic stores have a simple pattern.
19457 return Op;
19458 }
19460 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
19461 EVT VT = Op.getNode()->getSimpleValueType(0);
19463 // Let legalize expand this if it isn't a legal type yet.
19464 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
19465 return SDValue();
19467 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
19469 unsigned Opc;
19470 bool ExtraOp = false;
19471 switch (Op.getOpcode()) {
19472 default: llvm_unreachable("Invalid code");
19473 case ISD::ADDC: Opc = X86ISD::ADD; break;
19474 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
19475 case ISD::SUBC: Opc = X86ISD::SUB; break;
19476 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
19477 }
19479 if (!ExtraOp)
19480 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19481 Op.getOperand(1));
19482 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19483 Op.getOperand(1), Op.getOperand(2));
19484 }
19486 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
19487 SelectionDAG &DAG) {
19488 assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit());
19490 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
19491 // which returns the values as { float, float } (in XMM0) or
19492 // { double, double } (which is returned in XMM0, XMM1).
19493 SDLoc dl(Op);
19494 SDValue Arg = Op.getOperand(0);
19495 EVT ArgVT = Arg.getValueType();
19496 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
19498 TargetLowering::ArgListTy Args;
19499 TargetLowering::ArgListEntry Entry;
19501 Entry.Node = Arg;
19502 Entry.Ty = ArgTy;
19503 Entry.isSExt = false;
19504 Entry.isZExt = false;
19505 Args.push_back(Entry);
19507 bool isF64 = ArgVT == MVT::f64;
19508 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
19509 // the small struct {f32, f32} is returned in (eax, edx). For f64,
19510 // the results are returned via SRet in memory.
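// Editor's note - sketch of the assumed Darwin x86-64 contract, not part
// of the original source:
//   { float, float }   __sincosf_stret(float);  // sin lane 0, cos lane 1 of XMM0
//   { double, double } __sincos_stret(double);  // sin in XMM0, cos in XMM1
// so both results of one call are extracted below without a second libcall.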
19511 const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret";
19512 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19513 SDValue Callee = DAG.getExternalSymbol(LibcallName, TLI.getPointerTy());
19515 Type *RetTy = isF64
19516 ? (Type*)StructType::get(ArgTy, ArgTy, nullptr)
19517 : (Type*)VectorType::get(ArgTy, 4);
19519 TargetLowering::CallLoweringInfo CLI(DAG);
19520 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
19521 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);
19523 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
19525 if (isF64)
19526 // Returned in xmm0 and xmm1.
19527 return CallResult.first;
19529 // Returned in bits 0:31 and 32:63 of xmm0.
19530 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19531 CallResult.first, DAG.getIntPtrConstant(0));
19532 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19533 CallResult.first, DAG.getIntPtrConstant(1));
19534 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
19535 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
19536 }
19538 /// LowerOperation - Provide custom lowering hooks for some operations.
19540 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
19541 switch (Op.getOpcode()) {
19542 default: llvm_unreachable("Should not custom lower this!");
19543 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG);
19544 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
19545 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
19546 return LowerCMP_SWAP(Op, Subtarget, DAG);
19547 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
19548 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
19549 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG);
19550 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
19551 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
19552 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
19553 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
19554 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
19555 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
19556 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
19557 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
19558 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
19559 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
19560 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
19561 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
19562 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
19563 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
19564 case ISD::SHL_PARTS:
19565 case ISD::SRA_PARTS:
19566 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
19567 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
19568 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
19569 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
19570 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
19571 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
19572 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
19573 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
19574 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
19575 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
19576 case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG);
19577 case ISD::FABS:
19578 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
19579 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
19580 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
19581 case ISD::SETCC: return LowerSETCC(Op, DAG);
19582 case ISD::SELECT: return LowerSELECT(Op, DAG);
19583 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
19584 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
19585 case ISD::VASTART: return LowerVASTART(Op, DAG);
19586 case ISD::VAARG: return LowerVAARG(Op, DAG);
19587 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
19588 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, Subtarget, DAG);
19589 case ISD::INTRINSIC_VOID:
19590 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
19591 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
19592 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
19593 case ISD::FRAME_TO_ARGS_OFFSET:
19594 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
19595 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
19596 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
19597 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
19598 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
19599 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
19600 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
19601 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
19602 case ISD::CTLZ: return LowerCTLZ(Op, DAG);
19603 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG);
19604 case ISD::CTTZ: return LowerCTTZ(Op, DAG);
19605 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
19606 case ISD::UMUL_LOHI:
19607 case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
19608 case ISD::SRA:
19609 case ISD::SRL:
19610 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
19611 case ISD::SADDO:
19612 case ISD::UADDO:
19613 case ISD::SSUBO:
19614 case ISD::USUBO:
19615 case ISD::SMULO:
19616 case ISD::UMULO: return LowerXALUO(Op, DAG);
19617 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
19618 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
19619 case ISD::ADDC:
19620 case ISD::ADDE:
19621 case ISD::SUBC:
19622 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
19623 case ISD::ADD: return LowerADD(Op, DAG);
19624 case ISD::SUB: return LowerSUB(Op, DAG);
19625 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
19626 }
19627 }
19629 /// ReplaceNodeResults - Replace a node with an illegal result type
19630 /// with a new node built out of custom code.
19631 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
19632 SmallVectorImpl<SDValue>&Results,
19633 SelectionDAG &DAG) const {
19634 SDLoc dl(N);
19635 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19636 switch (N->getOpcode()) {
19637 default:
19638 llvm_unreachable("Do not know how to custom type legalize this operation!");
19639 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
19640 case X86ISD::FMINC:
19641 case X86ISD::FMIN:
19642 case X86ISD::FMAXC:
19643 case X86ISD::FMAX: {
19644 EVT VT = N->getValueType(0);
19645 if (VT != MVT::v2f32)
19646 llvm_unreachable("Unexpected type (!= v2f32) on FMIN/FMAX.");
19647 SDValue UNDEF = DAG.getUNDEF(VT);
19648 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
19649 N->getOperand(0), UNDEF);
19650 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
19651 N->getOperand(1), UNDEF);
19652 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
19653 return;
19654 }
19655 case ISD::SIGN_EXTEND_INREG:
19656 case ISD::ADDC:
19657 case ISD::ADDE:
19658 case ISD::SUBC:
19659 case ISD::SUBE:
19660 // We don't want to expand or promote these.
19661 return;
19662 case ISD::SDIV:
19663 case ISD::UDIV:
19664 case ISD::SREM:
19665 case ISD::UREM:
19666 case ISD::SDIVREM:
19667 case ISD::UDIVREM: {
19668 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
19669 Results.push_back(V);
19670 return;
19671 }
19672 case ISD::FP_TO_SINT:
19673 case ISD::FP_TO_UINT: {
19674 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
19676 if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType()))
19677 return;
19679 std::pair<SDValue,SDValue> Vals =
19680 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
19681 SDValue FIST = Vals.first, StackSlot = Vals.second;
19682 if (FIST.getNode()) {
19683 EVT VT = N->getValueType(0);
19684 // Return a load from the stack slot.
19685 if (StackSlot.getNode())
19686 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
19687 MachinePointerInfo(),
19688 false, false, false, 0));
19689 else
19690 Results.push_back(FIST);
19691 return;
19692 }
19694 case ISD::UINT_TO_FP: {
19695 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
19696 if (N->getOperand(0).getValueType() != MVT::v2i32 ||
19697 N->getValueType(0) != MVT::v2f32)
19698 return;
19699 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
19700 N->getOperand(0));
19701 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
19702 MVT::f64);
19703 SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias);
19704 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
19705 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias));
19706 Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or);
19707 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
19708 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
19709 return;
19710 }
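// Editor's note: this is the standard 2^52 bias trick. ORing each
// zero-extended u32 lane into the low bits of 0x4330000000000000 (the f64
// value 2^52) yields exactly 2^52 + x, so the FSUB of VBias leaves x as an
// f64; e.g. x = 7: (2^52 + 7) - 2^52 = 7.0, which VFPROUND then narrows
// to f32.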
19711 case ISD::FP_ROUND: {
19712 if (!TLI.isTypeLegal(N->getOperand(0).getValueType()))
19713 return;
19714 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
19715 Results.push_back(V);
19716 return;
19717 }
19718 case ISD::INTRINSIC_W_CHAIN: {
19719 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
19720 switch (IntNo) {
19721 default : llvm_unreachable("Do not know how to custom type "
19722 "legalize this intrinsic operation!");
19723 case Intrinsic::x86_rdtsc:
19724 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
19725 Results);
19726 case Intrinsic::x86_rdtscp:
19727 return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
19728 Results);
19729 case Intrinsic::x86_rdpmc:
19730 return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
19731 }
19732 }
19733 case ISD::READCYCLECOUNTER: {
19734 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
19735 Results);
19736 }
19737 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
19738 EVT T = N->getValueType(0);
19739 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
19740 bool Regs64bit = T == MVT::i128;
19741 EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
19742 SDValue cpInL, cpInH;
19743 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
19744 DAG.getConstant(0, HalfT));
19745 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
19746 DAG.getConstant(1, HalfT));
19747 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
19748 Regs64bit ? X86::RAX : X86::EAX,
19749 cpInL, SDValue());
19750 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
19751 Regs64bit ? X86::RDX : X86::EDX,
19752 cpInH, cpInL.getValue(1));
19753 SDValue swapInL, swapInH;
19754 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
19755 DAG.getConstant(0, HalfT));
19756 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
19757 DAG.getConstant(1, HalfT));
19758 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl,
19759 Regs64bit ? X86::RBX : X86::EBX,
19760 swapInL, cpInH.getValue(1));
19761 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl,
19762 Regs64bit ? X86::RCX : X86::ECX,
19763 swapInH, swapInL.getValue(1));
19764 SDValue Ops[] = { swapInH.getValue(0),
19765 N->getOperand(1),
19766 swapInH.getValue(1) };
19767 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
19768 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
19769 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG :
19770 X86ISD::LCMPXCHG8_DAG;
19771 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
19772 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
19773 Regs64bit ? X86::RAX : X86::EAX,
19774 HalfT, Result.getValue(1));
19775 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
19776 Regs64bit ? X86::RDX : X86::EDX,
19777 HalfT, cpOutL.getValue(2));
19778 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
19780 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
19781 MVT::i32, cpOutH.getValue(2));
19782 SDValue Success =
19783 DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
19784 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
19785 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
19787 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
19788 Results.push_back(Success);
19789 Results.push_back(EFLAGS.getValue(1));
19790 return;
19791 }
19792 case ISD::ATOMIC_SWAP:
19793 case ISD::ATOMIC_LOAD_ADD:
19794 case ISD::ATOMIC_LOAD_SUB:
19795 case ISD::ATOMIC_LOAD_AND:
19796 case ISD::ATOMIC_LOAD_OR:
19797 case ISD::ATOMIC_LOAD_XOR:
19798 case ISD::ATOMIC_LOAD_NAND:
19799 case ISD::ATOMIC_LOAD_MIN:
19800 case ISD::ATOMIC_LOAD_MAX:
19801 case ISD::ATOMIC_LOAD_UMIN:
19802 case ISD::ATOMIC_LOAD_UMAX:
19803 case ISD::ATOMIC_LOAD: {
19804 // Delegate to generic TypeLegalization. Situations we can really handle
19805 // should have already been dealt with by AtomicExpandPass.cpp.
19806 return;
19807 }
19808 case ISD::BITCAST: {
19809 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
19810 EVT DstVT = N->getValueType(0);
19811 EVT SrcVT = N->getOperand(0)->getValueType(0);
19813 if (SrcVT != MVT::f64 ||
19814 (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
19815 return;
19817 unsigned NumElts = DstVT.getVectorNumElements();
19818 EVT SVT = DstVT.getVectorElementType();
19819 EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
19820 SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
19821 MVT::v2f64, N->getOperand(0));
19822 SDValue ToVecInt = DAG.getNode(ISD::BITCAST, dl, WiderVT, Expanded);
19824 if (ExperimentalVectorWideningLegalization) {
19825 // If we are legalizing vectors by widening, we already have the desired
19826 // legal vector type, just return it.
19827 Results.push_back(ToVecInt);
19828 return;
19829 }
19831 SmallVector<SDValue, 8> Elts;
19832 for (unsigned i = 0, e = NumElts; i != e; ++i)
19833 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
19834 ToVecInt, DAG.getIntPtrConstant(i)));
19836 Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
19837 }
19838 }
19839 }
19841 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
19842 switch (Opcode) {
19843 default: return nullptr;
19844 case X86ISD::BSF: return "X86ISD::BSF";
19845 case X86ISD::BSR: return "X86ISD::BSR";
19846 case X86ISD::SHLD: return "X86ISD::SHLD";
19847 case X86ISD::SHRD: return "X86ISD::SHRD";
19848 case X86ISD::FAND: return "X86ISD::FAND";
19849 case X86ISD::FANDN: return "X86ISD::FANDN";
19850 case X86ISD::FOR: return "X86ISD::FOR";
19851 case X86ISD::FXOR: return "X86ISD::FXOR";
19852 case X86ISD::FSRL: return "X86ISD::FSRL";
19853 case X86ISD::FILD: return "X86ISD::FILD";
19854 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
19855 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
19856 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
19857 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
19858 case X86ISD::FLD: return "X86ISD::FLD";
19859 case X86ISD::FST: return "X86ISD::FST";
19860 case X86ISD::CALL: return "X86ISD::CALL";
19861 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
19862 case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
19863 case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
19864 case X86ISD::BT: return "X86ISD::BT";
19865 case X86ISD::CMP: return "X86ISD::CMP";
19866 case X86ISD::COMI: return "X86ISD::COMI";
19867 case X86ISD::UCOMI: return "X86ISD::UCOMI";
19868 case X86ISD::CMPM: return "X86ISD::CMPM";
19869 case X86ISD::CMPMU: return "X86ISD::CMPMU";
19870 case X86ISD::SETCC: return "X86ISD::SETCC";
19871 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
19872 case X86ISD::FSETCC: return "X86ISD::FSETCC";
19873 case X86ISD::CMOV: return "X86ISD::CMOV";
19874 case X86ISD::BRCOND: return "X86ISD::BRCOND";
19875 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
19876 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
19877 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
19878 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
19879 case X86ISD::Wrapper: return "X86ISD::Wrapper";
19880 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
19881 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
19882 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
19883 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
19884 case X86ISD::PINSRB: return "X86ISD::PINSRB";
19885 case X86ISD::PINSRW: return "X86ISD::PINSRW";
19886 case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
19887 case X86ISD::ANDNP: return "X86ISD::ANDNP";
19888 case X86ISD::PSIGN: return "X86ISD::PSIGN";
19889 case X86ISD::BLENDI: return "X86ISD::BLENDI";
19890 case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
19891 case X86ISD::SUBUS: return "X86ISD::SUBUS";
19892 case X86ISD::HADD: return "X86ISD::HADD";
19893 case X86ISD::HSUB: return "X86ISD::HSUB";
19894 case X86ISD::FHADD: return "X86ISD::FHADD";
19895 case X86ISD::FHSUB: return "X86ISD::FHSUB";
19896 case X86ISD::UMAX: return "X86ISD::UMAX";
19897 case X86ISD::UMIN: return "X86ISD::UMIN";
19898 case X86ISD::SMAX: return "X86ISD::SMAX";
19899 case X86ISD::SMIN: return "X86ISD::SMIN";
19900 case X86ISD::FMAX: return "X86ISD::FMAX";
19901 case X86ISD::FMIN: return "X86ISD::FMIN";
19902 case X86ISD::FMAXC: return "X86ISD::FMAXC";
19903 case X86ISD::FMINC: return "X86ISD::FMINC";
19904 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
19905 case X86ISD::FRCP: return "X86ISD::FRCP";
19906 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
19907 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
19908 case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
19909 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
19910 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
19911 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
19912 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
19913 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
19914 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
19915 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
19916 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
19917 case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
19918 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
19919 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
19920 case X86ISD::VZEXT: return "X86ISD::VZEXT";
19921 case X86ISD::VSEXT: return "X86ISD::VSEXT";
19922 case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
19923 case X86ISD::VTRUNCM: return "X86ISD::VTRUNCM";
19924 case X86ISD::VINSERT: return "X86ISD::VINSERT";
19925 case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
19926 case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
19927 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
19928 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
19929 case X86ISD::VSHL: return "X86ISD::VSHL";
19930 case X86ISD::VSRL: return "X86ISD::VSRL";
19931 case X86ISD::VSRA: return "X86ISD::VSRA";
19932 case X86ISD::VSHLI: return "X86ISD::VSHLI";
19933 case X86ISD::VSRLI: return "X86ISD::VSRLI";
19934 case X86ISD::VSRAI: return "X86ISD::VSRAI";
19935 case X86ISD::CMPP: return "X86ISD::CMPP";
19936 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
19937 case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
19938 case X86ISD::PCMPEQM: return "X86ISD::PCMPEQM";
19939 case X86ISD::PCMPGTM: return "X86ISD::PCMPGTM";
19940 case X86ISD::ADD: return "X86ISD::ADD";
19941 case X86ISD::SUB: return "X86ISD::SUB";
19942 case X86ISD::ADC: return "X86ISD::ADC";
19943 case X86ISD::SBB: return "X86ISD::SBB";
19944 case X86ISD::SMUL: return "X86ISD::SMUL";
19945 case X86ISD::UMUL: return "X86ISD::UMUL";
19946 case X86ISD::SMUL8: return "X86ISD::SMUL8";
19947 case X86ISD::UMUL8: return "X86ISD::UMUL8";
19948 case X86ISD::SDIVREM8_SEXT_HREG: return "X86ISD::SDIVREM8_SEXT_HREG";
19949 case X86ISD::UDIVREM8_ZEXT_HREG: return "X86ISD::UDIVREM8_ZEXT_HREG";
19950 case X86ISD::INC: return "X86ISD::INC";
19951 case X86ISD::DEC: return "X86ISD::DEC";
19952 case X86ISD::OR: return "X86ISD::OR";
19953 case X86ISD::XOR: return "X86ISD::XOR";
19954 case X86ISD::AND: return "X86ISD::AND";
19955 case X86ISD::BEXTR: return "X86ISD::BEXTR";
19956 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
19957 case X86ISD::PTEST: return "X86ISD::PTEST";
19958 case X86ISD::TESTP: return "X86ISD::TESTP";
19959 case X86ISD::TESTM: return "X86ISD::TESTM";
19960 case X86ISD::TESTNM: return "X86ISD::TESTNM";
19961 case X86ISD::KORTEST: return "X86ISD::KORTEST";
19962 case X86ISD::PACKSS: return "X86ISD::PACKSS";
19963 case X86ISD::PACKUS: return "X86ISD::PACKUS";
19964 case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
19965 case X86ISD::VALIGN: return "X86ISD::VALIGN";
19966 case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
19967 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
19968 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
19969 case X86ISD::SHUFP: return "X86ISD::SHUFP";
19970 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
19971 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
19972 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
19973 case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
19974 case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
19975 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
19976 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
19977 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
19978 case X86ISD::MOVSD: return "X86ISD::MOVSD";
19979 case X86ISD::MOVSS: return "X86ISD::MOVSS";
19980 case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
19981 case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
19982 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
19983 case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
19984 case X86ISD::VEXTRACT: return "X86ISD::VEXTRACT";
19985 case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
19986 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
19987 case X86ISD::VPERMV: return "X86ISD::VPERMV";
19988 case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
19989 case X86ISD::VPERMIV3: return "X86ISD::VPERMIV3";
19990 case X86ISD::VPERMI: return "X86ISD::VPERMI";
19991 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
19992 case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
19993 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
19994 case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
19995 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
19996 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
19997 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
19998 case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL";
19999 case X86ISD::SAHF: return "X86ISD::SAHF";
20000 case X86ISD::RDRAND: return "X86ISD::RDRAND";
20001 case X86ISD::RDSEED: return "X86ISD::RDSEED";
20002 case X86ISD::FMADD: return "X86ISD::FMADD";
20003 case X86ISD::FMSUB: return "X86ISD::FMSUB";
20004 case X86ISD::FNMADD: return "X86ISD::FNMADD";
20005 case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
20006 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
20007 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
20008 case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI";
20009 case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI";
20010 case X86ISD::XTEST: return "X86ISD::XTEST";
20011 case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
20012 case X86ISD::EXPAND: return "X86ISD::EXPAND";
20013 case X86ISD::SELECT: return "X86ISD::SELECT";
20014 case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
20015 case X86ISD::RCP28: return "X86ISD::RCP28";
20016 case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
20017 }
20018 }
20020 // isLegalAddressingMode - Return true if the addressing mode represented
20021 // by AM is legal for this target, for a load/store of the specified type.
20022 bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
20023 Type *Ty) const {
20024 // X86 supports extremely general addressing modes.
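// Editor's note - illustrative example, not from the original source: a
// single legal mode can combine every field, e.g.
//   movl 16(%rdi,%rcx,4), %eax   ; disp + base + index*scale
// subject to the checks below (sign-extended 32-bit displacement, scale
// restrictions, PIC-base interactions).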
20025 CodeModel::Model M = getTargetMachine().getCodeModel();
20026 Reloc::Model R = getTargetMachine().getRelocationModel();
20028 // X86 allows a sign-extended 32-bit immediate field as a displacement.
20029 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
20030 return false;
20032 if (AM.BaseGV) {
20033 unsigned GVFlags =
20034 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());
20036 // If a reference to this global requires an extra load, we can't fold it.
20037 if (isGlobalStubReference(GVFlags))
20038 return false;
20040 // If BaseGV requires a register for the PIC base, we cannot also have a
20041 // BaseReg specified.
20042 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
20043 return false;
20045 // If lower 4G is not available, then we must use rip-relative addressing.
20046 if ((M != CodeModel::Small || R != Reloc::Static) &&
20047 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
20048 return false;
20049 }
20051 switch (AM.Scale) {
20052 case 0:
20053 case 1:
20054 case 2:
20055 case 4:
20056 case 8:
20057 // These scales always work.
20058 break;
20059 case 3:
20060 case 5:
20061 case 9:
20062 // These scales are formed with basereg+scalereg. Only accept if there is
20063 // no basereg yet.
20064 if (AM.HasBaseReg)
20065 return false;
20066 break;
20067 default: // Other stuff never works.
20068 return false;
20069 }
20071 return true;
20072 }
20074 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
20075 unsigned Bits = Ty->getScalarSizeInBits();
20077 // 8-bit shifts are always expensive, but versions with a scalar amount aren't
20078 // particularly cheaper than those without.
20079 if (Bits == 8)
20080 return false;
20082 // On AVX2 there are new vpsllv[dq] instructions (and other shifts), that make
20083 // variable shifts just as cheap as scalar ones.
20084 if (Subtarget->hasInt256() && (Bits == 32 || Bits == 64))
20085 return false;
20087 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
20088 // fully general vector.
20089 return true;
20090 }
20092 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
20093 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20094 return false;
20095 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
20096 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
20097 return NumBits1 > NumBits2;
20098 }
20100 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
20101 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20102 return false;
20104 if (!isTypeLegal(EVT::getEVT(Ty1)))
20105 return false;
20107 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
20109 // Assuming the caller doesn't have a zeroext or signext return parameter,
20110 // truncation all the way down to i1 is valid.
20111 return true;
20112 }
20114 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
20115 return isInt<32>(Imm);
20116 }
20118 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
20119 // Can also use sub to handle negated immediates.
20120 return isInt<32>(Imm);
20121 }
20123 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
20124 if (!VT1.isInteger() || !VT2.isInteger())
20125 return false;
20126 unsigned NumBits1 = VT1.getSizeInBits();
20127 unsigned NumBits2 = VT2.getSizeInBits();
20128 return NumBits1 > NumBits2;
20129 }
20131 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
20132 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20133 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
20134 }
20136 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
20137 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20138 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
20139 }
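// Editor's note: "free" because any write to a 32-bit GPR implicitly zeroes
// bits 63:32 of the containing 64-bit register, e.g. "movl %esi, %eax"
// already materializes the i32 -> i64 zext with no extra instruction.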
20141 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
20142 EVT VT1 = Val.getValueType();
20143 if (isZExtFree(VT1, VT2))
20144 return true;
20146 if (Val.getOpcode() != ISD::LOAD)
20147 return false;
20149 if (!VT1.isSimple() || !VT1.isInteger() ||
20150 !VT2.isSimple() || !VT2.isInteger())
20151 return false;
20153 switch (VT1.getSimpleVT().SimpleTy) {
20154 default: break;
20155 case MVT::i8:
20156 case MVT::i16:
20157 case MVT::i32:
20158 // X86 has 8, 16, and 32-bit zero-extending loads.
20159 return true;
20160 }
20162 return false;
20163 }
20165 bool
20166 X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
20167 if (!(Subtarget->hasFMA() || Subtarget->hasFMA4()))
20168 return false;
20170 VT = VT.getScalarType();
20172 if (!VT.isSimple())
20173 return false;
20175 switch (VT.getSimpleVT().SimpleTy) {
20176 case MVT::f32:
20177 case MVT::f64:
20178 return true;
20179 default:
20180 break;
20181 }
20183 return false;
20184 }
20186 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
20187 // i16 instructions are longer (0x66 prefix) and potentially slower.
20188 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
20189 }
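// Editor's note: the 16-bit forms need the 0x66 operand-size prefix, e.g.
// "addw %ax, %bx" encodes as 66 01 C3 while "addl %eax, %ebx" is just
// 01 C3, and narrow ops can also cause partial-register stalls.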
20191 /// isShuffleMaskLegal - Targets can use this to indicate that they only
20192 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
20193 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
20194 /// are assumed to be legal.
20196 X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
20197 EVT VT) const {
20198 if (!VT.isSimple())
20199 return false;
20201 MVT SVT = VT.getSimpleVT();
20203 // Very little shuffling can be done for 64-bit vectors right now.
20204 if (VT.getSizeInBits() == 64)
20205 return false;
20207 // This is an experimental legality test that is tailored to match the
20208 // legality test of the experimental lowering more closely. They are gated
20209 // separately to ease testing of performance differences.
20210 if (ExperimentalVectorShuffleLegality)
20211 // We only care that the types being shuffled are legal. The lowering can
20212 // handle any possible shuffle mask that results.
20213 return isTypeLegal(SVT);
  // If this is a single-input shuffle with no 128-bit lane crossings we can
  // lower it into pshufb.
  if ((SVT.is128BitVector() && Subtarget->hasSSSE3()) ||
      (SVT.is256BitVector() && Subtarget->hasInt256())) {
    bool isLegal = true;
    for (unsigned I = 0, E = M.size(); I != E; ++I) {
      if (M[I] >= (int)SVT.getVectorNumElements() ||
          ShuffleCrosses128bitLane(SVT, I, M[I])) {
        isLegal = false;
        break;
      }
    }
    if (isLegal)
      return true;
  }

  // FIXME: blends, shifts.
  return (SVT.getVectorNumElements() == 2 ||
          ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
          isMOVLMask(M, SVT) ||
          isCommutedMOVLMask(M, SVT) ||
          isMOVHLPSMask(M, SVT) ||
          isSHUFPMask(M, SVT) ||
          isSHUFPMask(M, SVT, /* Commuted */ true) ||
          isPSHUFDMask(M, SVT) ||
          isPSHUFDMask(M, SVT, /* SecondOperand */ true) ||
          isPSHUFHWMask(M, SVT, Subtarget->hasInt256()) ||
          isPSHUFLWMask(M, SVT, Subtarget->hasInt256()) ||
          isPALIGNRMask(M, SVT, Subtarget) ||
          isUNPCKLMask(M, SVT, Subtarget->hasInt256()) ||
          isUNPCKHMask(M, SVT, Subtarget->hasInt256()) ||
          isUNPCKL_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
          isUNPCKH_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
          isBlendMask(M, SVT, Subtarget->hasSSE41(), Subtarget->hasInt256()) ||
          (Subtarget->hasSSE41() && isINSERTPSMask(M, SVT)));
}

bool
X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
                                          EVT VT) const {
  if (!VT.isSimple())
    return false;

  MVT SVT = VT.getSimpleVT();
20260 // This is an experimental legality test that is tailored to match the
20261 // legality test of the experimental lowering more closely. They are gated
20262 // separately to ease testing of performance differences.
20263 if (ExperimentalVectorShuffleLegality)
20264 // The new vector shuffle lowering is very good at managing zero-inputs.
20265 return isShuffleMaskLegal(Mask, VT);
  unsigned NumElts = SVT.getVectorNumElements();
  // FIXME: This collection of masks seems suspect.
  if (NumElts == 2)
    return true;
  if (NumElts == 4 && SVT.is128BitVector()) {
    return (isMOVLMask(Mask, SVT) ||
            isCommutedMOVLMask(Mask, SVT, true) ||
            isSHUFPMask(Mask, SVT) ||
            isSHUFPMask(Mask, SVT, /* Commuted */ true) ||
            isBlendMask(Mask, SVT, Subtarget->hasSSE41(),
                        Subtarget->hasInt256()));
  }
  return false;
}
20282 //===----------------------------------------------------------------------===//
20283 // X86 Scheduler Hooks
20284 //===----------------------------------------------------------------------===//
/// Utility function to emit xbegin specifying the start of an RTM region.
static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
                                     const TargetInstrInfo *TII) {
  DebugLoc DL = MI->getDebugLoc();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = MBB;
  ++I;

  // For the v = xbegin(), we generate
  //
  // thisMBB:
  //  xbegin sinkMBB
  //
  // mainMBB:
  //  eax = -1
  //
  // sinkMBB:
  //  v = eax

  MachineBasicBlock *thisMBB = MBB;
  MachineFunction *MF = MBB->getParent();
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, mainMBB);
  MF->insert(I, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // thisMBB:
  //  xbegin sinkMBB
  //  # fallthrough to mainMBB
  //  # abort to sinkMBB
  BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB);
  thisMBB->addSuccessor(mainMBB);
  thisMBB->addSuccessor(sinkMBB);

  // mainMBB:
  //  EAX = -1
  BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1);
  mainMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  // EAX is live into the sinkMBB
  sinkMBB->addLiveIn(X86::EAX);
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
    .addReg(X86::EAX);

  MI->eraseFromParent();
  return sinkMBB;
}
// FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8
// or XMM0_V32I8 in AVX all of this code can be replaced with that
// in the .td file.
static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
                                       const TargetInstrInfo *TII) {
  unsigned Opc;
  switch (MI->getOpcode()) {
  default: llvm_unreachable("illegal opcode!");
  case X86::PCMPISTRM128REG:  Opc = X86::PCMPISTRM128rr;  break;
  case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
  case X86::PCMPISTRM128MEM:  Opc = X86::PCMPISTRM128rm;  break;
  case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break;
  case X86::PCMPESTRM128REG:  Opc = X86::PCMPESTRM128rr;  break;
  case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break;
  case X86::PCMPESTRM128MEM:  Opc = X86::PCMPESTRM128rm;  break;
  case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
  }

  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));

  unsigned NumArgs = MI->getNumOperands();
  for (unsigned i = 1; i < NumArgs; ++i) {
    MachineOperand &Op = MI->getOperand(i);
    if (!(Op.isReg() && Op.isImplicit()))
      MIB.addOperand(Op);
  }
  if (MI->hasOneMemOperand())
    MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

  BuildMI(*BB, MI, dl,
          TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
    .addReg(X86::XMM0);

  MI->eraseFromParent();
  return BB;
}
// FIXME: Custom handling because TableGen doesn't support multiple implicit
// defs in an instruction pattern.
static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
                                       const TargetInstrInfo *TII) {
  unsigned Opc;
  switch (MI->getOpcode()) {
  default: llvm_unreachable("illegal opcode!");
  case X86::PCMPISTRIREG:  Opc = X86::PCMPISTRIrr;  break;
  case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
  case X86::PCMPISTRIMEM:  Opc = X86::PCMPISTRIrm;  break;
  case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break;
  case X86::PCMPESTRIREG:  Opc = X86::PCMPESTRIrr;  break;
  case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break;
  case X86::PCMPESTRIMEM:  Opc = X86::PCMPESTRIrm;  break;
  case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
  }

  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));

  unsigned NumArgs = MI->getNumOperands(); // remove the results
  for (unsigned i = 1; i < NumArgs; ++i) {
    MachineOperand &Op = MI->getOperand(i);
    if (!(Op.isReg() && Op.isImplicit()))
      MIB.addOperand(Op);
  }
  if (MI->hasOneMemOperand())
    MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

  BuildMI(*BB, MI, dl,
          TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
    .addReg(X86::ECX);

  MI->eraseFromParent();
  return BB;
}
static MachineBasicBlock *EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
                                      const X86Subtarget *Subtarget) {
  DebugLoc dl = MI->getDebugLoc();
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  // Address into RAX/EAX, other two args into ECX, EDX.
  unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
  unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
  MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
  for (int i = 0; i < X86::AddrNumOperands; ++i)
    MIB.addOperand(MI->getOperand(i));

  unsigned ValOps = X86::AddrNumOperands;
  BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
    .addReg(MI->getOperand(ValOps).getReg());
  BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
    .addReg(MI->getOperand(ValOps+1).getReg());

  // The instruction doesn't actually take any operands though.
  BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));

  MI->eraseFromParent(); // The pseudo is gone now.
  return BB;
}
MachineBasicBlock *
X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
                                                 MachineBasicBlock *MBB) const {
  // Emit va_arg instruction on X86-64.

  // Operands to this pseudo-instruction:
  // 0  ) Output        : destination address (reg)
  // 1-5) Input         : va_list address (addr, i64mem)
  // 6  ) ArgSize       : Size (in bytes) of vararg type
  // 7  ) ArgMode       : 0=overflow only, 1=use gp_offset, 2=use fp_offset
  // 8  ) Align         : Alignment of type
  // 9  ) EFLAGS (implicit-def)

  assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
  assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands");

  unsigned DestReg = MI->getOperand(0).getReg();
  MachineOperand &Base = MI->getOperand(1);
  MachineOperand &Scale = MI->getOperand(2);
  MachineOperand &Index = MI->getOperand(3);
  MachineOperand &Disp = MI->getOperand(4);
  MachineOperand &Segment = MI->getOperand(5);
  unsigned ArgSize = MI->getOperand(6).getImm();
  unsigned ArgMode = MI->getOperand(7).getImm();
  unsigned Align = MI->getOperand(8).getImm();

  // Memory Reference
  assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
  MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();

  // Machine Information
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
  const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
  DebugLoc DL = MI->getDebugLoc();

  // struct va_list {
  //   i32   gp_offset
  //   i32   fp_offset
  //   i64   overflow_area (address)
  //   i64   reg_save_area (address)
  // }
  // sizeof(va_list) = 24
  // alignment(va_list) = 8
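  //
  // For illustration, this matches the C-level SysV x86-64 declaration
  // (a sketch; nothing in this lowering depends on these names):
  //   typedef struct {
  //     unsigned gp_offset;      // next GPR slot in reg_save_area
  //     unsigned fp_offset;      // next XMM slot in reg_save_area
  //     void *overflow_arg_area; // stack area for spilled arguments
  //     void *reg_save_area;     // register dump written in the prologue
  //   } va_list[1];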
  unsigned TotalNumIntRegs = 6;
  unsigned TotalNumXMMRegs = 8;
  bool UseGPOffset = (ArgMode == 1);
  bool UseFPOffset = (ArgMode == 2);
  unsigned MaxOffset = TotalNumIntRegs * 8 +
                       (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
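  // Concretely: with gp_offset the bound is 6 * 8 = 48 bytes of GPR save
  // area; with fp_offset it is 48 + 8 * 16 = 176 bytes, since the 8 XMM
  // registers are saved in 16-byte slots after the 6 integer registers.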
  /* Align ArgSize to a multiple of 8 */
  unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
  bool NeedsAlign = (Align > 8);

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *overflowMBB;
  MachineBasicBlock *offsetMBB;
  MachineBasicBlock *endMBB;

  unsigned OffsetDestReg = 0;   // Argument address computed by offsetMBB
  unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
  unsigned OffsetReg = 0;

  if (!UseGPOffset && !UseFPOffset) {
    // If we only pull from the overflow region, we don't create a branch.
    // We don't need to alter control flow.
    OffsetDestReg = 0; // unused
    OverflowDestReg = DestReg;

    offsetMBB = nullptr;
    overflowMBB = thisMBB;
    endMBB = thisMBB;
  } else {
    // First emit code to check if gp_offset (or fp_offset) is below the bound.
    // If so, pull the argument from reg_save_area. (branch to offsetMBB)
    // If not, pull from overflow_area. (branch to overflowMBB)
    //
    //          thisMBB
    //          /     \
    //    offsetMBB   overflowMBB
    //          \     /
    //           endMBB

    // Registers for the PHI in endMBB
    OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
    OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);

    const BasicBlock *LLVM_BB = MBB->getBasicBlock();
    MachineFunction *MF = MBB->getParent();
    overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
    offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
    endMBB = MF->CreateMachineBasicBlock(LLVM_BB);

    MachineFunction::iterator MBBIter = MBB;
    ++MBBIter;

    // Insert the new basic blocks
    MF->insert(MBBIter, offsetMBB);
    MF->insert(MBBIter, overflowMBB);
    MF->insert(MBBIter, endMBB);

    // Transfer the remainder of MBB and its successor edges to endMBB.
    endMBB->splice(endMBB->begin(), thisMBB,
                   std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
    endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);

    // Make offsetMBB and overflowMBB successors of thisMBB
    thisMBB->addSuccessor(offsetMBB);
    thisMBB->addSuccessor(overflowMBB);

    // endMBB is a successor of both offsetMBB and overflowMBB
    offsetMBB->addSuccessor(endMBB);
    overflowMBB->addSuccessor(endMBB);
    // Load the offset value into a register
    OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
    BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
      .addOperand(Base)
      .addOperand(Scale)
      .addOperand(Index)
      .addDisp(Disp, UseFPOffset ? 4 : 0)
      .addOperand(Segment)
      .setMemRefs(MMOBegin, MMOEnd);

    // Check if there is enough room left to pull this argument.
    BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
      .addReg(OffsetReg)
      .addImm(MaxOffset + 8 - ArgSizeA8);

    // Branch to "overflowMBB" if offset >= max
    // Fall through to "offsetMBB" otherwise
    BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
      .addMBB(overflowMBB);
  }
  // In offsetMBB, emit code to use the reg_save_area.
  if (offsetMBB) {
    assert(OffsetReg != 0);

    // Read the reg_save_area address.
    unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
      .addOperand(Base)
      .addOperand(Scale)
      .addOperand(Index)
      .addDisp(Disp, 16)
      .addOperand(Segment)
      .setMemRefs(MMOBegin, MMOEnd);

    // Zero-extend the offset
    unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
      .addImm(0)
      .addReg(OffsetReg)
      .addImm(X86::sub_32bit);

    // Add the offset to the reg_save_area to get the final address.
    BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
      .addReg(OffsetReg64)
      .addReg(RegSaveReg);

    // Compute the offset for the next argument
    unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
      .addReg(OffsetReg)
      .addImm(UseFPOffset ? 16 : 8);

    // Store it back into the va_list.
    BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
      .addOperand(Base)
      .addOperand(Scale)
      .addOperand(Index)
      .addDisp(Disp, UseFPOffset ? 4 : 0)
      .addOperand(Segment)
      .addReg(NextOffsetReg)
      .setMemRefs(MMOBegin, MMOEnd);

    // Jump to endMBB
    BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
      .addMBB(endMBB);
  }
  //
  // Emit code to use overflow area
  //

  // Load the overflow_area address into a register.
  unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
  BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
    .addOperand(Base)
    .addOperand(Scale)
    .addOperand(Index)
    .addDisp(Disp, 8)
    .addOperand(Segment)
    .setMemRefs(MMOBegin, MMOEnd);

  // If we need to align it, do so. Otherwise, just copy the address
  // to OverflowDestReg.
  if (NeedsAlign) {
    // Align the overflow address
    assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
    unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);

    // aligned_addr = (addr + (align-1)) & ~(align-1)
    BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
      .addReg(OverflowAddrReg)
      .addImm(Align-1);

    BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
      .addReg(TmpReg)
      .addImm(~(uint64_t)(Align-1));
  } else {
    BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
      .addReg(OverflowAddrReg);
  }
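  // Worked example for the aligned_addr computation above: addr = 0x1003
  // with align = 8 gives (0x1003 + 7) & ~7 = 0x100A & ~7 = 0x1008, the next
  // 8-byte boundary.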
  // Compute the next overflow address after this argument.
  // (the overflow address should be kept 8-byte aligned)
  unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
  BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
    .addReg(OverflowDestReg)
    .addImm(ArgSizeA8);

  // Store the new overflow address.
  BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
    .addOperand(Base)
    .addOperand(Scale)
    .addOperand(Index)
    .addDisp(Disp, 8)
    .addOperand(Segment)
    .addReg(NextAddrReg)
    .setMemRefs(MMOBegin, MMOEnd);

  // If we branched, emit the PHI to the front of endMBB.
  if (offsetMBB) {
    BuildMI(*endMBB, endMBB->begin(), DL,
            TII->get(X86::PHI), DestReg)
      .addReg(OffsetDestReg).addMBB(offsetMBB)
      .addReg(OverflowDestReg).addMBB(overflowMBB);
  }

  // Erase the pseudo instruction
  MI->eraseFromParent();

  return endMBB;
}
MachineBasicBlock *
X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
                                                 MachineInstr *MI,
                                                 MachineBasicBlock *MBB) const {
  // Emit code to save XMM registers to the stack. The ABI says that the
  // number of registers to save is given in %al, so it's theoretically
  // possible to do an indirect jump trick to avoid saving all of them,
  // however this code takes a simpler approach and just executes all
  // of the stores if %al is non-zero. It's less code, and it's probably
  // easier on the hardware branch predictor, and stores aren't all that
  // expensive anyway.

  // Create the new basic blocks. One block contains all the XMM stores,
  // and one block is the final destination regardless of whether any
  // stores were performed.
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction *F = MBB->getParent();
  MachineFunction::iterator MBBIter = MBB;
  ++MBBIter;
  MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(MBBIter, XMMSaveMBB);
  F->insert(MBBIter, EndMBB);

  // Transfer the remainder of MBB and its successor edges to EndMBB.
  EndMBB->splice(EndMBB->begin(), MBB,
                 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  EndMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // The original block will now fall through to the XMM save block.
  MBB->addSuccessor(XMMSaveMBB);
  // The XMMSaveMBB will fall through to the end block.
  XMMSaveMBB->addSuccessor(EndMBB);

  // Now add the instructions.
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  unsigned CountReg = MI->getOperand(0).getReg();
  int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
  int64_t VarArgsFPOffset = MI->getOperand(2).getImm();

  if (!Subtarget->isTargetWin64()) {
    // If %al is 0, branch around the XMM save block.
    BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
    BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
    MBB->addSuccessor(EndMBB);
  }

  // Make sure the last operand is EFLAGS, which gets clobbered by the branch
  // that was just emitted, but clearly shouldn't be "saved".
  assert((MI->getNumOperands() <= 3 ||
          !MI->getOperand(MI->getNumOperands() - 1).isReg() ||
          MI->getOperand(MI->getNumOperands() - 1).getReg() == X86::EFLAGS)
         && "Expected last argument to be EFLAGS");
  unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
  // In the XMM save block, save all the XMM argument registers.
  for (int i = 3, e = MI->getNumOperands() - 1; i != e; ++i) {
    int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
    MachineMemOperand *MMO =
      F->getMachineMemOperand(
          MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset),
          MachineMemOperand::MOStore,
          /*Size=*/16, /*Align=*/16);
    BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
      .addFrameIndex(RegSaveFrameIndex)
      .addImm(/*Scale=*/1)
      .addReg(/*IndexReg=*/0)
      .addImm(/*Disp=*/Offset)
      .addReg(/*Segment=*/0)
      .addReg(MI->getOperand(i).getReg())
      .addMemOperand(MMO);
  }

  MI->eraseFromParent(); // The pseudo instruction is gone now.

  return EndMBB;
}
// The EFLAGS operand of SelectItr might be missing a kill marker
// because there were multiple uses of EFLAGS, and ISel didn't know
// which to mark. Figure out whether SelectItr should have had a
// kill marker, and set it if it should. Returns the correct kill
// marker value.
static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
                                     MachineBasicBlock* BB,
                                     const TargetRegisterInfo* TRI) {
  // Scan forward through BB for a use/def of EFLAGS.
  MachineBasicBlock::iterator miI(std::next(SelectItr));
  for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
    const MachineInstr& mi = *miI;
    if (mi.readsRegister(X86::EFLAGS))
      return false;
    if (mi.definesRegister(X86::EFLAGS))
      break; // Should have kill-flag - update below.
  }

  // If we hit the end of the block, check whether EFLAGS is live into a
  // successor.
  if (miI == BB->end()) {
    for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
                                          sEnd = BB->succ_end();
         sItr != sEnd; ++sItr) {
      MachineBasicBlock* succ = *sItr;
      if (succ->isLiveIn(X86::EFLAGS))
        return false;
    }
  }

  // We found a def, or hit the end of the basic block and EFLAGS wasn't live
  // out. SelectMI should have a kill flag on EFLAGS.
  SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
  return true;
}
MachineBasicBlock *
X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
                                     MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC sinkMBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // If the EFLAGS register isn't dead in the terminator, then claim that it's
  // live into the sink and copy blocks.
  const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
  if (!MI->killsRegister(X86::EFLAGS) &&
      !checkAndUpdateEFLAGSKill(MI, BB, TRI)) {
    copy0MBB->addLiveIn(X86::EFLAGS);
    sinkMBB->addLiveIn(X86::EFLAGS);
  }

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  // Create the conditional branch instruction.
  unsigned Opc =
    X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
  BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  copy0MBB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(X86::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return sinkMBB;
}
MachineBasicBlock *
X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
                                        MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();

  assert(MF->shouldSplitStack());

  const bool Is64Bit = Subtarget->is64Bit();
  const bool IsLP64 = Subtarget->isTarget64BitLP64();

  const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
  const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
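  // These magic offsets are the conventional locations of the stack-limit
  // field in the thread control block for the gcc/gold split-stack scheme
  // (e.g. %fs:0x70 on x86-64 LP64); the same constants appear in the
  // split-stack prologue emission on the frame-lowering side.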
  // BB:
  //  ... [Till the alloca]
  // If stacklet is not large enough, jump to mallocMBB
  //
  // bumpMBB:
  //  Allocate by subtracting from RSP
  //  Jump to continueMBB
  //
  // mallocMBB:
  //  Allocate by call to runtime
  //
  // continueMBB:
  //  ...
  //  [rest of original BB]
  //

  MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);

  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetRegisterClass *AddrRegClass =
    getRegClassFor(getPointerTy());

  unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
           bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
           tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
           SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
           sizeVReg = MI->getOperand(1).getReg(),
           physSPReg = IsLP64 || Subtarget->isTargetNaCl64() ? X86::RSP : X86::ESP;

  MachineFunction::iterator MBBIter = BB;
  ++MBBIter;

  MF->insert(MBBIter, bumpMBB);
  MF->insert(MBBIter, mallocMBB);
  MF->insert(MBBIter, continueMBB);

  continueMBB->splice(continueMBB->begin(), BB,
                      std::next(MachineBasicBlock::iterator(MI)), BB->end());
  continueMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Add code to the main basic block to check if the stack limit has been hit,
  // and if so, jump to mallocMBB otherwise to bumpMBB.
  BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
  BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
    .addReg(tmpSPVReg).addReg(sizeVReg);
  BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
    .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
    .addReg(SPLimitVReg);
  BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);
  // bumpMBB simply decreases the stack pointer, since we know the current
  // stacklet has enough space.
  BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
    .addReg(SPLimitVReg);
  BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
    .addReg(SPLimitVReg);
  BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);

  // Calls into a routine in libgcc to allocate more space from the heap.
  const uint32_t *RegMask =
      Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
  if (IsLP64) {
    BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
      .addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
      .addExternalSymbol("__morestack_allocate_stack_space")
      .addRegMask(RegMask)
      .addReg(X86::RDI, RegState::Implicit)
      .addReg(X86::RAX, RegState::ImplicitDefine);
  } else if (Is64Bit) {
    BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
      .addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
      .addExternalSymbol("__morestack_allocate_stack_space")
      .addRegMask(RegMask)
      .addReg(X86::EDI, RegState::Implicit)
      .addReg(X86::EAX, RegState::ImplicitDefine);
  } else {
    BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
      .addImm(12);
    BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
      .addExternalSymbol("__morestack_allocate_stack_space")
      .addRegMask(RegMask)
      .addReg(X86::EAX, RegState::ImplicitDefine);
  }

  if (!Is64Bit)
    BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
      .addImm(16);

  BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
    .addReg(IsLP64 ? X86::RAX : X86::EAX);
  BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);

  // Set up the CFG correctly.
  BB->addSuccessor(bumpMBB);
  BB->addSuccessor(mallocMBB);
  mallocMBB->addSuccessor(continueMBB);
  bumpMBB->addSuccessor(continueMBB);

  // Take care of the PHI nodes.
  BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
          MI->getOperand(0).getReg())
    .addReg(mallocPtrVReg).addMBB(mallocMBB)
    .addReg(bumpSPPtrVReg).addMBB(bumpMBB);

  // Delete the original pseudo instruction.
  MI->eraseFromParent();

  // And we're done.
  return continueMBB;
}
MachineBasicBlock *
X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
                                        MachineBasicBlock *BB) const {
  DebugLoc DL = MI->getDebugLoc();

  assert(!Subtarget->isTargetMachO());

  X86FrameLowering::emitStackProbeCall(*BB->getParent(), *BB, MI, DL);

  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}
MachineBasicBlock *
X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
                                      MachineBasicBlock *BB) const {
  // This is pretty easy. We're taking the value that we received from
  // our load from the relocation, sticking it in either RDI (x86-64)
  // or EAX and doing an indirect call. The return value will then
  // be in the normal return register.
  MachineFunction *F = BB->getParent();
  const X86InstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
  assert(MI->getOperand(3).isGlobal() && "This should be a global");

  // Get a register mask for the lowered call.
  // FIXME: The 32-bit calls have non-standard calling conventions. Use a
  // proper register mask.
  const uint32_t *RegMask =
      Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
  if (Subtarget->is64Bit()) {
    MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
                                      TII->get(X86::MOV64rm), X86::RDI)
    .addReg(X86::RIP)
    .addImm(0).addReg(0)
    .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                      MI->getOperand(3).getTargetFlags())
    .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
    addDirectMem(MIB, X86::RDI);
    MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
  } else if (F->getTarget().getRelocationModel() != Reloc::PIC_) {
    MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
                                      TII->get(X86::MOV32rm), X86::EAX)
    .addReg(0)
    .addImm(0).addReg(0)
    .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                      MI->getOperand(3).getTargetFlags())
    .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
    addDirectMem(MIB, X86::EAX);
    MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
  } else {
    MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
                                      TII->get(X86::MOV32rm), X86::EAX)
    .addReg(TII->getGlobalBaseReg(F))
    .addImm(0).addReg(0)
    .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                      MI->getOperand(3).getTargetFlags())
    .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
    addDirectMem(MIB, X86::EAX);
    MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
  }

  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}
MachineBasicBlock *
X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
                                    MachineBasicBlock *MBB) const {
  DebugLoc DL = MI->getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = MBB;
  ++I;

  // Memory Reference
  MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();

  unsigned DstReg;
  unsigned MemOpndSlot = 0;

  unsigned CurOp = 0;

  DstReg = MI->getOperand(CurOp++).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
  assert(RC->hasType(MVT::i32) && "Invalid destination!");
  unsigned mainDstReg = MRI.createVirtualRegister(RC);
  unsigned restoreDstReg = MRI.createVirtualRegister(RC);

  MemOpndSlot = CurOp;

  MVT PVT = getPointerTy();
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");

  // For v = setjmp(buf), we generate
  //
  // thisMBB:
  //  buf[LabelOffset] = restoreMBB
  //  SjLjSetup restoreMBB
  //
  // mainMBB:
  //  v_main = 0
  //
  // sinkMBB:
  //  v = phi(main, restore)
  //
  // restoreMBB:
  //  if base pointer being used, load it from frame
  //  v_restore = 1

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, mainMBB);
  MF->insert(I, sinkMBB);
  MF->push_back(restoreMBB);

  MachineInstrBuilder MIB;

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // thisMBB:
  unsigned PtrStoreOpc = 0;
  unsigned LabelReg = 0;
  const int64_t LabelOffset = 1 * PVT.getStoreSize();
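  // (The setjmp buffer is laid out in pointer-sized slots: slot 0 holds the
  // frame pointer, slot 1 the resume address stored below, and slot 2 the
  // stack pointer; hence the LabelOffset of one pointer size here.)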
  Reloc::Model RM = MF->getTarget().getRelocationModel();
  bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
                     (RM == Reloc::Static || RM == Reloc::DynamicNoPIC);

  // Prepare IP either in reg or imm.
  if (!UseImmLabel) {
    PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
    const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
    LabelReg = MRI.createVirtualRegister(PtrRC);
    if (Subtarget->is64Bit()) {
      MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
              .addReg(X86::RIP)
              .addImm(0)
              .addReg(0)
              .addMBB(restoreMBB)
              .addReg(0);
    } else {
      const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
      MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
              .addReg(XII->getGlobalBaseReg(MF))
              .addImm(0)
              .addReg(0)
              .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference())
              .addReg(0);
    }
  } else
    PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
  // Store IP
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    if (i == X86::AddrDisp)
      MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset);
    else
      MIB.addOperand(MI->getOperand(MemOpndSlot + i));
  }
  if (!UseImmLabel)
    MIB.addReg(LabelReg);
  else
    MIB.addMBB(restoreMBB);
  MIB.setMemRefs(MMOBegin, MMOEnd);
  // Setup
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
          .addMBB(restoreMBB);

  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  MIB.addRegMask(RegInfo->getNoPreservedMask());
  thisMBB->addSuccessor(mainMBB);
  thisMBB->addSuccessor(restoreMBB);

  // mainMBB:
  //  EAX = 0
  BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
  mainMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(X86::PHI), DstReg)
    .addReg(mainDstReg).addMBB(mainMBB)
    .addReg(restoreDstReg).addMBB(restoreMBB);

  // restoreMBB:
  if (RegInfo->hasBasePointer(*MF)) {
    const bool Uses64BitFramePtr =
        Subtarget->isTarget64BitLP64() || Subtarget->isTargetNaCl64();
    X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
    X86FI->setRestoreBasePointer(MF);
    unsigned FramePtr = RegInfo->getFrameRegister(*MF);
    unsigned BasePtr = RegInfo->getBaseRegister();
    unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
    addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
                 FramePtr, true, X86FI->getRestoreBasePointerOffset())
      .setMIFlag(MachineInstr::FrameSetup);
  }
  BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
  BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
  restoreMBB->addSuccessor(sinkMBB);

  MI->eraseFromParent();
  return sinkMBB;
}
MachineBasicBlock *
X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
                                     MachineBasicBlock *MBB) const {
  DebugLoc DL = MI->getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  // Memory Reference
  MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();

  MVT PVT = getPointerTy();
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");

  const TargetRegisterClass *RC =
    (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
  unsigned Tmp = MRI.createVirtualRegister(RC);
  // Since FP is only updated here but NOT referenced, it's treated as GPR.
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
  unsigned SP = RegInfo->getStackRegister();

  MachineInstrBuilder MIB;

  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t SPOffset = 2 * PVT.getStoreSize();
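  // Slot layout mirrors emitEHSjLjSetJmp: slot 0 = FP, slot 1 = resume IP,
  // slot 2 = SP, each one pointer-size wide, which is exactly the order the
  // three reloads below read them back in.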
  unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
  unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;

  // Reload FP
  MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
    MIB.addOperand(MI->getOperand(i));
  MIB.setMemRefs(MMOBegin, MMOEnd);
  // Reload IP
  MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    if (i == X86::AddrDisp)
      MIB.addDisp(MI->getOperand(i), LabelOffset);
    else
      MIB.addOperand(MI->getOperand(i));
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);
  // Reload SP
  MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    if (i == X86::AddrDisp)
      MIB.addDisp(MI->getOperand(i), SPOffset);
    else
      MIB.addOperand(MI->getOperand(i));
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);
  // Jump
  BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);

  MI->eraseFromParent();
  return MBB;
}
// Replace 213-type (isel default) FMA3 instructions with 231-type for
// accumulator loops. Writing back to the accumulator allows the coalescer
// to remove extra copies in the loop.
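// For example, in a reduction loop 'acc = FMA213 x<tied>, y, acc' ties the
// result to x's register, so acc must be copied every iteration; the
// commuted 'acc = FMA231 acc<tied>, y, x' writes straight back to the
// accumulator and the copy disappears.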
MachineBasicBlock *
X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
                                 MachineBasicBlock *MBB) const {
  MachineOperand &AddendOp = MI->getOperand(3);

  // Bail out early if the addend isn't a register - we can't switch these.
  if (!AddendOp.isReg())
    return MBB;

  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Check whether the addend is defined by a PHI:
  assert(MRI.hasOneDef(AddendOp.getReg()) && "Multiple defs in SSA?");
  MachineInstr &AddendDef = *MRI.def_instr_begin(AddendOp.getReg());
  if (!AddendDef.isPHI())
    return MBB;

  // Look for the following pattern:
  //
  //   %addend = phi [%entry, 0], [%loop, %result]
  //   ...
  //   %result<tied1> = FMA213 %m2<tied0>, %m1, %addend
  //
  // and transform it into:
  //
  //   %addend = phi [%entry, 0], [%loop, %result]
  //   ...
  //   %result<tied1> = FMA231 %addend<tied0>, %m1, %m2

  for (unsigned i = 1, e = AddendDef.getNumOperands(); i < e; i += 2) {
    assert(AddendDef.getOperand(i).isReg());
    MachineOperand PHISrcOp = AddendDef.getOperand(i);
    MachineInstr &PHISrcInst = *MRI.def_instr_begin(PHISrcOp.getReg());
    if (&PHISrcInst == MI) {
      // Found a matching instruction.
      unsigned NewFMAOpc = 0;
      switch (MI->getOpcode()) {
      case X86::VFMADDPDr213r: NewFMAOpc = X86::VFMADDPDr231r; break;
      case X86::VFMADDPSr213r: NewFMAOpc = X86::VFMADDPSr231r; break;
      case X86::VFMADDSDr213r: NewFMAOpc = X86::VFMADDSDr231r; break;
      case X86::VFMADDSSr213r: NewFMAOpc = X86::VFMADDSSr231r; break;
      case X86::VFMSUBPDr213r: NewFMAOpc = X86::VFMSUBPDr231r; break;
      case X86::VFMSUBPSr213r: NewFMAOpc = X86::VFMSUBPSr231r; break;
      case X86::VFMSUBSDr213r: NewFMAOpc = X86::VFMSUBSDr231r; break;
      case X86::VFMSUBSSr213r: NewFMAOpc = X86::VFMSUBSSr231r; break;
      case X86::VFNMADDPDr213r: NewFMAOpc = X86::VFNMADDPDr231r; break;
      case X86::VFNMADDPSr213r: NewFMAOpc = X86::VFNMADDPSr231r; break;
      case X86::VFNMADDSDr213r: NewFMAOpc = X86::VFNMADDSDr231r; break;
      case X86::VFNMADDSSr213r: NewFMAOpc = X86::VFNMADDSSr231r; break;
      case X86::VFNMSUBPDr213r: NewFMAOpc = X86::VFNMSUBPDr231r; break;
      case X86::VFNMSUBPSr213r: NewFMAOpc = X86::VFNMSUBPSr231r; break;
      case X86::VFNMSUBSDr213r: NewFMAOpc = X86::VFNMSUBSDr231r; break;
      case X86::VFNMSUBSSr213r: NewFMAOpc = X86::VFNMSUBSSr231r; break;
      case X86::VFMADDSUBPDr213r: NewFMAOpc = X86::VFMADDSUBPDr231r; break;
      case X86::VFMADDSUBPSr213r: NewFMAOpc = X86::VFMADDSUBPSr231r; break;
      case X86::VFMSUBADDPDr213r: NewFMAOpc = X86::VFMSUBADDPDr231r; break;
      case X86::VFMSUBADDPSr213r: NewFMAOpc = X86::VFMSUBADDPSr231r; break;

      case X86::VFMADDPDr213rY: NewFMAOpc = X86::VFMADDPDr231rY; break;
      case X86::VFMADDPSr213rY: NewFMAOpc = X86::VFMADDPSr231rY; break;
      case X86::VFMSUBPDr213rY: NewFMAOpc = X86::VFMSUBPDr231rY; break;
      case X86::VFMSUBPSr213rY: NewFMAOpc = X86::VFMSUBPSr231rY; break;
      case X86::VFNMADDPDr213rY: NewFMAOpc = X86::VFNMADDPDr231rY; break;
      case X86::VFNMADDPSr213rY: NewFMAOpc = X86::VFNMADDPSr231rY; break;
      case X86::VFNMSUBPDr213rY: NewFMAOpc = X86::VFNMSUBPDr231rY; break;
      case X86::VFNMSUBPSr213rY: NewFMAOpc = X86::VFNMSUBPSr231rY; break;
      case X86::VFMADDSUBPDr213rY: NewFMAOpc = X86::VFMADDSUBPDr231rY; break;
      case X86::VFMADDSUBPSr213rY: NewFMAOpc = X86::VFMADDSUBPSr231rY; break;
      case X86::VFMSUBADDPDr213rY: NewFMAOpc = X86::VFMSUBADDPDr231rY; break;
      case X86::VFMSUBADDPSr213rY: NewFMAOpc = X86::VFMSUBADDPSr231rY; break;
      default: llvm_unreachable("Unrecognized FMA variant.");
      }
      const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
      MachineInstrBuilder MIB =
          BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
              .addOperand(MI->getOperand(0))
              .addOperand(MI->getOperand(3))
              .addOperand(MI->getOperand(2))
              .addOperand(MI->getOperand(1));
      MBB->insert(MachineBasicBlock::iterator(MI), MIB);
      MI->eraseFromParent();
      break;
    }
  }

  return MBB;
}
MachineBasicBlock *
X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                               MachineBasicBlock *BB) const {
  switch (MI->getOpcode()) {
  default: llvm_unreachable("Unexpected instr type to insert");
  case X86::TAILJMPd64:
  case X86::TAILJMPr64:
  case X86::TAILJMPm64:
  case X86::TAILJMPd64_REX:
  case X86::TAILJMPr64_REX:
  case X86::TAILJMPm64_REX:
    llvm_unreachable("TAILJMP64 would not be touched here.");
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
    return BB;
  case X86::WIN_ALLOCA:
21399 return EmitLoweredWinAlloca(MI, BB);
21400 case X86::SEG_ALLOCA_32:
21401 case X86::SEG_ALLOCA_64:
21402 return EmitLoweredSegAlloca(MI, BB);
21403 case X86::TLSCall_32:
21404 case X86::TLSCall_64:
21405 return EmitLoweredTLSCall(MI, BB);
21406 case X86::CMOV_GR8:
21407 case X86::CMOV_FR32:
21408 case X86::CMOV_FR64:
21409 case X86::CMOV_V4F32:
21410 case X86::CMOV_V2F64:
21411 case X86::CMOV_V2I64:
21412 case X86::CMOV_V8F32:
21413 case X86::CMOV_V4F64:
21414 case X86::CMOV_V4I64:
21415 case X86::CMOV_V16F32:
21416 case X86::CMOV_V8F64:
21417 case X86::CMOV_V8I64:
21418 case X86::CMOV_GR16:
21419 case X86::CMOV_GR32:
21420 case X86::CMOV_RFP32:
21421 case X86::CMOV_RFP64:
21422 case X86::CMOV_RFP80:
21423 return EmitLoweredSelect(MI, BB);
21425 case X86::FP32_TO_INT16_IN_MEM:
21426 case X86::FP32_TO_INT32_IN_MEM:
21427 case X86::FP32_TO_INT64_IN_MEM:
21428 case X86::FP64_TO_INT16_IN_MEM:
21429 case X86::FP64_TO_INT32_IN_MEM:
21430 case X86::FP64_TO_INT64_IN_MEM:
21431 case X86::FP80_TO_INT16_IN_MEM:
21432 case X86::FP80_TO_INT32_IN_MEM:
  case X86::FP80_TO_INT64_IN_MEM: {
    MachineFunction *F = BB->getParent();
    const TargetInstrInfo *TII = Subtarget->getInstrInfo();
    DebugLoc DL = MI->getDebugLoc();

    // Change the floating point control register to use "round towards zero"
    // mode when truncating to an integer value.
    int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
    addFrameReference(BuildMI(*BB, MI, DL,
                              TII->get(X86::FNSTCW16m)), CWFrameIdx);

    // Load the old value of the high byte of the control word...
    unsigned OldCW =
      F->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
                      CWFrameIdx);

    // Set the high part to be round to zero...
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
      .addImm(0xC7F);
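    // (0xC7F decodes as: all six x87 exception-mask bits set and rounding
    // control bits 11:10 = 11b, i.e. round toward zero, which is exactly
    // what FP-to-integer truncation requires.)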

    // Reload the modified control word now...
    addFrameReference(BuildMI(*BB, MI, DL,
                              TII->get(X86::FLDCW16m)), CWFrameIdx);

    // Restore the memory image of control word to original value
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
      .addReg(OldCW);

    // Get the X86 opcode to use.
    unsigned Opc;
    switch (MI->getOpcode()) {
21465 default: llvm_unreachable("illegal opcode!");
21466 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
21467 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
21468 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
21469 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
21470 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
21471 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
21472 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
21473 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
21474 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
    }

    X86AddressMode AM;
    MachineOperand &Op = MI->getOperand(0);
    if (Op.isReg()) {
      AM.BaseType = X86AddressMode::RegBase;
      AM.Base.Reg = Op.getReg();
    } else {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = Op.getIndex();
    }
    Op = MI->getOperand(1);
    if (Op.isImm())
      AM.Scale = Op.getImm();
    Op = MI->getOperand(2);
    if (Op.isImm())
      AM.IndexReg = Op.getImm();
    Op = MI->getOperand(3);
    if (Op.isGlobal()) {
      AM.GV = Op.getGlobal();
    } else {
      AM.Disp = Op.getImm();
    }
    addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
      .addReg(MI->getOperand(X86::AddrNumOperands).getReg());

    // Reload the original control word now.
    addFrameReference(BuildMI(*BB, MI, DL,
                              TII->get(X86::FLDCW16m)), CWFrameIdx);

    MI->eraseFromParent(); // The pseudo instruction is gone now.
    return BB;
  }
21508 // String/text processing lowering.
21509 case X86::PCMPISTRM128REG:
21510 case X86::VPCMPISTRM128REG:
21511 case X86::PCMPISTRM128MEM:
21512 case X86::VPCMPISTRM128MEM:
21513 case X86::PCMPESTRM128REG:
21514 case X86::VPCMPESTRM128REG:
21515 case X86::PCMPESTRM128MEM:
21516 case X86::VPCMPESTRM128MEM:
21517 assert(Subtarget->hasSSE42() &&
21518 "Target must have SSE4.2 or AVX features enabled");
21519 return EmitPCMPSTRM(MI, BB, Subtarget->getInstrInfo());
21521 // String/text processing lowering.
21522 case X86::PCMPISTRIREG:
21523 case X86::VPCMPISTRIREG:
21524 case X86::PCMPISTRIMEM:
21525 case X86::VPCMPISTRIMEM:
21526 case X86::PCMPESTRIREG:
21527 case X86::VPCMPESTRIREG:
21528 case X86::PCMPESTRIMEM:
21529 case X86::VPCMPESTRIMEM:
21530 assert(Subtarget->hasSSE42() &&
21531 "Target must have SSE4.2 or AVX features enabled");
21532 return EmitPCMPSTRI(MI, BB, Subtarget->getInstrInfo());
  // Thread synchronization.
  case X86::MONITOR:
    return EmitMonitor(MI, BB, Subtarget);

  // xbegin
  case X86::XBEGIN:
    return EmitXBegin(MI, BB, Subtarget->getInstrInfo());
21542 case X86::VASTART_SAVE_XMM_REGS:
21543 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
21545 case X86::VAARG_64:
21546 return EmitVAARG64WithCustomInserter(MI, BB);
21548 case X86::EH_SjLj_SetJmp32:
21549 case X86::EH_SjLj_SetJmp64:
21550 return emitEHSjLjSetJmp(MI, BB);
21552 case X86::EH_SjLj_LongJmp32:
21553 case X86::EH_SjLj_LongJmp64:
21554 return emitEHSjLjLongJmp(MI, BB);
21556 case TargetOpcode::STATEPOINT:
21557 // As an implementation detail, STATEPOINT shares the STACKMAP format at
21558 // this point in the process. We diverge later.
21559 return emitPatchPoint(MI, BB);
21561 case TargetOpcode::STACKMAP:
21562 case TargetOpcode::PATCHPOINT:
21563 return emitPatchPoint(MI, BB);
21565 case X86::VFMADDPDr213r:
21566 case X86::VFMADDPSr213r:
21567 case X86::VFMADDSDr213r:
21568 case X86::VFMADDSSr213r:
21569 case X86::VFMSUBPDr213r:
21570 case X86::VFMSUBPSr213r:
21571 case X86::VFMSUBSDr213r:
21572 case X86::VFMSUBSSr213r:
21573 case X86::VFNMADDPDr213r:
21574 case X86::VFNMADDPSr213r:
21575 case X86::VFNMADDSDr213r:
21576 case X86::VFNMADDSSr213r:
21577 case X86::VFNMSUBPDr213r:
21578 case X86::VFNMSUBPSr213r:
21579 case X86::VFNMSUBSDr213r:
21580 case X86::VFNMSUBSSr213r:
21581 case X86::VFMADDSUBPDr213r:
21582 case X86::VFMADDSUBPSr213r:
21583 case X86::VFMSUBADDPDr213r:
21584 case X86::VFMSUBADDPSr213r:
21585 case X86::VFMADDPDr213rY:
21586 case X86::VFMADDPSr213rY:
21587 case X86::VFMSUBPDr213rY:
21588 case X86::VFMSUBPSr213rY:
21589 case X86::VFNMADDPDr213rY:
21590 case X86::VFNMADDPSr213rY:
21591 case X86::VFNMSUBPDr213rY:
21592 case X86::VFNMSUBPSr213rY:
21593 case X86::VFMADDSUBPDr213rY:
21594 case X86::VFMADDSUBPSr213rY:
21595 case X86::VFMSUBADDPDr213rY:
21596 case X86::VFMSUBADDPSr213rY:
    return emitFMA3Instr(MI, BB);
  }
}

//===----------------------------------------------------------------------===//
21602 // X86 Optimization Hooks
21603 //===----------------------------------------------------------------------===//
void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      APInt &KnownZero,
                                                      APInt &KnownOne,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  unsigned BitWidth = KnownZero.getBitWidth();
  unsigned Opc = Op.getOpcode();
  assert((Opc >= ISD::BUILTIN_OP_END ||
          Opc == ISD::INTRINSIC_WO_CHAIN ||
          Opc == ISD::INTRINSIC_W_CHAIN ||
          Opc == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");

  KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
  switch (Opc) {
  default: break;
  case X86ISD::ADD:
  case X86ISD::SUB:
  case X86ISD::ADC:
  case X86ISD::SBB:
  case X86ISD::SMUL:
  case X86ISD::UMUL:
  case X86ISD::INC:
  case X86ISD::DEC:
  case X86ISD::OR:
  case X86ISD::XOR:
  case X86ISD::AND:
    // These nodes' second result is a boolean.
    if (Op.getResNo() == 0)
      break;
    // Fallthrough
  case X86ISD::SETCC:
    KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    break;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    unsigned NumLoBits = 0;
    switch (IntId) {
    default: break;
    case Intrinsic::x86_sse_movmsk_ps:
    case Intrinsic::x86_avx_movmsk_ps_256:
    case Intrinsic::x86_sse2_movmsk_pd:
    case Intrinsic::x86_avx_movmsk_pd_256:
    case Intrinsic::x86_mmx_pmovmskb:
    case Intrinsic::x86_sse2_pmovmskb_128:
    case Intrinsic::x86_avx2_pmovmskb: {
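      // For example, MOVMSKPS on a v4f32 yields a 4-bit mask and PMOVMSKB on
      // a v16i8 yields a 16-bit mask; every bit of the i32 result above those
      // low NumLoBits positions is therefore known to be zero.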
      // High bits of movmskp{s|d}, pmovmskb are known zero.
      switch (IntId) {
      default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
      case Intrinsic::x86_sse_movmsk_ps:     NumLoBits = 4;  break;
      case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8;  break;
      case Intrinsic::x86_sse2_movmsk_pd:    NumLoBits = 2;  break;
      case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4;  break;
      case Intrinsic::x86_mmx_pmovmskb:      NumLoBits = 8;  break;
      case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
      case Intrinsic::x86_avx2_pmovmskb:     NumLoBits = 32; break;
      }
      KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
      break;
    }
    }
    break;
  }
  }
}
unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
  SDValue Op,
  const SelectionDAG &,
  unsigned Depth) const {
  // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
  if (Op.getOpcode() == X86ISD::SETCC_CARRY)
    return Op.getValueType().getScalarType().getSizeInBits();

  // Fallback case.
  return 1;
}
/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + offset.
bool X86TargetLowering::isGAPlusOffset(SDNode *N,
                                       const GlobalValue* &GA,
                                       int64_t &Offset) const {
  if (N->getOpcode() == X86ISD::Wrapper) {
    if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
      GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
      Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
      return true;
    }
  }
  return TargetLowering::isGAPlusOffset(N, GA, Offset);
}
/// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the
/// same as extracting the high 128-bit part of a 256-bit vector and then
/// inserting the result into the low part of a new 256-bit vector
static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) {
  EVT VT = SVOp->getValueType(0);
  unsigned NumElems = VT.getVectorNumElements();

  // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
  for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j)
    if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
        SVOp->getMaskElt(j) >= 0)
      return false;

  return true;
}
/// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the
/// same as extracting the low 128-bit part of a 256-bit vector and then
/// inserting the result into the high part of a new 256-bit vector
static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) {
  EVT VT = SVOp->getValueType(0);
  unsigned NumElems = VT.getVectorNumElements();

  // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
  for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j)
    if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
        SVOp->getMaskElt(j) >= 0)
      return false;

  return true;
}
/// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        const X86Subtarget* Subtarget) {
  SDLoc dl(N);
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  EVT VT = SVOp->getValueType(0);
  unsigned NumElems = VT.getVectorNumElements();

  if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
      V2.getOpcode() == ISD::CONCAT_VECTORS) {
    //
    //                  0,0,0,...
    //                     |
    //    V    UNDEF    BUILD_VECTOR    UNDEF
    //     \    /            \          /
    //  CONCAT_VECTOR      CONCAT_VECTOR
    //          \               /
    //           \             /
    //          RESULT: V + zero extended
    //
    if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
        V2.getOperand(1).getOpcode() != ISD::UNDEF ||
        V1.getOperand(1).getOpcode() != ISD::UNDEF)
      return SDValue();

    if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
      return SDValue();

    // To match the shuffle mask, the first half of the mask should
    // be exactly the first vector, and all the rest a splat with the
    // first element of the second one.
    for (unsigned i = 0; i != NumElems/2; ++i)
      if (!isUndefOrEqual(SVOp->getMaskElt(i), i) ||
          !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
        return SDValue();
    // If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
    if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) {
      if (Ld->hasNUsesOfValue(1, 0)) {
        SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
        SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
        SDValue ResNode =
          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
                                  Ld->getMemoryVT(),
                                  Ld->getPointerInfo(),
                                  Ld->getAlignment(),
                                  false/*isVolatile*/, true/*ReadMem*/,
                                  false/*WriteMem*/);

        // Make sure the newly-created LOAD is in the same position as Ld in
        // terms of dependency. We create a TokenFactor for Ld and ResNode,
        // and update uses of Ld's output chain to use the TokenFactor.
        if (Ld->hasAnyUseOfValue(1)) {
          SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                             SDValue(Ld, 1), SDValue(ResNode.getNode(), 1));
          DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
          DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1),
                                 SDValue(ResNode.getNode(), 1));
        }

        return DAG.getNode(ISD::BITCAST, dl, VT, ResNode);
      }
    }

    // Emit a zeroed vector and insert the desired subvector on its
    // first half.
    SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
    SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
    return DCI.CombineTo(N, InsV);
  }
  //===--------------------------------------------------------------------===//
  // Combine some shuffles into subvector extracts and inserts:
  //

  // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
  if (isShuffleHigh128VectorInsertLow(SVOp)) {
    SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl);
    SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl);
    return DCI.CombineTo(N, InsV);
  }

  // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
  if (isShuffleLow128VectorInsertHigh(SVOp)) {
    SDValue V = Extract128BitVector(V1, 0, DAG, dl);
    SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl);
    return DCI.CombineTo(N, InsV);
  }

  return SDValue();
}
/// \brief Combine an arbitrary chain of shuffles into a single instruction if
/// possible.
///
/// This is the leaf of the recursive combine below. When we have found some
/// chain of single-use x86 shuffle instructions and accumulated the combined
/// shuffle mask represented by them, this will try to pattern match that mask
/// into either a single instruction if there is a special purpose instruction
/// for this operation, or into a PSHUFB instruction which is a fully general
/// instruction but should only be used to replace chains over a certain depth.
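/// (PSHUFB consumes a per-byte index vector, so any single-input shuffle at
/// byte granularity - e.g. a v16i8 mask like <1, 0, 3, 2, ...> swapping byte
/// pairs - can be expressed as one instruction once such a mask is built.)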
static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
                                   int Depth, bool HasPSHUFB, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
  assert(!Mask.empty() && "Cannot combine an empty shuffle mask!");

  // Find the operand that enters the chain. Note that multiple uses are OK
  // here, we're not going to remove the operand we find.
  SDValue Input = Op.getOperand(0);
  while (Input.getOpcode() == ISD::BITCAST)
    Input = Input.getOperand(0);

  MVT VT = Input.getSimpleValueType();
  MVT RootVT = Root.getSimpleValueType();
  SDLoc DL(Op);

  // Just remove no-op shuffle masks.
  if (Mask.size() == 1) {
    DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Input),
                  /*AddTo*/ true);
    return true;
  }
21859 bool FloatDomain = VT.isFloatingPoint();
21861 // For floating point shuffles, we don't have free copies in the shuffle
21862 // instructions or the ability to load as part of the instruction, so
21863 // canonicalize their shuffles to UNPCK or MOV variants.
21865 // Note that even with AVX we prefer the PSHUFD form of shuffle for integer
21866 // vectors because it can have a load folded into it that UNPCK cannot. This
21867 // doesn't preclude something switching to the shorter encoding post-RA.
21869 if (Mask.equals(0, 0) || Mask.equals(1, 1)) {
21870 bool Lo = Mask.equals(0, 0);
21873 // Check if we have SSE3 which will let us use MOVDDUP. That instruction
21874 // is no slower than UNPCKLPD but has the option to fold the input operand
21875 // into even an unaligned memory load.
21876 if (Lo && Subtarget->hasSSE3()) {
21877 Shuffle = X86ISD::MOVDDUP;
21878 ShuffleVT = MVT::v2f64;
21880 // We have MOVLHPS and MOVHLPS throughout SSE and they encode smaller
21881 // than the UNPCK variants.
21882 Shuffle = Lo ? X86ISD::MOVLHPS : X86ISD::MOVHLPS;
21883 ShuffleVT = MVT::v4f32;
21885 if (Depth == 1 && Root->getOpcode() == Shuffle)
21886 return false; // Nothing to do!
21887 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
21888 DCI.AddToWorklist(Op.getNode());
21889 if (Shuffle == X86ISD::MOVDDUP)
21890 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
21892 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
21893 DCI.AddToWorklist(Op.getNode());
21894 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
21898 if (Subtarget->hasSSE3() &&
21899 (Mask.equals(0, 0, 2, 2) || Mask.equals(1, 1, 3, 3))) {
21900 bool Lo = Mask.equals(0, 0, 2, 2);
21901 unsigned Shuffle = Lo ? X86ISD::MOVSLDUP : X86ISD::MOVSHDUP;
21902 MVT ShuffleVT = MVT::v4f32;
21903 if (Depth == 1 && Root->getOpcode() == Shuffle)
21904 return false; // Nothing to do!
21905 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
21906 DCI.AddToWorklist(Op.getNode());
21907 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
21908 DCI.AddToWorklist(Op.getNode());
21909 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
21913 if (Mask.equals(0, 0, 1, 1) || Mask.equals(2, 2, 3, 3)) {
21914 bool Lo = Mask.equals(0, 0, 1, 1);
21915 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
21916 MVT ShuffleVT = MVT::v4f32;
21917 if (Depth == 1 && Root->getOpcode() == Shuffle)
21918 return false; // Nothing to do!
21919 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
21920 DCI.AddToWorklist(Op.getNode());
21921 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
21922 DCI.AddToWorklist(Op.getNode());
21923 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
21929 // We always canonicalize the 8 x i16 and 16 x i8 shuffles into their UNPCK
21930 // variants as none of these have single-instruction variants that are
21931 // superior to the UNPCK formulation.
21932 if (!FloatDomain &&
21933 (Mask.equals(0, 0, 1, 1, 2, 2, 3, 3) ||
21934 Mask.equals(4, 4, 5, 5, 6, 6, 7, 7) ||
21935 Mask.equals(0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7) ||
21936 Mask.equals(8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
21938 bool Lo = Mask[0] == 0;
21939 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
21940 if (Depth == 1 && Root->getOpcode() == Shuffle)
21941 return false; // Nothing to do!
21943 switch (Mask.size()) {
21945 ShuffleVT = MVT::v8i16;
21948 ShuffleVT = MVT::v16i8;
21951 llvm_unreachable("Impossible mask size!");
21953 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
21954 DCI.AddToWorklist(Op.getNode());
21955 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
21956 DCI.AddToWorklist(Op.getNode());
21957 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
21962 // Don't try to re-form single instruction chains under any circumstances now
21963 // that we've done encoding canonicalization for them.
21967 // If we have 3 or more shuffle instructions or a chain involving PSHUFB, we
21968 // can replace them with a single PSHUFB instruction profitably. Intel's
21969 // manuals suggest only using PSHUFB if doing so replacing 5 instructions, but
21970 // in practice PSHUFB tends to be *very* fast so we're more aggressive.
21971 if ((Depth >= 3 || HasPSHUFB) && Subtarget->hasSSSE3()) {
21972 SmallVector<SDValue, 16> PSHUFBMask;
21973 assert(Mask.size() <= 16 && "Can't shuffle elements smaller than bytes!");
21974 int Ratio = 16 / Mask.size();
21975 for (unsigned i = 0; i < 16; ++i) {
21976 if (Mask[i / Ratio] == SM_SentinelUndef) {
21977 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
21980 int M = Mask[i / Ratio] != SM_SentinelZero
21981 ? Ratio * Mask[i / Ratio] + i % Ratio
21983 PSHUFBMask.push_back(DAG.getConstant(M, MVT::i8));
21985 Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Input);
21986 DCI.AddToWorklist(Op.getNode());
21987 SDValue PSHUFBMaskOp =
21988 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, PSHUFBMask);
21989 DCI.AddToWorklist(PSHUFBMaskOp.getNode());
21990 Op = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, Op, PSHUFBMaskOp);
21991 DCI.AddToWorklist(Op.getNode());
21992 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
21997 // Failed to find any combines.
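// Worked example of the PSHUFB mask construction above (added commentary): if
// the accumulated mask over a v2i64 input is {1, 0}, then Ratio = 16 / 2 = 8
// and byte i of the emitted mask is 8 * Mask[i / 8] + i % 8, i.e.
//   {8, 9, 10, 11, 12, 13, 14, 15, 0, 1, 2, 3, 4, 5, 6, 7},
// which swaps the two quadwords. Zeroed lanes (SM_SentinelZero) become the
// value 255, whose set high bit makes PSHUFB write a zero byte.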
/// \brief Fully generic combining of x86 shuffle instructions.
///
/// This should be the last combine run over the x86 shuffle instructions. Once
/// they have been fully optimized, this will recursively consider all chains
/// of single-use shuffle instructions, build a generic model of the cumulative
/// shuffle operation, and check for simpler instructions which implement this
/// operation. We use this primarily for two purposes:
///
/// 1) Collapse generic shuffles to specialized single instructions when
///    equivalent. In most cases, this is just an encoding size win, but
///    sometimes we will collapse multiple generic shuffles into a single
///    special-purpose shuffle.
/// 2) Look for sequences of shuffle instructions with 3 or more total
///    instructions, and replace them with the slightly more expensive SSSE3
///    PSHUFB instruction if available. We do this as the last combining step
///    to ensure we avoid using PSHUFB if we can implement the shuffle with
///    a suitable short sequence of other instructions. The PSHUFB will either
///    use a register or have to read from memory and so is slightly (but only
///    slightly) more expensive than the other shuffle instructions.
///
/// Because this is inherently a quadratic operation (for each shuffle in
/// a chain, we recurse up the chain), the depth is limited to 8 instructions.
/// This should never be an issue in practice as the shuffle lowering doesn't
/// produce sequences of more than 8 instructions.
///
/// FIXME: We will currently miss some cases where the redundant shuffling
/// would simplify under the threshold for PSHUFB formation because of
/// combine-ordering. To fix this, we should do the redundant instruction
/// combining in this recursive walk.
static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
                                          ArrayRef<int> RootMask,
                                          int Depth, bool HasPSHUFB,
                                          SelectionDAG &DAG,
                                          TargetLowering::DAGCombinerInfo &DCI,
                                          const X86Subtarget *Subtarget) {
  // Bound the depth of our recursive combine because this is ultimately
  // quadratic in nature.
  if (Depth > 8)
    return false;

  // Directly rip through bitcasts to find the underlying operand.
  while (Op.getOpcode() == ISD::BITCAST && Op.getOperand(0).hasOneUse())
    Op = Op.getOperand(0);

  MVT VT = Op.getSimpleValueType();
  if (!VT.isVector())
    return false; // Bail if we hit a non-vector.
  // FIXME: This routine should be taught about 256-bit shuffles, or a 256-bit
  // version should be added.
  if (VT.getSizeInBits() != 128)
    return false;

  assert(Root.getSimpleValueType().isVector() &&
         "Shuffles operate on vector types!");
  assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
         "Can only combine shuffles of the same vector register size.");

  if (!isTargetShuffle(Op.getOpcode()))
    return false;
  SmallVector<int, 16> OpMask;
  bool IsUnary;
  bool HaveMask = getTargetShuffleMask(Op.getNode(), VT, OpMask, IsUnary);
  // We can only combine unary shuffles for which we can decode the mask.
  if (!HaveMask || !IsUnary)
    return false;

  assert(VT.getVectorNumElements() == OpMask.size() &&
         "Different mask size from vector size!");
  assert(((RootMask.size() > OpMask.size() &&
           RootMask.size() % OpMask.size() == 0) ||
          (OpMask.size() > RootMask.size() &&
           OpMask.size() % RootMask.size() == 0) ||
          OpMask.size() == RootMask.size()) &&
         "The smaller number of elements must divide the larger.");
  int RootRatio = std::max<int>(1, OpMask.size() / RootMask.size());
  int OpRatio = std::max<int>(1, RootMask.size() / OpMask.size());
  assert(((RootRatio == 1 && OpRatio == 1) ||
          (RootRatio == 1) != (OpRatio == 1)) &&
         "Must not have a ratio for both incoming and op masks!");

  SmallVector<int, 16> Mask;
  Mask.reserve(std::max(OpMask.size(), RootMask.size()));

  // Merge this shuffle operation's mask into our accumulated mask. Note that
  // this shuffle's mask will be the first applied to the input, followed by
  // the root mask to get us all the way to the root value arrangement. The
  // reason for this order is that we are recursing up the operation chain.
  for (int i = 0, e = std::max(OpMask.size(), RootMask.size()); i < e; ++i) {
    int RootIdx = i / RootRatio;
    if (RootMask[RootIdx] < 0) {
      // This is a zero or undef lane, we're done.
      Mask.push_back(RootMask[RootIdx]);
      continue;
    }

    int RootMaskedIdx = RootMask[RootIdx] * RootRatio + i % RootRatio;
    int OpIdx = RootMaskedIdx / OpRatio;
    if (OpMask[OpIdx] < 0) {
      // The incoming lanes are zero or undef, it doesn't matter which ones we
      // are using.
      Mask.push_back(OpMask[OpIdx]);
      continue;
    }

    // Ok, we have non-zero lanes, map them through.
    Mask.push_back(OpMask[OpIdx] * OpRatio +
                   RootMaskedIdx % OpRatio);
  }

  // See if we can recurse into the operand to combine more things.
  switch (Op.getOpcode()) {
    case X86ISD::PSHUFB:
      HasPSHUFB = true;
    case X86ISD::PSHUFD:
    case X86ISD::PSHUFHW:
    case X86ISD::PSHUFLW:
      if (Op.getOperand(0).hasOneUse() &&
          combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
                                        HasPSHUFB, DAG, DCI, Subtarget))
        return true;
      break;

    case X86ISD::UNPCKL:
    case X86ISD::UNPCKH:
      assert(Op.getOperand(0) == Op.getOperand(1) &&
             "We only combine unary shuffles!");
      // We can't check for single use, we have to check that this shuffle is
      // the only user.
      if (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
          combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
                                        HasPSHUFB, DAG, DCI, Subtarget))
        return true;
      break;
  }

  // Minor canonicalization of the accumulated shuffle mask to make it easier
  // to match below. All this does is detect masks with sequential pairs of
  // elements, and shrink them to the half-width mask. It does this in a loop
  // so it will reduce the size of the mask to the minimal width mask which
  // performs an equivalent shuffle.
  SmallVector<int, 16> WidenedMask;
  while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
    Mask = std::move(WidenedMask);
    WidenedMask.clear();
  }

  return combineX86ShuffleChain(Op, Root, Mask, Depth, HasPSHUFB, DAG, DCI,
                                Subtarget);
}
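// Worked example of the mask merge above (added commentary): suppose the
// accumulated RootMask = {1, 0} over v2i64 and Op is a PSHUFD with
// OpMask = {2, 3, 0, 1} over v4i32. Then RootRatio = 2 and OpRatio = 1, so for
// i = 0..3: RootMaskedIdx = RootMask[i / 2] * 2 + i % 2 = {2, 3, 0, 1} and the
// merged Mask[i] = OpMask[RootMaskedIdx] = {0, 1, 2, 3} -- the two shuffles
// cancel. The widening loop then shrinks {0, 1, 2, 3} down to a single-element
// mask, which combineX86ShuffleChain recognizes as a no-op.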
/// \brief Get the PSHUF-style mask from PSHUF node.
///
/// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
/// PSHUF-style masks that can be reused with such instructions.
static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
  SmallVector<int, 4> Mask;
  bool IsUnary;
  bool HaveMask =
      getTargetShuffleMask(N.getNode(), N.getSimpleValueType(), Mask, IsUnary);
  (void)HaveMask;
  assert(HaveMask);

  switch (N.getOpcode()) {
  case X86ISD::PSHUFD:
    return Mask;
  case X86ISD::PSHUFLW:
    Mask.resize(4);
    return Mask;
  case X86ISD::PSHUFHW:
    Mask.erase(Mask.begin(), Mask.begin() + 4);
    for (int &M : Mask)
      M -= 4;
    return Mask;
  default:
    llvm_unreachable("No valid shuffle instruction found!");
  }
}
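// Example (added commentary): for a PSHUFHW node whose full 8-lane mask is
// {0, 1, 2, 3, 5, 4, 6, 7}, the code above drops the identity low half and
// rebases the high half to return {1, 0, 2, 3}, which is the 4-lane immediate
// that PSHUF-style instructions actually encode.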
/// \brief Search for a combinable shuffle across a chain ending in pshufd.
///
/// We walk up the chain and look for a combinable shuffle, skipping over
/// shuffles that we could hoist this shuffle's transformation past without
/// altering anything.
static SDValue
combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
                             SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI) {
  assert(N.getOpcode() == X86ISD::PSHUFD &&
         "Called with something other than an x86 128-bit half shuffle!");
  SDLoc DL(N);

  // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
  // of the shuffles in the chain so that we can form a fresh chain to replace
  // this one.
  SmallVector<SDValue, 8> Chain;
  SDValue V = N.getOperand(0);
  for (; V.hasOneUse(); V = V.getOperand(0)) {
    switch (V.getOpcode()) {
    default:
      return SDValue(); // Nothing combined!

    case ISD::BITCAST:
      // Skip bitcasts as we always know the type for the target specific
      // instructions.
      continue;

    case X86ISD::PSHUFD:
      // Found another dword shuffle.
      break;

    case X86ISD::PSHUFLW:
      // Check that the low words (being shuffled) are the identity in the
      // dword shuffle, and the high words are self-contained.
      if (Mask[0] != 0 || Mask[1] != 1 ||
          !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
        return SDValue();

      Chain.push_back(V);
      continue;

    case X86ISD::PSHUFHW:
      // Check that the high words (being shuffled) are the identity in the
      // dword shuffle, and the low words are self-contained.
      if (Mask[2] != 2 || Mask[3] != 3 ||
          !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
        return SDValue();

      Chain.push_back(V);
      continue;

    case X86ISD::UNPCKL:
    case X86ISD::UNPCKH:
      // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
      // shuffle into a preceding word shuffle.
      if (V.getValueType() != MVT::v16i8 && V.getValueType() != MVT::v8i16)
        return SDValue();

      // Search for a half-shuffle which we can combine with.
      unsigned CombineOp =
          V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
      if (V.getOperand(0) != V.getOperand(1) ||
          !V->isOnlyUserOf(V.getOperand(0).getNode()))
        return SDValue();
      Chain.push_back(V);
      V = V.getOperand(0);
      do {
        switch (V.getOpcode()) {
        default:
          return SDValue(); // Nothing to combine.

        case X86ISD::PSHUFLW:
        case X86ISD::PSHUFHW:
          if (V.getOpcode() == CombineOp)
            break;

          Chain.push_back(V);
          // Fallthrough!
        case ISD::BITCAST:
          V = V.getOperand(0);
          continue;
        }
        break;
      } while (V.hasOneUse());
      break;
    }
    // Break out of the loop if we break out of the switch.
    break;
  }

  if (!V.hasOneUse())
    // We fell out of the loop without finding a viable combining instruction.
    return SDValue();

  // Merge this node's mask and our incoming mask.
  SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
  for (int &M : Mask)
    M = VMask[M];
  V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
                  getV4X86ShuffleImm8ForMask(Mask, DAG));

  // Rebuild the chain around this new shuffle.
  while (!Chain.empty()) {
    SDValue W = Chain.pop_back_val();

    if (V.getValueType() != W.getOperand(0).getValueType())
      V = DAG.getNode(ISD::BITCAST, DL, W.getOperand(0).getValueType(), V);

    switch (W.getOpcode()) {
    default:
      llvm_unreachable("Only PSHUF and UNPCK instructions get here!");

    case X86ISD::UNPCKL:
    case X86ISD::UNPCKH:
      V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
      break;

    case X86ISD::PSHUFD:
    case X86ISD::PSHUFLW:
    case X86ISD::PSHUFHW:
      V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
      break;
    }
  }
  if (V.getValueType() != N.getValueType())
    V = DAG.getNode(ISD::BITCAST, DL, N.getValueType(), V);

  // Return the new chain to replace N.
  return V;
}
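// Illustrative walk for the combine above (added commentary): with
//   N = (pshufd {0, 1, 3, 2} (pshuflw {2, 3, 0, 1} (pshufd {2, 3, 0, 1} X)))
// the dword mask of N leaves the low dwords alone, so the pshuflw can be
// hoisted past it; the two pshufds merge via M = VMask[M] into a single
// pshufd {2, 3, 1, 0}, and the pshuflw from the stack is rebuilt on top of
// the merged result.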
/// \brief Search for a combinable shuffle across a chain ending in pshuflw
/// or pshufhw.
///
/// We walk up the chain, skipping shuffles of the other half and looking
/// through shuffles which switch halves trying to find a shuffle of the same
/// pair of dwords.
static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
                                        SelectionDAG &DAG,
                                        TargetLowering::DAGCombinerInfo &DCI) {
  assert(
      (N.getOpcode() == X86ISD::PSHUFLW || N.getOpcode() == X86ISD::PSHUFHW) &&
      "Called with something other than an x86 128-bit half shuffle!");
  SDLoc DL(N);
  unsigned CombineOpcode = N.getOpcode();

  // Walk up a single-use chain looking for a combinable shuffle.
  SDValue V = N.getOperand(0);
  for (; V.hasOneUse(); V = V.getOperand(0)) {
    switch (V.getOpcode()) {
    default:
      return false; // Nothing combined!

    case ISD::BITCAST:
      // Skip bitcasts as we always know the type for the target specific
      // instructions.
      continue;

    case X86ISD::PSHUFLW:
    case X86ISD::PSHUFHW:
      if (V.getOpcode() == CombineOpcode)
        break;

      // Other-half shuffles are no-ops.
      continue;
    }
    // Break out of the loop if we break out of the switch.
    break;
  }

  if (!V.hasOneUse())
    // We fell out of the loop without finding a viable combining instruction.
    return false;

  // Combine away the bottom node as its shuffle will be accumulated into
  // a preceding shuffle.
  DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);

  // Record the old value.
  SDValue Old = V;

  // Merge this node's mask and our incoming mask (adjusted to account for all
  // the pshufd instructions encountered).
  SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
  for (int &M : Mask)
    M = VMask[M];
  V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
                  getV4X86ShuffleImm8ForMask(Mask, DAG));

  // Check that the shuffles didn't cancel each other out. If not, we need to
  // combine to the new one.
  if (Old != V)
    // Replace the combinable shuffle with the combined one, updating all users
    // so that we re-evaluate the chain here.
    DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);

  return true;
}
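// Example composition for the combine above (added commentary): if
// N = pshuflw {2, 3, 0, 1} sits above V = pshuflw {1, 0, 3, 2}, the merged
// immediate is M = VMask[M] = {3, 2, 1, 0}: N is combined away and V is
// rewritten as a single pshuflw with the merged mask. Intervening pshufhw
// nodes are simply skipped, since they only touch the other half of the
// register.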
/// \brief Try to combine x86 target specific shuffles.
static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
                                           TargetLowering::DAGCombinerInfo &DCI,
                                           const X86Subtarget *Subtarget) {
  SDLoc DL(N);
  MVT VT = N.getSimpleValueType();
  SmallVector<int, 4> Mask;

  switch (N.getOpcode()) {
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFLW:
  case X86ISD::PSHUFHW:
    Mask = getPSHUFShuffleMask(N);
    assert(Mask.size() == 4);
    break;
  default:
    return SDValue();
  }

  // Nuke no-op shuffles that show up after combining.
  if (isNoopShuffleMask(Mask))
    return DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);

  // Look for simplifications involving one or two shuffle instructions.
  SDValue V = N.getOperand(0);
  switch (N.getOpcode()) {
  default:
    break;
  case X86ISD::PSHUFLW:
  case X86ISD::PSHUFHW:
    assert(VT == MVT::v8i16);
    (void)VT;

    if (combineRedundantHalfShuffle(N, Mask, DAG, DCI))
      return SDValue(); // We combined away this shuffle, so we're done.

    // See if this reduces to a PSHUFD which is no more expensive and can
    // combine with more operations. Note that it has to at least flip the
    // dwords as otherwise it would have been removed as a no-op.
    if (Mask[0] == 2 && Mask[1] == 3 && Mask[2] == 0 && Mask[3] == 1) {
      int DMask[] = {0, 1, 2, 3};
      int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
      DMask[DOffset + 0] = DOffset + 1;
      DMask[DOffset + 1] = DOffset + 0;
      V = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V);
      DCI.AddToWorklist(V.getNode());
      V = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V,
                      getV4X86ShuffleImm8ForMask(DMask, DAG));
      DCI.AddToWorklist(V.getNode());
      return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
    }

    // Look for shuffle patterns which can be implemented as a single unpack.
    // FIXME: This doesn't handle the location of the PSHUFD generically, and
    // only works when we have a PSHUFD followed by two half-shuffles.
    if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
        (V.getOpcode() == X86ISD::PSHUFLW ||
         V.getOpcode() == X86ISD::PSHUFHW) &&
        V.getOpcode() != N.getOpcode() &&
        V.hasOneUse()) {
      SDValue D = V.getOperand(0);
      while (D.getOpcode() == ISD::BITCAST && D.hasOneUse())
        D = D.getOperand(0);
      if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
        SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
        SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
        int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
        int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
        int WordMask[8];
        for (int i = 0; i < 4; ++i) {
          WordMask[i + NOffset] = Mask[i] + NOffset;
          WordMask[i + VOffset] = VMask[i] + VOffset;
        }
        // Map the word mask through the DWord mask.
        int MappedMask[8];
        for (int i = 0; i < 8; ++i)
          MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
        const int UnpackLoMask[] = {0, 0, 1, 1, 2, 2, 3, 3};
        const int UnpackHiMask[] = {4, 4, 5, 5, 6, 6, 7, 7};
        if (std::equal(std::begin(MappedMask), std::end(MappedMask),
                       std::begin(UnpackLoMask)) ||
            std::equal(std::begin(MappedMask), std::end(MappedMask),
                       std::begin(UnpackHiMask))) {
          // We can replace all three shuffles with an unpack.
          V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, D.getOperand(0));
          DCI.AddToWorklist(V.getNode());
          return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
                                                : X86ISD::UNPCKH,
                             DL, MVT::v8i16, V, V);
        }
      }
    }

    break;

  case X86ISD::PSHUFD:
    if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG, DCI))
      return NewN;

    break;
  }

  return SDValue();
}
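// Added commentary: as a concrete instance of the first simplification above,
// (pshuflw {2, 3, 0, 1} X) swaps the two low words pairwise with the next two,
// which is exactly a swap of the two low dwords, so it is rewritten as
// (pshufd {1, 0, 2, 3} (bitcast X)) -- a form that can combine with many more
// surrounding nodes than a half-word shuffle can.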
/// \brief Try to combine a shuffle into a target-specific add-sub node.
///
/// We combine this directly on the abstract vector shuffle nodes so it is
/// easier to generically match. We also insert dummy vector shuffle nodes for
/// the operands which explicitly discard the lanes which are unused by this
/// operation to try to flow through the rest of the combiner the fact that
/// they're unused.
static SDValue combineShuffleToAddSub(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  // We only handle target-independent shuffles.
  // FIXME: It would be easy and harmless to use the target shuffle mask
  // extraction tool to support more.
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return SDValue();

  auto *SVN = cast<ShuffleVectorSDNode>(N);
  ArrayRef<int> Mask = SVN->getMask();
  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);

  // We require the first shuffle operand to be the SUB node, and the second to
  // be the ADD node.
  // FIXME: We should support the commuted patterns.
  if (V1->getOpcode() != ISD::FSUB || V2->getOpcode() != ISD::FADD)
    return SDValue();

  // If there are other uses of these operations we can't fold them.
  if (!V1->hasOneUse() || !V2->hasOneUse())
    return SDValue();

  // Ensure that both operations have the same operands. Note that we can
  // commute the FADD operands.
  SDValue LHS = V1->getOperand(0), RHS = V1->getOperand(1);
  if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
      (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
    return SDValue();

  // We're looking for blends between FADD and FSUB nodes. We insist on these
  // nodes being lined up in a specific expected pattern.
  if (!(isShuffleEquivalent(Mask, 0, 3) ||
        isShuffleEquivalent(Mask, 0, 5, 2, 7) ||
        isShuffleEquivalent(Mask, 0, 9, 2, 11, 4, 13, 6, 15)))
    return SDValue();

  // Only specific types are legal at this point, assert so we notice if and
  // when these change.
  assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v8f32 ||
          VT == MVT::v4f64) &&
         "Unknown vector type encountered!");

  return DAG.getNode(X86ISD::ADDSUB, DL, VT, LHS, RHS);
}
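// Added commentary: the canonical v4f32 instance of the pattern above is
//   (vector_shuffle <0, 5, 2, 7> (fsub A, B), (fadd A, B))
// which takes the subtraction results in the even lanes and the addition
// results in the odd lanes -- precisely the semantics of ADDSUBPS -- so the
// whole expression collapses to (X86ISD::ADDSUB A, B).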
/// PerformShuffleCombine - Performs several different shuffle combines.
static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const X86Subtarget *Subtarget) {
  SDLoc dl(N);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);

  // Don't create instructions with illegal types after legalize types has run.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
    return SDValue();

  // If we have legalized the vector types, look for blends of FADD and FSUB
  // nodes that we can fuse into an ADDSUB node.
  if (TLI.isTypeLegal(VT) && Subtarget->hasSSE3())
    if (SDValue AddSub = combineShuffleToAddSub(N, DAG))
      return AddSub;

  // Combine 256-bit vector shuffles. This is only profitable when in AVX mode
  if (Subtarget->hasFp256() && VT.is256BitVector() &&
      N->getOpcode() == ISD::VECTOR_SHUFFLE)
    return PerformShuffleCombine256(N, DAG, DCI, Subtarget);

  // During Type Legalization, when promoting illegal vector types,
  // the backend might introduce new shuffle dag nodes and bitcasts.
  //
  // This code performs the following transformation:
  // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
  //       (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
  //
  // We do this only if both the bitcast and the BINOP dag nodes have
  // one use. Also, perform this transformation only if the new binary
  // operation is legal. This is to avoid introducing dag nodes that
  // potentially need to be further expanded (or custom lowered) into a
  // less optimal sequence of dag nodes.
  if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
      N1.getOpcode() == ISD::UNDEF && N0.hasOneUse() &&
      N0.getOpcode() == ISD::BITCAST) {
    SDValue BC0 = N0.getOperand(0);
    EVT SVT = BC0.getValueType();
    unsigned Opcode = BC0.getOpcode();
    unsigned NumElts = VT.getVectorNumElements();

    if (BC0.hasOneUse() && SVT.isVector() &&
        SVT.getVectorNumElements() * 2 == NumElts &&
        TLI.isOperationLegal(Opcode, VT)) {
      bool CanFold = false;
      switch (Opcode) {
      default : break;
      case ISD::ADD :
      case ISD::FADD :
      case ISD::SUB :
      case ISD::FSUB :
      case ISD::MUL :
      case ISD::FMUL :
        CanFold = true;
      }

      unsigned SVTNumElts = SVT.getVectorNumElements();
      ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
      for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
        CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
      for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
        CanFold = SVOp->getMaskElt(i) < 0;

      if (CanFold) {
        SDValue BC00 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(0));
        SDValue BC01 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(1));
        SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
        return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, &SVOp->getMask()[0]);
      }
    }
  }

  // Only handle 128-bit wide vectors from here on.
  if (!VT.is128BitVector())
    return SDValue();

  // Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
  // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
  // consecutive, non-overlapping, and in the right order.
  SmallVector<SDValue, 16> Elts;
  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
    Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));

  SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true);
  if (LD.getNode())
    return LD;

  if (isTargetShuffle(N->getOpcode())) {
    SDValue Shuffle =
        PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget);
    if (Shuffle.getNode())
      return Shuffle;

    // Try recursively combining arbitrary sequences of x86 shuffle
    // instructions into higher-order shuffles. We do this after combining
    // specific PSHUF instruction sequences into their minimal form so that we
    // can evaluate how many specialized shuffle instructions are involved in
    // a particular chain.
    SmallVector<int, 1> NonceMask; // Just a placeholder.
    NonceMask.push_back(0);
    if (combineX86ShufflesRecursively(SDValue(N, 0), SDValue(N, 0), NonceMask,
                                      /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
                                      DCI, Subtarget))
      return SDValue(); // This routine will use CombineTo to replace N.
  }

  return SDValue();
}
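// Added commentary: a concrete instance of the bitcast/BINOP fold above is
//   (shuffle (bitcast v8i16 (add v4i32 A, B)), undef, <0, 2, 4, 6, u, u, u, u>)
// which becomes
//   (shuffle (add v8i16 (bitcast A), (bitcast B)), undef, <0, 2, 4, 6, u, ...>)
// so the truncating shuffle introduced by type promotion sits directly on a
// still-legal integer add instead of on a bitcast of it.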
/// PerformTruncateCombine - Converts a truncate operation into
/// a sequence of vector shuffle operations.
/// This is possible when truncating a 256-bit vector into a 128-bit vector.
static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const X86Subtarget *Subtarget) {
  return SDValue();
}
/// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
/// specific shuffle of a load can be folded into a single element load.
/// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
/// shuffles have been custom lowered so we need to handle those here.
static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
                                         TargetLowering::DAGCombinerInfo &DCI) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue InVec = N->getOperand(0);
  SDValue EltNo = N->getOperand(1);

  if (!isa<ConstantSDNode>(EltNo))
    return SDValue();

  EVT OriginalVT = InVec.getValueType();

  if (InVec.getOpcode() == ISD::BITCAST) {
    // Don't duplicate a load with other uses.
    if (!InVec.hasOneUse())
      return SDValue();
    EVT BCVT = InVec.getOperand(0).getValueType();
    if (BCVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
      return SDValue();
    InVec = InVec.getOperand(0);
  }

  EVT CurrentVT = InVec.getValueType();

  if (!isTargetShuffle(InVec.getOpcode()))
    return SDValue();

  // Don't duplicate a load with other uses.
  if (!InVec.hasOneUse())
    return SDValue();

  SmallVector<int, 16> ShuffleMask;
  bool UnaryShuffle;
  if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(),
                            ShuffleMask, UnaryShuffle))
    return SDValue();

  // Select the input vector, guarding against out of range extract vector.
  unsigned NumElems = CurrentVT.getVectorNumElements();
  int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
  int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt];
  SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
                                         : InVec.getOperand(1);

  // If inputs to shuffle are the same for both ops, then allow 2 uses
  unsigned AllowedUses = InVec.getNumOperands() > 1 &&
                         InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;

  if (LdNode.getOpcode() == ISD::BITCAST) {
    // Don't duplicate a load with other uses.
    if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
      return SDValue();

    AllowedUses = 1; // only allow 1 load use if we have a bitcast
    LdNode = LdNode.getOperand(0);
  }

  if (!ISD::isNormalLoad(LdNode.getNode()))
    return SDValue();

  LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);

  if (!LN0 || !LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
    return SDValue();

  EVT EltVT = N->getValueType(0);
  // If there's a bitcast before the shuffle, check if the load type and
  // alignment is valid.
  unsigned Align = LN0->getAlignment();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
      EltVT.getTypeForEVT(*DAG.getContext()));

  if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
    return SDValue();

  // All checks match so transform back to vector_shuffle so that DAG combiner
  // can finish the job
  SDLoc dl(N);

  // Create shuffle node taking into account the case that it's a unary shuffle
  SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT)
                                   : InVec.getOperand(1);
  Shuffle = DAG.getVectorShuffle(CurrentVT, dl,
                                 InVec.getOperand(0), Shuffle,
                                 &ShuffleMask[0]);
  Shuffle = DAG.getNode(ISD::BITCAST, dl, OriginalVT, Shuffle);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
                     EltNo);
}
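// Added commentary: the net effect above is to turn, e.g.,
//   (extract_elt (X86ISD::PSHUFD (load p), {2, 3, 0, 1}), 0)
// back into a target-independent vector_shuffle of the load so that the
// generic DAG combiner can fold it to a scalar load of element 2 at p+8 --
// a fold it already knows for ISD::VECTOR_SHUFFLE but not for the
// custom-lowered x86 shuffle nodes.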
/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
/// generation and convert it from being a bunch of shuffles and extracts
/// into a somewhat faster sequence. For i686, the best sequence is apparently
/// storing the value and loading scalars back, while for x64 we should
/// use 64-bit extracts and shifts.
static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
                                         TargetLowering::DAGCombinerInfo &DCI) {
  SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI);
  if (NewOp.getNode())
    return NewOp;

  SDValue InputVector = N->getOperand(0);

  // Detect mmx to i32 conversion through a v2i32 elt extract.
  if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
      N->getValueType(0) == MVT::i32 &&
      InputVector.getValueType() == MVT::v2i32) {

    // The bitcast source is a direct mmx result.
    SDValue MMXSrc = InputVector.getNode()->getOperand(0);
    if (MMXSrc.getValueType() == MVT::x86mmx)
      return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
                         N->getValueType(0),
                         InputVector.getNode()->getOperand(0));

    // The mmx is indirect: (i64 extract_elt (v1i64 bitcast (x86mmx ...))).
    SDValue MMXSrcOp = MMXSrc.getOperand(0);
    if (MMXSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT && MMXSrc.hasOneUse() &&
        MMXSrc.getValueType() == MVT::i64 && MMXSrcOp.hasOneUse() &&
        MMXSrcOp.getOpcode() == ISD::BITCAST &&
        MMXSrcOp.getValueType() == MVT::v1i64 &&
        MMXSrcOp.getOperand(0).getValueType() == MVT::x86mmx)
      return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
                         N->getValueType(0),
                         MMXSrcOp.getOperand(0));
  }

  // Only operate on vectors of 4 elements, where the alternative shuffling
  // gets to be more expensive.
  if (InputVector.getValueType() != MVT::v4i32)
    return SDValue();

  // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
  // single use which is a sign-extend or zero-extend, and all elements are
  // used.
  SmallVector<SDNode *, 4> Uses;
  unsigned ExtractedElements = 0;
  for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
       UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
    if (UI.getUse().getResNo() != InputVector.getResNo())
      return SDValue();

    SDNode *Extract = *UI;
    if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();

    if (Extract->getValueType(0) != MVT::i32)
      return SDValue();
    if (!Extract->hasOneUse())
      return SDValue();
    if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
        Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
      return SDValue();
    if (!isa<ConstantSDNode>(Extract->getOperand(1)))
      return SDValue();

    // Record which element was extracted.
    ExtractedElements |=
        1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();

    Uses.push_back(Extract);
  }

  // If not all the elements were used, this may not be worthwhile.
  if (ExtractedElements != 15)
    return SDValue();

  // Ok, we've now decided to do the transformation.
  // If 64-bit shifts are legal, use the extract-shift sequence,
  // otherwise bounce the vector off the cache.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Vals[4];
  SDLoc dl(InputVector);

  if (TLI.isOperationLegal(ISD::SRA, MVT::i64)) {
    SDValue Cst = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, InputVector);
    EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy();
    SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
                                     DAG.getConstant(0, VecIdxTy));
    SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
                                  DAG.getConstant(1, VecIdxTy));

    SDValue ShAmt = DAG.getConstant(32,
        DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64));
    Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
    Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
        DAG.getNode(ISD::SRA, dl, MVT::i64, BottomHalf, ShAmt));
    Vals[2] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, TopHalf);
    Vals[3] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
        DAG.getNode(ISD::SRA, dl, MVT::i64, TopHalf, ShAmt));
  } else {
    // Store the value to a temporary stack slot.
    SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
    SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
                              MachinePointerInfo(), false, false, 0);

    EVT ElementType = InputVector.getValueType().getVectorElementType();
    unsigned EltSize = ElementType.getSizeInBits() / 8;

    // Replace each use (extract) with a load of the appropriate element.
    for (unsigned i = 0; i < 4; ++i) {
      uint64_t Offset = EltSize * i;
      SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());

      SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
                                       StackPtr, OffsetVal);

      // Load the scalar.
      Vals[i] = DAG.getLoad(ElementType, dl, Ch,
                            ScalarAddr, MachinePointerInfo(),
                            false, false, false, 0);
    }
  }

  // Replace the extracts
  for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
       UE = Uses.end(); UI != UE; ++UI) {
    SDNode *Extract = *UI;

    SDValue Idx = Extract->getOperand(1);
    uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
    DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), Vals[IdxVal]);
  }

  // The replacement was made in place; don't return anything.
  return SDValue();
}
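// Added commentary: when 64-bit shifts are legal (x86-64), the transformation
// above rewrites four uses (extract_elt V, 0..3) of a v4i32 as two i64
// extracts of (bitcast v2i64 V) plus an arithmetic shift by 32 and truncates,
// avoiding three of the four cross-domain element extracts; otherwise the
// vector is bounced through a stack temporary and the four scalars are loaded
// back, which is the cheaper sequence on i686.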
/// \brief Matches a VSELECT onto min/max or returns 0 if the node doesn't
/// match.
static std::pair<unsigned, bool>
matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, SDValue RHS,
                   SelectionDAG &DAG, const X86Subtarget *Subtarget) {
  if (!VT.isVector())
    return std::make_pair(0, false);

  bool NeedSplit = false;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return std::make_pair(0, false);
  case MVT::v4i64:
  case MVT::v2i64:
    if (!Subtarget->hasVLX())
      return std::make_pair(0, false);
    break;
  case MVT::v64i8:
  case MVT::v32i16:
    if (!Subtarget->hasBWI())
      return std::make_pair(0, false);
    break;
  case MVT::v16i32:
  case MVT::v8i64:
    if (!Subtarget->hasAVX512())
      return std::make_pair(0, false);
    break;
  case MVT::v32i8:
  case MVT::v16i16:
  case MVT::v8i32:
    if (!Subtarget->hasAVX2())
      NeedSplit = true;
    if (!Subtarget->hasAVX())
      return std::make_pair(0, false);
    break;
  case MVT::v16i8:
  case MVT::v8i16:
  case MVT::v4i32:
    if (!Subtarget->hasSSE2())
      return std::make_pair(0, false);
  }

  // SSE2 has only a small subset of the operations.
  bool hasUnsigned = Subtarget->hasSSE41() ||
                     (Subtarget->hasSSE2() && VT == MVT::v16i8);
  bool hasSigned = Subtarget->hasSSE41() ||
                   (Subtarget->hasSSE2() && VT == MVT::v8i16);

  ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

  unsigned Opc = 0;
  // Check for x CC y ? x : y.
  if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
      DAG.isEqualTo(RHS, Cond.getOperand(1))) {
    switch (CC) {
    default: break;
    case ISD::SETULT:
    case ISD::SETULE:
      Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
    case ISD::SETUGT:
    case ISD::SETUGE:
      Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
    case ISD::SETLT:
    case ISD::SETLE:
      Opc = hasSigned ? X86ISD::SMIN : 0; break;
    case ISD::SETGT:
    case ISD::SETGE:
      Opc = hasSigned ? X86ISD::SMAX : 0; break;
    }
  // Check for x CC y ? y : x -- a min/max with reversed arms.
  } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
             DAG.isEqualTo(RHS, Cond.getOperand(0))) {
    switch (CC) {
    default: break;
    case ISD::SETULT:
    case ISD::SETULE:
      Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
    case ISD::SETUGT:
    case ISD::SETUGE:
      Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
    case ISD::SETLT:
    case ISD::SETLE:
      Opc = hasSigned ? X86ISD::SMAX : 0; break;
    case ISD::SETGT:
    case ISD::SETGE:
      Opc = hasSigned ? X86ISD::SMIN : 0; break;
    }
  }

  return std::make_pair(Opc, NeedSplit);
}
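// Added commentary: for example, with SSE4.1,
//   (vselect (setcc x, y, setult), x, y) on v4i32
// matches the first arm above and yields X86ISD::UMIN (pminud), while the
// reversed-arm form (vselect (setcc x, y, setult), y, x) yields UMAX. The
// NeedSplit flag reports 256-bit integer types that AVX1 can only handle by
// splitting into two 128-bit halves.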
static SDValue
transformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget *Subtarget) {
  SDLoc dl(N);
  SDValue Cond = N->getOperand(0);
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);

  if (Cond.getOpcode() == ISD::SIGN_EXTEND) {
    SDValue CondSrc = Cond->getOperand(0);
    if (CondSrc->getOpcode() == ISD::SIGN_EXTEND_INREG)
      Cond = CondSrc->getOperand(0);
  }

  if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
    return SDValue();

  // A vselect where all conditions and data are constants can be optimized into
  // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
  if (ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
      ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
    return SDValue();

  unsigned MaskValue = 0;
  if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
    return SDValue();

  MVT VT = N->getSimpleValueType(0);
  unsigned NumElems = VT.getVectorNumElements();
  SmallVector<int, 8> ShuffleMask(NumElems, -1);
  for (unsigned i = 0; i < NumElems; ++i) {
    // Be sure we emit undef where we can.
    if (Cond.getOperand(i)->getOpcode() == ISD::UNDEF)
      ShuffleMask[i] = -1;
    else
      ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
  }

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.isShuffleMaskLegal(ShuffleMask, VT))
    return SDValue();
  return DAG.getVectorShuffle(VT, dl, LHS, RHS, &ShuffleMask[0]);
}
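// Added commentary (mask-bit semantics assumed from BUILD_VECTORtoBlendMask,
// where a set bit i appears to mean lane i selects the second operand): for a
// constant condition such as (vselect <-1, 0, 0, -1>, LHS, RHS) on v4i32, the
// loop above would produce the shuffle mask <0, 5, 6, 3>, i.e. lanes whose
// condition selects RHS are remapped past NumElems, so the vselect becomes a
// plain two-source shuffle that blend lowering can match.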
23027 /// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT
23029 static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
23030 TargetLowering::DAGCombinerInfo &DCI,
23031 const X86Subtarget *Subtarget) {
23033 SDValue Cond = N->getOperand(0);
23034 // Get the LHS/RHS of the select.
23035 SDValue LHS = N->getOperand(1);
23036 SDValue RHS = N->getOperand(2);
23037 EVT VT = LHS.getValueType();
23038 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23040 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
23041 // instructions match the semantics of the common C idiom x<y?x:y but not
23042 // x<=y?x:y, because of how they handle negative zero (which can be
23043 // ignored in unsafe-math mode).
23044 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
23045 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
23046 VT != MVT::f80 && (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
23047 (Subtarget->hasSSE2() ||
23048 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
23049 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23051 unsigned Opcode = 0;
23052 // Check for x CC y ? x : y.
23053 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23054 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23058 // Converting this to a min would handle NaNs incorrectly, and swapping
23059 // the operands would cause it to handle comparisons between positive
23060 // and negative zero incorrectly.
23061 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23062 if (!DAG.getTarget().Options.UnsafeFPMath &&
23063 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23065 std::swap(LHS, RHS);
23067 Opcode = X86ISD::FMIN;
23070 // Converting this to a min would handle comparisons between positive
23071 // and negative zero incorrectly.
23072 if (!DAG.getTarget().Options.UnsafeFPMath &&
23073 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23075 Opcode = X86ISD::FMIN;
23078 // Converting this to a min would handle both negative zeros and NaNs
23079 // incorrectly, but we can swap the operands to fix both.
23080 std::swap(LHS, RHS);
23084 Opcode = X86ISD::FMIN;
23088 // Converting this to a max would handle comparisons between positive
23089 // and negative zero incorrectly.
23090 if (!DAG.getTarget().Options.UnsafeFPMath &&
23091 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23093 Opcode = X86ISD::FMAX;
23096 // Converting this to a max would handle NaNs incorrectly, and swapping
23097 // the operands would cause it to handle comparisons between positive
23098 // and negative zero incorrectly.
23099 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23100 if (!DAG.getTarget().Options.UnsafeFPMath &&
23101 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23103 std::swap(LHS, RHS);
23105 Opcode = X86ISD::FMAX;
23108 // Converting this to a max would handle both negative zeros and NaNs
23109 // incorrectly, but we can swap the operands to fix both.
23110 std::swap(LHS, RHS);
23114 Opcode = X86ISD::FMAX;
23117 // Check for x CC y ? y : x -- a min/max with reversed arms.
23118 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23119 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23123 // Converting this to a min would handle comparisons between positive
23124 // and negative zero incorrectly, and swapping the operands would
23125 // cause it to handle NaNs incorrectly.
23126 if (!DAG.getTarget().Options.UnsafeFPMath &&
23127 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
23128 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23130 std::swap(LHS, RHS);
23132 Opcode = X86ISD::FMIN;
23135 // Converting this to a min would handle NaNs incorrectly.
23136 if (!DAG.getTarget().Options.UnsafeFPMath &&
23137 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
23139 Opcode = X86ISD::FMIN;
23142 // Converting this to a min would handle both negative zeros and NaNs
23143 // incorrectly, but we can swap the operands to fix both.
23144 std::swap(LHS, RHS);
23148 Opcode = X86ISD::FMIN;
23152 // Converting this to a max would handle NaNs incorrectly.
23153 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23155 Opcode = X86ISD::FMAX;
23158 // Converting this to a max would handle comparisons between positive
23159 // and negative zero incorrectly, and swapping the operands would
23160 // cause it to handle NaNs incorrectly.
23161 if (!DAG.getTarget().Options.UnsafeFPMath &&
23162 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
23163 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23165 std::swap(LHS, RHS);
23167 Opcode = X86ISD::FMAX;
23170 // Converting this to a max would handle both negative zeros and NaNs
23171 // incorrectly, but we can swap the operands to fix both.
23172 std::swap(LHS, RHS);
23176 Opcode = X86ISD::FMAX;
23182 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
23185 EVT CondVT = Cond.getValueType();
23186 if (Subtarget->hasAVX512() && VT.isVector() && CondVT.isVector() &&
23187 CondVT.getVectorElementType() == MVT::i1) {
23188 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
23189 // lowering on KNL. In this case we convert it to
23190 // v16i8 (select v16i8, v16i8, v16i8) and use AVX instruction.
23191 // The same situation for all 128 and 256-bit vectors of i8 and i16.
23192 // Since SKX these selects have a proper lowering.
23193 EVT OpVT = LHS.getValueType();
23194 if ((OpVT.is128BitVector() || OpVT.is256BitVector()) &&
23195 (OpVT.getVectorElementType() == MVT::i8 ||
23196 OpVT.getVectorElementType() == MVT::i16) &&
23197 !(Subtarget->hasBWI() && Subtarget->hasVLX())) {
23198 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond);
23199 DCI.AddToWorklist(Cond.getNode());
23200 return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS);
23203 // If this is a select between two integer constants, try to do some
23205 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
23206 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
23207 // Don't do this for crazy integer types.
23208 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
23209 // If this is efficiently invertible, canonicalize the LHSC/RHSC values
23210 // so that TrueC (the true value) is larger than FalseC.
23211 bool NeedsCondInvert = false;
23213 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
23214 // Efficiently invertible.
23215 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible.
23216 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible.
23217 isa<ConstantSDNode>(Cond.getOperand(1))))) {
23218 NeedsCondInvert = true;
23219 std::swap(TrueC, FalseC);
23222 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0.
23223 if (FalseC->getAPIntValue() == 0 &&
23224 TrueC->getAPIntValue().isPowerOf2()) {
23225 if (NeedsCondInvert) // Invert the condition if needed.
23226 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23227 DAG.getConstant(1, Cond.getValueType()));
23229 // Zero extend the condition if needed.
23230 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
23232 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
23233 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
23234 DAG.getConstant(ShAmt, MVT::i8));
23237 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)+cst.
23238 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
23239 if (NeedsCondInvert) // Invert the condition if needed.
23240 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23241 DAG.getConstant(1, Cond.getValueType()));
23243 // Zero extend the condition if needed.
23244 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
23245 FalseC->getValueType(0), Cond);
23246 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23247 SDValue(FalseC, 0));
23250 // Optimize cases that will turn into an LEA instruction. This requires
23251 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
23252 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
23253 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
23254 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
23256 bool isFastMultiplier = false;
23258 switch ((unsigned char)Diff) {
23260 case 1: // result = add base, cond
23261 case 2: // result = lea base( , cond*2)
23262 case 3: // result = lea base(cond, cond*2)
23263 case 4: // result = lea base( , cond*4)
23264 case 5: // result = lea base(cond, cond*4)
23265 case 8: // result = lea base( , cond*8)
23266 case 9: // result = lea base(cond, cond*8)
23267 isFastMultiplier = true;
23272 if (isFastMultiplier) {
23273 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
23274 if (NeedsCondInvert) // Invert the condition if needed.
23275 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23276 DAG.getConstant(1, Cond.getValueType()));
23278 // Zero extend the condition if needed.
23279 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
23281 // Scale the condition by the difference.
23283 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
23284 DAG.getConstant(Diff, Cond.getValueType()));
23286 // Add the base if non-zero.
23287 if (FalseC->getAPIntValue() != 0)
23288 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23289 SDValue(FalseC, 0));
23296 // Canonicalize max and min:
23297 // (x > y) ? x : y -> (x >= y) ? x : y
23298 // (x < y) ? x : y -> (x <= y) ? x : y
23299 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
23300 // the need for an extra compare
23301 // against zero. e.g.
23302 // (x - y) > 0 : (x - y) ? 0 -> (x - y) >= 0 : (x - y) ? 0
23304 // testl %edi, %edi
23306 // cmovgl %edi, %eax
23310 // cmovsl %eax, %edi
23311 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
23312 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23313 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23314 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23319 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
23320 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
23321 Cond.getOperand(0), Cond.getOperand(1), NewCC);
23322 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
23327 // Early exit check
23328 if (!TLI.isTypeLegal(VT))
23331 // Match VSELECTs into subs with unsigned saturation.
23332 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
23333 // psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
23334 ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
23335 (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
23336 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23338 // Check if one of the arms of the VSELECT is a zero vector. If it's on the
23339 // left side invert the predicate to simplify logic below.
23341 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
23343 CC = ISD::getSetCCInverse(CC, true);
23344 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
23348 if (Other.getNode() && Other->getNumOperands() == 2 &&
23349 DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
23350 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
23351 SDValue CondRHS = Cond->getOperand(1);
23353 // Look for a general sub with unsigned saturation first.
23354 // x >= y ? x-y : 0 --> subus x, y
23355 // x > y ? x-y : 0 --> subus x, y
23356 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
23357 Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
23358 return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);
23360 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS))
23361 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
23362 if (auto *CondRHSBV = dyn_cast<BuildVectorSDNode>(CondRHS))
23363 if (auto *CondRHSConst = CondRHSBV->getConstantSplatNode())
23364 // If the RHS is a constant we have to reverse the const
23365 // canonicalization.
23366 // x > C-1 ? x+-C : 0 --> subus x, C
23367 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
23368 CondRHSConst->getAPIntValue() ==
23369 (-OpRHSConst->getAPIntValue() - 1))
23370 return DAG.getNode(
23371 X86ISD::SUBUS, DL, VT, OpLHS,
23372 DAG.getConstant(-OpRHSConst->getAPIntValue(), VT));
23374 // Another special case: If C was a sign bit, the sub has been
23375 // canonicalized into a xor.
23376 // FIXME: Would it be better to use computeKnownBits to determine
23377 // whether it's safe to decanonicalize the xor?
23378 // x s< 0 ? x^C : 0 --> subus x, C
23379 if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
23380 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
23381 OpRHSConst->getAPIntValue().isSignBit())
23382 // Note that we have to rebuild the RHS constant here to ensure we
23383 // don't rely on particular values of undef lanes.
23384 return DAG.getNode(
23385 X86ISD::SUBUS, DL, VT, OpLHS,
23386 DAG.getConstant(OpRHSConst->getAPIntValue(), VT));
23391 // Try to match a min/max vector operation.
23392 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC) {
23393 std::pair<unsigned, bool> ret = matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget);
23394 unsigned Opc = ret.first;
23395 bool NeedSplit = ret.second;
23397 if (Opc && NeedSplit) {
23398 unsigned NumElems = VT.getVectorNumElements();
23399 // Extract the LHS vectors
23400 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, DL);
23401 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, DL);
23403 // Extract the RHS vectors
23404 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, DL);
23405 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, DL);
23407 // Create min/max for each subvector
23408 LHS = DAG.getNode(Opc, DL, LHS1.getValueType(), LHS1, RHS1);
23409 RHS = DAG.getNode(Opc, DL, LHS2.getValueType(), LHS2, RHS2);
23411 // Merge the result
23412 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS, RHS);
23414 return DAG.getNode(Opc, DL, VT, LHS, RHS);
23417 // Simplify vector selection if condition value type matches vselect
23419 if (N->getOpcode() == ISD::VSELECT && CondVT == VT) {
23420 assert(Cond.getValueType().isVector() &&
23421 "vector select expects a vector selector!");
23423 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
23424 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
23426 // Try invert the condition if true value is not all 1s and false value
23428 if (!TValIsAllOnes && !FValIsAllZeros &&
23429 // Check if the selector will be produced by CMPP*/PCMP*
23430 Cond.getOpcode() == ISD::SETCC &&
23431 // Check if SETCC has already been promoted
23432 TLI.getSetCCResultType(*DAG.getContext(), VT) == CondVT) {
23433 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
23434 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
23436 if (TValIsAllZeros || FValIsAllOnes) {
23437 SDValue CC = Cond.getOperand(2);
23438 ISD::CondCode NewCC =
23439 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
23440 Cond.getOperand(0).getValueType().isInteger());
23441 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1), NewCC);
23442 std::swap(LHS, RHS);
23443 TValIsAllOnes = FValIsAllOnes;
23444 FValIsAllZeros = TValIsAllZeros;
23448 if (TValIsAllOnes || FValIsAllZeros) {
23451 if (TValIsAllOnes && FValIsAllZeros)
23453 else if (TValIsAllOnes)
23454 Ret = DAG.getNode(ISD::OR, DL, CondVT, Cond,
23455 DAG.getNode(ISD::BITCAST, DL, CondVT, RHS));
23456 else if (FValIsAllZeros)
23457 Ret = DAG.getNode(ISD::AND, DL, CondVT, Cond,
23458 DAG.getNode(ISD::BITCAST, DL, CondVT, LHS));
23460 return DAG.getNode(ISD::BITCAST, DL, VT, Ret);
23464 // If we know that this node is legal then we know that it is going to be
23465 // matched by one of the SSE/AVX BLEND instructions. These instructions only
23466 // depend on the highest bit in each word. Try to use SimplifyDemandedBits
23467 // to simplify previous instructions.
23468 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
23469 !DCI.isBeforeLegalize() &&
23470 // We explicitly check against v8i16 and v16i16 because, although
23471 // they're marked as Custom, they might only be legal when Cond is a
23472 // build_vector of constants. This will be taken care of in a later
23473 // phase.
23474 (TLI.isOperationLegalOrCustom(ISD::VSELECT, VT) && VT != MVT::v16i16 &&
23475 VT != MVT::v8i16) &&
23476 // Don't optimize vector of constants. Those are handled by
23477 // the generic code and all the bits must be properly set for
23478 // the generic optimizer.
23479 !ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
23480 unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();
23482 // Don't optimize vector selects that map to mask-registers.
23483 if (BitWidth == 1)
23484 return SDValue();
23486 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
23487 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);
23489 APInt KnownZero, KnownOne;
23490 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
23491 DCI.isBeforeLegalizeOps());
23492 if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) ||
23493 TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne,
23494 TLO)) {
23495 // If we changed the computation somewhere in the DAG, this change
23496 // will affect all users of Cond.
23497 // Make sure it is fine and update all the nodes so that we do not
23498 // use the generic VSELECT anymore. Otherwise, we may perform
23499 // wrong optimizations as we messed up with the actual expectation
23500 // for the vector boolean values.
23501 if (Cond != TLO.Old) {
23502 // Check all uses of that condition operand to check whether it will be
23503 // consumed by non-BLEND instructions, which may depend on all bits
23504 // being set properly.
23505 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
23506 I != E; ++I)
23507 if (I->getOpcode() != ISD::VSELECT)
23508 // TODO: Add other opcodes eventually lowered into BLEND.
23509 return SDValue();
23511 // Update all the users of the condition, before committing the change,
23512 // so that the VSELECT optimizations that expect the correct vector
23513 // boolean value will not be triggered.
23514 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
23515 I != E; ++I)
23516 DAG.ReplaceAllUsesOfValueWith(
23517 SDValue(*I, 0),
23518 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(*I), I->getValueType(0),
23519 Cond, I->getOperand(1), I->getOperand(2)));
23520 DCI.CommitTargetLoweringOpt(TLO);
23521 } else {
23523 // At this point, only Cond is changed. Change the condition
23524 // just for N to keep the opportunity to optimize all other
23525 // users their own way.
23526 DAG.ReplaceAllUsesOfValueWith(
23527 SDValue(N, 0),
23528 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(N), N->getValueType(0),
23529 TLO.New, N->getOperand(1), N->getOperand(2)));
23530 return SDValue();
23531 }
23532 }
23533 }
23534 // We should generate an X86ISD::BLENDI from a vselect if its argument
23535 // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
23536 // constants. This specific pattern gets generated when we split a
23537 // selector for a 512 bit vector in a machine without AVX512 (but with
23538 // 256-bit vectors), during legalization:
23540 // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
23542 // Iff we find this pattern and the build_vectors are built from
23543 // constants, we translate the vselect into a shuffle_vector that we
23544 // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
23545 if ((N->getOpcode() == ISD::VSELECT ||
23546 N->getOpcode() == X86ISD::SHRUNKBLEND) &&
23547 !DCI.isBeforeLegalize()) {
23548 SDValue Shuffle = transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
23549 if (Shuffle.getNode())
23550 return Shuffle;
23551 }
23553 return SDValue();
23554 }
23556 // Check whether a boolean test is testing a boolean value generated by
23557 // X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
23558 // code.
23560 // Simplify the following patterns:
23561 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
23562 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
23563 // to (Op EFLAGS Cond)
23565 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
23566 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
23567 // to (Op EFLAGS !Cond)
23569 // where Op could be BRCOND or CMOV.
23571 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
23572 // Quit unless Cmp is a CMP, or a SUB whose value result is unused.
23573 if (Cmp.getOpcode() != X86ISD::CMP &&
23574 (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
23575 return SDValue();
23577 // Quit if not used as a boolean value.
23578 if (CC != X86::COND_E && CC != X86::COND_NE)
23579 return SDValue();
23581 // Check CMP operands. One of them should be 0 or 1 and the other should be
23582 // a SetCC or extended from it.
23583 SDValue Op1 = Cmp.getOperand(0);
23584 SDValue Op2 = Cmp.getOperand(1);
23586 SDValue SetCC;
23587 const ConstantSDNode* C = nullptr;
23588 bool needOppositeCond = (CC == X86::COND_E);
23589 bool checkAgainstTrue = false; // Is it a comparison against 1?
23591 if ((C = dyn_cast<ConstantSDNode>(Op1)))
23592 SetCC = Op2;
23593 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
23594 SetCC = Op1;
23595 else // Quit if neither operand is a constant.
23596 return SDValue();
23598 if (C->getZExtValue() == 1) {
23599 needOppositeCond = !needOppositeCond;
23600 checkAgainstTrue = true;
23601 } else if (C->getZExtValue() != 0)
23602 // Quit if the constant is neither 0 nor 1.
23603 return SDValue();
23605 bool truncatedToBoolWithAnd = false;
23606 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
23607 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
23608 SetCC.getOpcode() == ISD::TRUNCATE ||
23609 SetCC.getOpcode() == ISD::AND) {
23610 if (SetCC.getOpcode() == ISD::AND) {
23611 int OpIdx = -1;
23612 ConstantSDNode *CS;
23613 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(0))) &&
23614 CS->getZExtValue() == 1)
23615 OpIdx = 1;
23616 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(1))) &&
23617 CS->getZExtValue() == 1)
23618 OpIdx = 0;
23619 if (OpIdx == -1)
23620 break;
23621 SetCC = SetCC.getOperand(OpIdx);
23622 truncatedToBoolWithAnd = true;
23623 } else
23624 SetCC = SetCC.getOperand(0);
23625 }
23627 switch (SetCC.getOpcode()) {
23628 case X86ISD::SETCC_CARRY:
23629 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
23630 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
23631 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
23632 // truncated to i1 using 'and'.
23633 if (checkAgainstTrue && !truncatedToBoolWithAnd)
23634 break;
23635 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
23636 "Invalid use of SETCC_CARRY!");
23637 // FALL THROUGH
23638 case X86ISD::SETCC:
23639 // Set the condition code or opposite one if necessary.
23640 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
23641 if (needOppositeCond)
23642 CC = X86::GetOppositeBranchCondition(CC);
23643 return SetCC.getOperand(1);
23644 case X86ISD::CMOV: {
23645 // Check whether false/true value has canonical one, i.e. 0 or 1.
23646 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
23647 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
23648 // Quit if true value is not a constant.
23649 if (!TVal)
23650 return SDValue();
23651 // Quit if false value is not a constant.
23652 if (!FVal) {
23653 SDValue Op = SetCC.getOperand(0);
23654 // Skip 'zext' or 'trunc' node.
23655 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
23656 Op.getOpcode() == ISD::TRUNCATE)
23657 Op = Op.getOperand(0);
23658 // A special case for rdrand/rdseed, where 0 is set if false cond is
23659 // found.
23660 if ((Op.getOpcode() != X86ISD::RDRAND &&
23661 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
23662 return SDValue();
23663 }
23664 // Quit if false value is not the constant 0 or 1.
23665 bool FValIsFalse = true;
23666 if (FVal && FVal->getZExtValue() != 0) {
23667 if (FVal->getZExtValue() != 1)
23669 // If FVal is 1, opposite cond is needed.
23670 needOppositeCond = !needOppositeCond;
23671 FValIsFalse = false;
23673 // Quit if TVal is not the constant opposite of FVal.
23674 if (FValIsFalse && TVal->getZExtValue() != 1)
23676 if (!FValIsFalse && TVal->getZExtValue() != 0)
23678 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
23679 if (needOppositeCond)
23680 CC = X86::GetOppositeBranchCondition(CC);
23681 return SetCC.getOperand(3);
23682 }
23683 }
23685 return SDValue();
23686 }
23688 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
23689 static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
23690 TargetLowering::DAGCombinerInfo &DCI,
23691 const X86Subtarget *Subtarget) {
23692 SDLoc DL(N);
23694 // If the flag operand isn't dead, don't touch this CMOV.
23695 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
23696 return SDValue();
23698 SDValue FalseOp = N->getOperand(0);
23699 SDValue TrueOp = N->getOperand(1);
23700 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
23701 SDValue Cond = N->getOperand(3);
23703 if (CC == X86::COND_E || CC == X86::COND_NE) {
23704 switch (Cond.getOpcode()) {
23705 default: break;
23706 case X86ISD::BSR:
23707 case X86ISD::BSF:
23708 // If the operand of BSR / BSF is proven never zero, then ZF cannot be set.
23709 if (DAG.isKnownNeverZero(Cond.getOperand(0)))
23710 return (CC == X86::COND_E) ? FalseOp : TrueOp;
23711 }
23712 }
23714 SDValue Flags;
23716 Flags = checkBoolTestSetCCCombine(Cond, CC);
23717 if (Flags.getNode() &&
23718 // Extra check as FCMOV only supports a subset of X86 cond.
23719 (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
23720 SDValue Ops[] = { FalseOp, TrueOp,
23721 DAG.getConstant(CC, MVT::i8), Flags };
23722 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
23725 // If this is a select between two integer constants, try to do some
23726 // optimizations. Note that the operands are ordered the opposite of SELECT
23727 // operands.
23728 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
23729 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
23730 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
23731 // larger than FalseC (the false value).
23732 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
23733 CC = X86::GetOppositeBranchCondition(CC);
23734 std::swap(TrueC, FalseC);
23735 std::swap(TrueOp, FalseOp);
23736 }
23738 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
23739 // This is efficient for any integer data type (including i8/i16) and
23740 // shift amount.
23741 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
23742 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
23743 DAG.getConstant(CC, MVT::i8), Cond);
23745 // Zero extend the condition if needed.
23746 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
23748 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
23749 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
23750 DAG.getConstant(ShAmt, MVT::i8));
23751 if (N->getNumValues() == 2) // Dead flag value?
23752 return DCI.CombineTo(N, Cond, SDValue());
23753 return Cond;
23754 }
23756 // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst. This is efficient
23757 // for any integer data type, including i8/i16.
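// For example, (Cond ? 5 : 4) becomes zext(setcc(Cond)) + 4, replacing the
// conditional move with a setcc and an add.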
23758 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
23759 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
23760 DAG.getConstant(CC, MVT::i8), Cond);
23762 // Zero extend the condition if needed.
23763 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
23764 FalseC->getValueType(0), Cond);
23765 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23766 SDValue(FalseC, 0));
23768 if (N->getNumValues() == 2) // Dead flag value?
23769 return DCI.CombineTo(N, Cond, SDValue());
23770 return Cond;
23771 }
23773 // Optimize cases that will turn into an LEA instruction. This requires
23774 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
23775 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
23776 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
23777 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
23779 bool isFastMultiplier = false;
23780 if (Diff < 10) {
23781 switch ((unsigned char)Diff) {
23782 default: break;
23783 case 1: // result = add base, cond
23784 case 2: // result = lea base( , cond*2)
23785 case 3: // result = lea base(cond, cond*2)
23786 case 4: // result = lea base( , cond*4)
23787 case 5: // result = lea base(cond, cond*4)
23788 case 8: // result = lea base( , cond*8)
23789 case 9: // result = lea base(cond, cond*8)
23790 isFastMultiplier = true;
23791 break;
23792 }
23793 }
23795 if (isFastMultiplier) {
23796 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
23797 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
23798 DAG.getConstant(CC, MVT::i8), Cond);
23799 // Zero extend the condition if needed.
23800 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
23801 Cond);
23802 // Scale the condition by the difference.
23804 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
23805 DAG.getConstant(Diff, Cond.getValueType()));
23807 // Add the base if non-zero.
23808 if (FalseC->getAPIntValue() != 0)
23809 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23810 SDValue(FalseC, 0));
23811 if (N->getNumValues() == 2) // Dead flag value?
23812 return DCI.CombineTo(N, Cond, SDValue());
23813 return Cond;
23814 }
23815 }
23816 }
23817 }
23819 // Handle these cases:
23820 // (select (x != c), e, c) -> (select (x != c), e, x),
23821 // (select (x == c), c, e) -> (select (x == c), x, e)
23822 // where the c is an integer constant, and the "select" is the combination
23823 // of CMOV and CMP.
23825 // The rationale for this change is that the conditional-move from a constant
23826 // needs two instructions, however, conditional-move from a register needs
23827 // only one instruction.
23829 // CAVEAT: By replacing a constant with a symbolic value, it may obscure
23830 // some instruction-combining opportunities. This opt needs to be
23831 // postponed as late as possible.
23833 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
23834 // the DCI.xxxx conditions are provided to postpone the optimization as
23835 // late as possible.
23837 ConstantSDNode *CmpAgainst = nullptr;
23838 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
23839 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
23840 !isa<ConstantSDNode>(Cond.getOperand(0))) {
23842 if (CC == X86::COND_NE &&
23843 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
23844 CC = X86::GetOppositeBranchCondition(CC);
23845 std::swap(TrueOp, FalseOp);
23846 }
23848 if (CC == X86::COND_E &&
23849 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
23850 SDValue Ops[] = { FalseOp, Cond.getOperand(0),
23851 DAG.getConstant(CC, MVT::i8), Cond };
23852 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
23853 }
23854 }
23855 }
23857 return SDValue();
23858 }
23860 static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
23861 const X86Subtarget *Subtarget) {
23862 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
23863 switch (IntNo) {
23864 default: return SDValue();
23865 // SSE/AVX/AVX2 blend intrinsics.
23866 case Intrinsic::x86_avx2_pblendvb:
23867 case Intrinsic::x86_avx2_pblendw:
23868 case Intrinsic::x86_avx2_pblendd_128:
23869 case Intrinsic::x86_avx2_pblendd_256:
23870 // Don't try to simplify this intrinsic if we don't have AVX2.
23871 if (!Subtarget->hasAVX2())
23872 return SDValue();
23873 // FALL-THROUGH
23874 case Intrinsic::x86_avx_blend_pd_256:
23875 case Intrinsic::x86_avx_blend_ps_256:
23876 case Intrinsic::x86_avx_blendv_pd_256:
23877 case Intrinsic::x86_avx_blendv_ps_256:
23878 // Don't try to simplify this intrinsic if we don't have AVX.
23879 if (!Subtarget->hasAVX())
23880 return SDValue();
23881 // FALL-THROUGH
23882 case Intrinsic::x86_sse41_pblendw:
23883 case Intrinsic::x86_sse41_blendpd:
23884 case Intrinsic::x86_sse41_blendps:
23885 case Intrinsic::x86_sse41_blendvps:
23886 case Intrinsic::x86_sse41_blendvpd:
23887 case Intrinsic::x86_sse41_pblendvb: {
23888 SDValue Op0 = N->getOperand(1);
23889 SDValue Op1 = N->getOperand(2);
23890 SDValue Mask = N->getOperand(3);
23892 // Don't try to simplify this intrinsic if we don't have SSE4.1.
23893 if (!Subtarget->hasSSE41())
23894 return SDValue();
23896 // fold (blend A, A, Mask) -> A
23897 if (Op0 == Op1)
23898 return Op0;
23899 // fold (blend A, B, allZeros) -> A
23900 if (ISD::isBuildVectorAllZeros(Mask.getNode()))
23901 return Op0;
23902 // fold (blend A, B, allOnes) -> B
23903 if (ISD::isBuildVectorAllOnes(Mask.getNode()))
23904 return Op1;
23906 // Simplify the case where the mask is a constant i32 value.
23907 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mask)) {
23908 if (C->isNullValue())
23909 return Op0;
23910 if (C->isAllOnesValue())
23911 return Op1;
23912 }
23914 break;
23915 }
23917 // Packed SSE2/AVX2 arithmetic shift immediate intrinsics.
23918 case Intrinsic::x86_sse2_psrai_w:
23919 case Intrinsic::x86_sse2_psrai_d:
23920 case Intrinsic::x86_avx2_psrai_w:
23921 case Intrinsic::x86_avx2_psrai_d:
23922 case Intrinsic::x86_sse2_psra_w:
23923 case Intrinsic::x86_sse2_psra_d:
23924 case Intrinsic::x86_avx2_psra_w:
23925 case Intrinsic::x86_avx2_psra_d: {
23926 SDValue Op0 = N->getOperand(1);
23927 SDValue Op1 = N->getOperand(2);
23928 EVT VT = Op0.getValueType();
23929 assert(VT.isVector() && "Expected a vector type!");
23931 if (isa<BuildVectorSDNode>(Op1))
23932 Op1 = Op1.getOperand(0);
23934 if (!isa<ConstantSDNode>(Op1))
23935 return SDValue();
23937 EVT SVT = VT.getVectorElementType();
23938 unsigned SVTBits = SVT.getSizeInBits();
23940 ConstantSDNode *CND = cast<ConstantSDNode>(Op1);
23941 const APInt &C = APInt(SVTBits, CND->getAPIntValue().getZExtValue());
23942 uint64_t ShAmt = C.getZExtValue();
23944 // Don't try to convert this shift into a ISD::SRA if the shift
23945 // count is bigger than or equal to the element size.
23946 if (ShAmt >= SVTBits)
23947 return SDValue();
23949 // Trivial case: if the shift count is zero, then fold this
23950 // into the first operand.
23951 if (ShAmt == 0)
23952 return Op0;
23954 // Replace this packed shift intrinsic with a target independent
23955 // shift dag node.
23956 SDValue Splat = DAG.getConstant(C, VT);
23957 return DAG.getNode(ISD::SRA, SDLoc(N), VT, Op0, Splat);
23958 }
23959 }
23960 }
23962 /// PerformMulCombine - Optimize a single multiply with constant into two
23963 /// in order to implement it with two cheaper instructions, e.g.
23964 /// LEA + SHL, LEA + LEA.
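/// For example, 45 = 9 * 5 can be emitted as two LEAs, and 40 = 5 * 8 as an
/// LEA plus a left shift by 3.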
23965 static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
23966 TargetLowering::DAGCombinerInfo &DCI) {
23967 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
23968 return SDValue();
23970 EVT VT = N->getValueType(0);
23971 if (VT != MVT::i64 && VT != MVT::i32)
23972 return SDValue();
23974 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
23975 if (!C)
23976 return SDValue();
23977 uint64_t MulAmt = C->getZExtValue();
23978 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
23979 return SDValue();
23981 uint64_t MulAmt1 = 0;
23982 uint64_t MulAmt2 = 0;
23983 if ((MulAmt % 9) == 0) {
23984 MulAmt1 = 9;
23985 MulAmt2 = MulAmt / 9;
23986 } else if ((MulAmt % 5) == 0) {
23987 MulAmt1 = 5;
23988 MulAmt2 = MulAmt / 5;
23989 } else if ((MulAmt % 3) == 0) {
23990 MulAmt1 = 3;
23991 MulAmt2 = MulAmt / 3;
23992 }
23993 if (MulAmt2 &&
23994 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
23995 SDLoc DL(N);
23997 if (isPowerOf2_64(MulAmt2) &&
23998 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
23999 // If the second multiplier is pow2, issue it first. We want the multiply
24000 // by 3, 5, or 9 to be folded into the addressing mode unless the lone use
24001 // of the multiply is an add operation.
24002 std::swap(MulAmt1, MulAmt2);
24004 SDValue NewMul;
24005 if (isPowerOf2_64(MulAmt1))
24006 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
24007 DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
24008 else
24009 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
24010 DAG.getConstant(MulAmt1, VT));
24012 if (isPowerOf2_64(MulAmt2))
24013 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
24014 DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
24015 else
24016 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
24017 DAG.getConstant(MulAmt2, VT));
24019 // Do not add new nodes to DAG combiner worklist.
24020 DCI.CombineTo(N, NewMul, false);
24021 }
24023 return SDValue();
24024 }
24025 static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
24026 SDValue N0 = N->getOperand(0);
24027 SDValue N1 = N->getOperand(1);
24028 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
24029 EVT VT = N0.getValueType();
24031 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
24032 // since the result of setcc_c is all zero's or all ones.
24033 if (VT.isInteger() && !VT.isVector() &&
24034 N1C && N0.getOpcode() == ISD::AND &&
24035 N0.getOperand(1).getOpcode() == ISD::Constant) {
24036 SDValue N00 = N0.getOperand(0);
24037 if (N00.getOpcode() == X86ISD::SETCC_CARRY ||
24038 ((N00.getOpcode() == ISD::ANY_EXTEND ||
24039 N00.getOpcode() == ISD::ZERO_EXTEND) &&
24040 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) {
24041 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
24042 APInt ShAmt = N1C->getAPIntValue();
24043 Mask = Mask.shl(ShAmt);
24045 return DAG.getNode(ISD::AND, SDLoc(N), VT,
24046 N00, DAG.getConstant(Mask, VT));
24047 }
24048 }
24050 // Hardware support for vector shifts is sparse which makes us scalarize the
24051 // vector operations in many cases. Also, on Sandy Bridge ADD is faster than
24052 // SHL.
24053 // (shl V, 1) -> add V,V
24054 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
24055 if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
24056 assert(N0.getValueType().isVector() && "Invalid vector shift type");
24057 // We shift all of the values by one. In many cases we do not have
24058 // hardware support for this operation. This is better expressed as an ADD
24059 // of two values.
24060 if (N1SplatC->getZExtValue() == 1)
24061 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
24062 }
24064 return SDValue();
24065 }
24067 /// \brief Returns a vector of 0s if the node in input is a vector logical
24068 /// shift by a constant amount which is known to be bigger than or equal
24069 /// to the vector element size in bits.
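/// For example, (srl v4i32:x, splat(32)) can be folded to a zero vector,
/// since a logical shift by 32 clears every 32-bit lane.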
24070 static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
24071 const X86Subtarget *Subtarget) {
24072 EVT VT = N->getValueType(0);
24074 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
24075 (!Subtarget->hasInt256() ||
24076 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
24077 return SDValue();
24079 SDValue Amt = N->getOperand(1);
24080 SDLoc DL(N);
24081 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
24082 if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
24083 APInt ShiftAmt = AmtSplat->getAPIntValue();
24084 unsigned MaxAmount = VT.getVectorElementType().getSizeInBits();
24086 // SSE2/AVX2 logical shifts always return a vector of 0s
24087 // if the shift amount is bigger than or equal to
24088 // the element size. The constant shift amount will be
24089 // encoded as an 8-bit immediate.
24090 if (ShiftAmt.trunc(8).uge(MaxAmount))
24091 return getZeroVector(VT, Subtarget, DAG, DL);
24092 }
24094 return SDValue();
24095 }
24097 /// PerformShiftCombine - Combine shifts.
24098 static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
24099 TargetLowering::DAGCombinerInfo &DCI,
24100 const X86Subtarget *Subtarget) {
24101 if (N->getOpcode() == ISD::SHL) {
24102 SDValue V = PerformSHLCombine(N, DAG);
24103 if (V.getNode()) return V;
24104 }
24106 if (N->getOpcode() != ISD::SRA) {
24107 // Try to fold this logical shift into a zero vector.
24108 SDValue V = performShiftToAllZeros(N, DAG, Subtarget);
24109 if (V.getNode()) return V;
24110 }
24112 return SDValue();
24113 }
24115 // CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..))
24116 // where both setccs reference the same FP CMP, and rewrite for CMPEQSS
24117 // and friends. Likewise for OR -> CMPNEQSS.
24118 static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
24119 TargetLowering::DAGCombinerInfo &DCI,
24120 const X86Subtarget *Subtarget) {
24121 unsigned opcode;
24123 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
24124 // we're requiring SSE2 for both.
24125 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
24126 SDValue N0 = N->getOperand(0);
24127 SDValue N1 = N->getOperand(1);
24128 SDValue CMP0 = N0->getOperand(1);
24129 SDValue CMP1 = N1->getOperand(1);
24130 SDLoc DL(N);
24132 // The SETCCs should both refer to the same CMP.
24133 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
24134 return SDValue();
24136 SDValue CMP00 = CMP0->getOperand(0);
24137 SDValue CMP01 = CMP0->getOperand(1);
24138 EVT VT = CMP00.getValueType();
24140 if (VT == MVT::f32 || VT == MVT::f64) {
24141 bool ExpectingFlags = false;
24142 // Check for any users that want flags:
24143 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
24144 !ExpectingFlags && UI != UE; ++UI)
24145 switch (UI->getOpcode()) {
24146 default:
24147 case ISD::BR_CC:
24148 case ISD::BRCOND:
24149 case ISD::SELECT:
24150 ExpectingFlags = true;
24151 break;
24152 case ISD::CopyToReg:
24153 case ISD::SIGN_EXTEND:
24154 case ISD::ZERO_EXTEND:
24155 case ISD::ANY_EXTEND:
24156 break;
24157 }
24159 if (!ExpectingFlags) {
24160 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
24161 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
24163 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
24164 X86::CondCode tmp = cc0;
24165 cc0 = cc1;
24166 cc1 = tmp;
24167 }
24169 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
24170 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
24171 // FIXME: need symbolic constants for these magic numbers.
24172 // See X86ATTInstPrinter.cpp:printSSECC().
24173 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
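// In the SSE comparison-predicate encoding, immediate 0 selects CMPEQ and
// immediate 4 selects CMPNEQ (see X86ATTInstPrinter.cpp:printSSECC()).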
24174 if (Subtarget->hasAVX512()) {
24175 SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
24176 CMP01, DAG.getConstant(x86cc, MVT::i8));
24177 if (N->getValueType(0) != MVT::i1)
24178 return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
24179 FSetCC);
24180 return FSetCC;
24181 }
24182 SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
24183 CMP00.getValueType(), CMP00, CMP01,
24184 DAG.getConstant(x86cc, MVT::i8));
24186 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
24187 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
24189 if (is64BitFP && !Subtarget->is64Bit()) {
24190 // On a 32-bit target, we cannot bitcast the 64-bit float to a
24191 // 64-bit integer, since that's not a legal type. Since
24192 // OnesOrZeroesF is all ones or all zeroes, we don't need all the
24193 // bits, but can do this little dance to extract the lowest 32 bits
24194 // and work with those going forward.
24195 SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
24196 OnesOrZeroesF);
24197 SDValue Vector32 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32,
24198 Vector64);
24199 OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
24200 Vector32, DAG.getIntPtrConstant(0));
24201 IntVT = MVT::i32;
24202 }
24204 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, IntVT, OnesOrZeroesF);
24205 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
24206 DAG.getConstant(1, IntVT));
24207 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed);
24208 return OneBitOfTruth;
24209 }
24210 }
24211 }
24212 }
24214 return SDValue();
24215 }
24216 /// CanFoldXORWithAllOnes - Test whether the XOR operand is an all-ones vector
24217 /// so it can be folded inside ANDNP.
24218 static bool CanFoldXORWithAllOnes(const SDNode *N) {
24219 EVT VT = N->getValueType(0);
24221 // Match direct AllOnes for 128 and 256-bit vectors
24222 if (ISD::isBuildVectorAllOnes(N))
24223 return true;
24225 // Look through a bit convert.
24226 if (N->getOpcode() == ISD::BITCAST)
24227 N = N->getOperand(0).getNode();
24229 // Sometimes the operand may come from an insert_subvector building a 256-bit
24230 // all-ones vector.
24231 if (VT.is256BitVector() &&
24232 N->getOpcode() == ISD::INSERT_SUBVECTOR) {
24233 SDValue V1 = N->getOperand(0);
24234 SDValue V2 = N->getOperand(1);
24236 if (V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
24237 V1.getOperand(0).getOpcode() == ISD::UNDEF &&
24238 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
24239 ISD::isBuildVectorAllOnes(V2.getNode()))
24240 return true;
24241 }
24243 return false;
24244 }
24246 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
24247 // register. In most cases we actually compare or select YMM-sized registers
24248 // and mixing the two types creates horrible code. This method optimizes
24249 // some of the transition sequences.
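// For example, (zext (and (trunc x), (trunc y))) can become a single wide
// AND of x and y followed by a mask, keeping the computation in YMM
// registers instead of bouncing through XMM.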
24250 static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
24251 TargetLowering::DAGCombinerInfo &DCI,
24252 const X86Subtarget *Subtarget) {
24253 EVT VT = N->getValueType(0);
24254 if (!VT.is256BitVector())
24255 return SDValue();
24257 assert((N->getOpcode() == ISD::ANY_EXTEND ||
24258 N->getOpcode() == ISD::ZERO_EXTEND ||
24259 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
24261 SDValue Narrow = N->getOperand(0);
24262 EVT NarrowVT = Narrow->getValueType(0);
24263 if (!NarrowVT.is128BitVector())
24264 return SDValue();
24266 if (Narrow->getOpcode() != ISD::XOR &&
24267 Narrow->getOpcode() != ISD::AND &&
24268 Narrow->getOpcode() != ISD::OR)
24269 return SDValue();
24271 SDValue N0 = Narrow->getOperand(0);
24272 SDValue N1 = Narrow->getOperand(1);
24273 SDLoc DL(Narrow);
24275 // The Left side has to be a trunc.
24276 if (N0.getOpcode() != ISD::TRUNCATE)
24277 return SDValue();
24279 // The type of the truncated inputs.
24280 EVT WideVT = N0->getOperand(0)->getValueType(0);
24281 if (WideVT != VT)
24282 return SDValue();
24284 // The right side has to be a 'trunc' or a constant vector.
24285 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE;
24286 ConstantSDNode *RHSConstSplat = nullptr;
24287 if (auto *RHSBV = dyn_cast<BuildVectorSDNode>(N1))
24288 RHSConstSplat = RHSBV->getConstantSplatNode();
24289 if (!RHSTrunc && !RHSConstSplat)
24290 return SDValue();
24292 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24294 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT))
24295 return SDValue();
24297 // Set N0 and N1 to hold the inputs to the new wide operation.
24298 N0 = N0->getOperand(0);
24299 if (RHSConstSplat) {
24300 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getScalarType(),
24301 SDValue(RHSConstSplat, 0));
24302 SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1);
24303 N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C);
24304 } else if (RHSTrunc) {
24305 N1 = N1->getOperand(0);
24306 }
24308 // Generate the wide operation.
24309 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1);
24310 unsigned Opcode = N->getOpcode();
24311 switch (Opcode) {
24312 case ISD::ANY_EXTEND:
24313 return Op;
24314 case ISD::ZERO_EXTEND: {
24315 unsigned InBits = NarrowVT.getScalarType().getSizeInBits();
24316 APInt Mask = APInt::getAllOnesValue(InBits);
24317 Mask = Mask.zext(VT.getScalarType().getSizeInBits());
24318 return DAG.getNode(ISD::AND, DL, VT,
24319 Op, DAG.getConstant(Mask, VT));
24320 }
24321 case ISD::SIGN_EXTEND:
24322 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
24323 Op, DAG.getValueType(NarrowVT));
24324 default:
24325 llvm_unreachable("Unexpected opcode");
24326 }
24327 }
24329 static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
24330 TargetLowering::DAGCombinerInfo &DCI,
24331 const X86Subtarget *Subtarget) {
24332 EVT VT = N->getValueType(0);
24333 if (DCI.isBeforeLegalizeOps())
24334 return SDValue();
24336 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
24337 if (R.getNode())
24338 return R;
24340 // Create BEXTR instructions
24341 // BEXTR is ((X >> imm) & (2**size-1))
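// For example, (and (srl x, 8), 0xff) becomes BEXTR with the control
// immediate (8 | (8 << 8)): start bit 8, field length 8.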
24342 if (VT == MVT::i32 || VT == MVT::i64) {
24343 SDValue N0 = N->getOperand(0);
24344 SDValue N1 = N->getOperand(1);
24345 SDLoc DL(N);
24347 // Check for BEXTR.
24348 if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
24349 (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
24350 ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
24351 ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
24352 if (MaskNode && ShiftNode) {
24353 uint64_t Mask = MaskNode->getZExtValue();
24354 uint64_t Shift = ShiftNode->getZExtValue();
24355 if (isMask_64(Mask)) {
24356 uint64_t MaskSize = CountPopulation_64(Mask);
24357 if (Shift + MaskSize <= VT.getSizeInBits())
24358 return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
24359 DAG.getConstant(Shift | (MaskSize << 8), VT));
24360 }
24361 }
24362 }
24364 return SDValue();
24365 }
24367 // Want to form ANDNP nodes:
24368 // 1) In the hopes of then easily combining them with OR and AND nodes
24369 // to form PBLEND/PSIGN.
24370 // 2) To match ANDN packed intrinsics
24371 if (VT != MVT::v2i64 && VT != MVT::v4i64)
24372 return SDValue();
24374 SDValue N0 = N->getOperand(0);
24375 SDValue N1 = N->getOperand(1);
24376 SDLoc DL(N);
24378 // Check LHS for vnot
24379 if (N0.getOpcode() == ISD::XOR &&
24380 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
24381 CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
24382 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);
24384 // Check RHS for vnot
24385 if (N1.getOpcode() == ISD::XOR &&
24386 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
24387 CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
24388 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);
24390 return SDValue();
24391 }
24393 static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
24394 TargetLowering::DAGCombinerInfo &DCI,
24395 const X86Subtarget *Subtarget) {
24396 if (DCI.isBeforeLegalizeOps())
24397 return SDValue();
24399 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
24400 if (R.getNode())
24401 return R;
24403 SDValue N0 = N->getOperand(0);
24404 SDValue N1 = N->getOperand(1);
24405 EVT VT = N->getValueType(0);
24407 // look for psign/blend
24408 if (VT == MVT::v2i64 || VT == MVT::v4i64) {
24409 if (!Subtarget->hasSSSE3() ||
24410 (VT == MVT::v4i64 && !Subtarget->hasInt256()))
24411 return SDValue();
24413 // Canonicalize pandn to RHS
24414 if (N0.getOpcode() == X86ISD::ANDNP)
24415 std::swap(N0, N1);
24416 // or (and (m, y), (pandn m, x))
24417 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
24418 SDValue Mask = N1.getOperand(0);
24419 SDValue X = N1.getOperand(1);
24420 SDValue Y;
24421 if (N0.getOperand(0) == Mask)
24422 Y = N0.getOperand(1);
24423 if (N0.getOperand(1) == Mask)
24424 Y = N0.getOperand(0);
24426 // Check to see if the mask appeared in both the AND and the ANDNP.
24427 if (!Y.getNode())
24428 return SDValue();
24430 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
24431 // Look through mask bitcast.
24432 if (Mask.getOpcode() == ISD::BITCAST)
24433 Mask = Mask.getOperand(0);
24434 if (X.getOpcode() == ISD::BITCAST)
24435 X = X.getOperand(0);
24436 if (Y.getOpcode() == ISD::BITCAST)
24437 Y = Y.getOperand(0);
24439 EVT MaskVT = Mask.getValueType();
24441 // Validate that the Mask operand is a vector sra node.
24442 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
24443 // there is no psrai.b
24444 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
24445 unsigned SraAmt = ~0;
24446 if (Mask.getOpcode() == ISD::SRA) {
24447 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
24448 if (auto *AmtConst = AmtBV->getConstantSplatNode())
24449 SraAmt = AmtConst->getZExtValue();
24450 } else if (Mask.getOpcode() == X86ISD::VSRAI) {
24451 SDValue SraC = Mask.getOperand(1);
24452 SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
24453 }
24454 if ((SraAmt + 1) != EltBits)
24455 return SDValue();
24457 SDLoc DL(N);
24459 // Now we know we at least have a pblendvb with the mask val. See if
24460 // we can form a psignb/w/d.
24461 // psign = x.type == y.type == mask.type && y = sub(0, x);
24462 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
24463 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
24464 X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
24465 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
24466 "Unsupported VT for PSIGN");
24467 Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0));
24468 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
24469 }
24470 // PBLENDVB only available on SSE 4.1
24471 if (!Subtarget->hasSSE41())
24472 return SDValue();
24474 EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;
24476 X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X);
24477 Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y);
24478 Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask);
24479 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
24480 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
24481 }
24482 }
24484 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
24485 return SDValue();
24487 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
24488 MachineFunction &MF = DAG.getMachineFunction();
24489 bool OptForSize = MF.getFunction()->getAttributes().
24490 hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);
24492 // SHLD/SHRD instructions have lower register pressure, but on some
24493 // platforms they have higher latency than the equivalent
24494 // series of shifts/or that would otherwise be generated.
24495 // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
24496 // have higher latencies and we are not optimizing for size.
24497 if (!OptForSize && Subtarget->isSHLDSlow())
24498 return SDValue();
24500 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
24501 std::swap(N0, N1);
24502 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
24503 return SDValue();
24504 if (!N0.hasOneUse() || !N1.hasOneUse())
24505 return SDValue();
24507 SDValue ShAmt0 = N0.getOperand(1);
24508 if (ShAmt0.getValueType() != MVT::i8)
24509 return SDValue();
24510 SDValue ShAmt1 = N1.getOperand(1);
24511 if (ShAmt1.getValueType() != MVT::i8)
24512 return SDValue();
24513 if (ShAmt0.getOpcode() == ISD::TRUNCATE)
24514 ShAmt0 = ShAmt0.getOperand(0);
24515 if (ShAmt1.getOpcode() == ISD::TRUNCATE)
24516 ShAmt1 = ShAmt1.getOperand(0);
24518 SDLoc DL(N);
24519 unsigned Opc = X86ISD::SHLD;
24520 SDValue Op0 = N0.getOperand(0);
24521 SDValue Op1 = N1.getOperand(0);
24522 if (ShAmt0.getOpcode() == ISD::SUB) {
24523 Opc = X86ISD::SHRD;
24524 std::swap(Op0, Op1);
24525 std::swap(ShAmt0, ShAmt1);
24526 }
24528 unsigned Bits = VT.getSizeInBits();
24529 if (ShAmt1.getOpcode() == ISD::SUB) {
24530 SDValue Sum = ShAmt1.getOperand(0);
24531 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
24532 SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
24533 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
24534 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
24535 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
24536 return DAG.getNode(Opc, DL, VT,
24537 Op0, Op1,
24538 DAG.getNode(ISD::TRUNCATE, DL,
24539 MVT::i8, ShAmt0));
24540 }
24541 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
24542 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
24543 if (ShAmt0C &&
24544 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
24545 return DAG.getNode(Opc, DL, VT,
24546 N0.getOperand(0), N1.getOperand(0),
24547 DAG.getNode(ISD::TRUNCATE, DL,
24548 MVT::i8, ShAmt0));
24549 }
24551 return SDValue();
24552 }
24554 // Generate NEG and CMOV for integer abs.
24555 static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
24556 EVT VT = N->getValueType(0);
24558 // Since X86 does not have CMOV for 8-bit integer, we don't convert
24559 // 8-bit integer abs to NEG and CMOV.
24560 if (VT.isInteger() && VT.getSizeInBits() == 8)
24561 return SDValue();
24563 SDValue N0 = N->getOperand(0);
24564 SDValue N1 = N->getOperand(1);
24565 SDLoc DL(N);
24567 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
24568 // and change it to SUB and CMOV.
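// In other words, abs(x) written as (x + (x >> 31)) ^ (x >> 31) is
// re-materialized as neg = sub(0, x), which sets EFLAGS, followed by a
// CMOV with COND_GE that picks neg when 0 - x >= 0 and x otherwise.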
24569 if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
24570 N0.getOpcode() == ISD::ADD &&
24571 N0.getOperand(1) == N1 &&
24572 N1.getOpcode() == ISD::SRA &&
24573 N1.getOperand(0) == N0.getOperand(0))
24574 if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
24575 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) {
24576 // Generate SUB & CMOV.
24577 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
24578 DAG.getConstant(0, VT), N0.getOperand(0));
24580 SDValue Ops[] = { N0.getOperand(0), Neg,
24581 DAG.getConstant(X86::COND_GE, MVT::i8),
24582 SDValue(Neg.getNode(), 1) };
24583 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
24584 }
24585 return SDValue();
24586 }
24588 // PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes
24589 static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
24590 TargetLowering::DAGCombinerInfo &DCI,
24591 const X86Subtarget *Subtarget) {
24592 if (DCI.isBeforeLegalizeOps())
24593 return SDValue();
24595 if (Subtarget->hasCMov()) {
24596 SDValue RV = performIntegerAbsCombine(N, DAG);
24597 if (RV.getNode())
24598 return RV;
24599 }
24601 return SDValue();
24602 }
24604 /// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
24605 static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
24606 TargetLowering::DAGCombinerInfo &DCI,
24607 const X86Subtarget *Subtarget) {
24608 LoadSDNode *Ld = cast<LoadSDNode>(N);
24609 EVT RegVT = Ld->getValueType(0);
24610 EVT MemVT = Ld->getMemoryVT();
24611 SDLoc dl(Ld);
24612 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24614 // For chips with slow 32-byte unaligned loads, break the 32-byte operation
24615 // into two 16-byte operations.
24616 ISD::LoadExtType Ext = Ld->getExtensionType();
24617 unsigned Alignment = Ld->getAlignment();
24618 bool IsAligned = Alignment == 0 || Alignment >= MemVT.getSizeInBits()/8;
24619 if (RegVT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
24620 !DCI.isBeforeLegalizeOps() && !IsAligned && Ext == ISD::NON_EXTLOAD) {
24621 unsigned NumElems = RegVT.getVectorNumElements();
24622 if (NumElems < 2)
24623 return SDValue();
24625 SDValue Ptr = Ld->getBasePtr();
24626 SDValue Increment = DAG.getConstant(16, TLI.getPointerTy());
24628 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
24629 NumElems/2);
24630 SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
24631 Ld->getPointerInfo(), Ld->isVolatile(),
24632 Ld->isNonTemporal(), Ld->isInvariant(),
24633 Alignment);
24634 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
24635 SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
24636 Ld->getPointerInfo(), Ld->isVolatile(),
24637 Ld->isNonTemporal(), Ld->isInvariant(),
24638 std::min(16U, Alignment));
24639 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
24640 Load1.getValue(1),
24641 Load2.getValue(1));
24643 SDValue NewVec = DAG.getUNDEF(RegVT);
24644 NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl);
24645 NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl);
24646 return DCI.CombineTo(N, NewVec, TF, true);
24647 }
24649 return SDValue();
24650 }
24652 /// PerformMLOADCombine - Resolve extending loads
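/// The sext-masked-load is widened by the ratio between the result and
/// memory element sizes; e.g. a sext load of v8i16 to v8i32 becomes a
/// v16i16 masked load whose upper-half mask lanes are zero, followed by a
/// VSEXT of the low half to v8i32.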
24653 static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
24654 TargetLowering::DAGCombinerInfo &DCI,
24655 const X86Subtarget *Subtarget) {
24656 MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
24657 if (Mld->getExtensionType() != ISD::SEXTLOAD)
24658 return SDValue();
24660 EVT VT = Mld->getValueType(0);
24661 unsigned NumElems = VT.getVectorNumElements();
24662 EVT LdVT = Mld->getMemoryVT();
24663 SDLoc dl(Mld);
24665 assert(LdVT != VT && "Cannot extend to the same type");
24666 unsigned ToSz = VT.getVectorElementType().getSizeInBits();
24667 unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
24668 // From, To sizes and ElemCount must be pow of two
24669 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
24670 "Unexpected size for extending masked load");
24672 unsigned SizeRatio = ToSz / FromSz;
24673 assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
24675 // Create a type on which we perform the shuffle
24676 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
24677 LdVT.getScalarType(), NumElems*SizeRatio);
24678 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
24680 // Convert Src0 value
24681 SDValue WideSrc0 = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mld->getSrc0());
24682 if (Mld->getSrc0().getOpcode() != ISD::UNDEF) {
24683 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
24684 for (unsigned i = 0; i != NumElems; ++i)
24685 ShuffleVec[i] = i * SizeRatio;
24687 // Can't shuffle using an illegal type.
24688 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
24689 && "WideVecVT should be legal");
24690 WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
24691 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
24692 }
24693 // Prepare the new mask
24694 SDValue NewMask;
24695 SDValue Mask = Mld->getMask();
24696 if (Mask.getValueType() == VT) {
24697 // Mask and original value have the same type
24698 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
24699 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
24700 for (unsigned i = 0; i != NumElems; ++i)
24701 ShuffleVec[i] = i * SizeRatio;
24702 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
24703 ShuffleVec[i] = NumElems*SizeRatio;
24704 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
24705 DAG.getConstant(0, WideVecVT),
24706 &ShuffleVec[0]);
24707 }
24708 else {
24709 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
24710 unsigned WidenNumElts = NumElems*SizeRatio;
24711 unsigned MaskNumElts = VT.getVectorNumElements();
24712 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
24713 WidenNumElts);
24715 unsigned NumConcat = WidenNumElts / MaskNumElts;
24716 SmallVector<SDValue, 16> Ops(NumConcat);
24717 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
24718 Ops[0] = Mask;
24719 for (unsigned i = 1; i != NumConcat; ++i)
24720 Ops[i] = ZeroVal;
24722 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
24723 }
24725 SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
24726 Mld->getBasePtr(), NewMask, WideSrc0,
24727 Mld->getMemoryVT(), Mld->getMemOperand(),
24728 ISD::NON_EXTLOAD);
24729 SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd);
24730 return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
24731 }
24733 /// PerformMSTORECombine - Resolve truncating stores
24734 static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
24735 const X86Subtarget *Subtarget) {
24736 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
24737 if (!Mst->isTruncatingStore())
24738 return SDValue();
24740 EVT VT = Mst->getValue().getValueType();
24741 unsigned NumElems = VT.getVectorNumElements();
24742 EVT StVT = Mst->getMemoryVT();
24743 SDLoc dl(Mst);
24745 assert(StVT != VT && "Cannot truncate to the same type");
24746 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
24747 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
24749 // From, To sizes and ElemCount must be pow of two
24750 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
24751 "Unexpected size for truncating masked store");
24752 // We are going to use the original vector elt for storing.
24753 // Accumulated smaller vector elements must be a multiple of the store size.
24754 assert (((NumElems * FromSz) % ToSz) == 0 &&
24755 "Unexpected ratio for truncating masked store");
24757 unsigned SizeRatio = FromSz / ToSz;
24758 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
24760 // Create a type on which we perform the shuffle
24761 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
24762 StVT.getScalarType(), NumElems*SizeRatio);
24764 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
24766 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mst->getValue());
24767 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
24768 for (unsigned i = 0; i != NumElems; ++i)
24769 ShuffleVec[i] = i * SizeRatio;
24771 // Can't shuffle using an illegal type.
24772 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
24773 && "WideVecVT should be legal");
24775 SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
24776 DAG.getUNDEF(WideVecVT),
24777 &ShuffleVec[0]);
24779 SDValue NewMask;
24780 SDValue Mask = Mst->getMask();
24781 if (Mask.getValueType() == VT) {
24782 // Mask and original value have the same type
24783 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
24784 for (unsigned i = 0; i != NumElems; ++i)
24785 ShuffleVec[i] = i * SizeRatio;
24786 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
24787 ShuffleVec[i] = NumElems*SizeRatio;
24788 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
24789 DAG.getConstant(0, WideVecVT),
24790 &ShuffleVec[0]);
24791 }
24792 else {
24793 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
24794 unsigned WidenNumElts = NumElems*SizeRatio;
24795 unsigned MaskNumElts = VT.getVectorNumElements();
24796 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
24797 WidenNumElts);
24799 unsigned NumConcat = WidenNumElts / MaskNumElts;
24800 SmallVector<SDValue, 16> Ops(NumConcat);
24801 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
24802 Ops[0] = Mask;
24803 for (unsigned i = 1; i != NumConcat; ++i)
24804 Ops[i] = ZeroVal;
24806 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
24807 }
24809 return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal, Mst->getBasePtr(),
24810 NewMask, StVT, Mst->getMemOperand(), false);
24811 }
24812 /// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
24813 static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
24814 const X86Subtarget *Subtarget) {
24815 StoreSDNode *St = cast<StoreSDNode>(N);
24816 EVT VT = St->getValue().getValueType();
24817 EVT StVT = St->getMemoryVT();
24818 SDLoc dl(St);
24819 SDValue StoredVal = St->getOperand(1);
24820 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24822 // If we are saving a concatenation of two XMM registers and 32-byte stores
24823 // are slow, such as on Sandy Bridge, perform two 16-byte stores.
24824 unsigned Alignment = St->getAlignment();
24825 bool IsAligned = Alignment == 0 || Alignment >= VT.getSizeInBits()/8;
24826 if (VT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
24827 StVT == VT && !IsAligned) {
24828 unsigned NumElems = VT.getVectorNumElements();
24829 if (NumElems < 2)
24830 return SDValue();
24832 SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl);
24833 SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl);
24835 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy());
24836 SDValue Ptr0 = St->getBasePtr();
24837 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride);
24839 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0,
24840 St->getPointerInfo(), St->isVolatile(),
24841 St->isNonTemporal(), Alignment);
24842 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1,
24843 St->getPointerInfo(), St->isVolatile(),
24844 St->isNonTemporal(),
24845 std::min(16U, Alignment));
24846 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
24847 }
24849 // Optimize trunc store (of multiple scalars) to shuffle and store.
24850 // First, pack all of the elements in one place. Next, store to memory
24851 // in fewer chunks.
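// For example, a v8i32-to-v8i16 truncating store becomes a v16i16 shuffle
// with mask <0,2,4,...,14> that packs the narrow lanes at the bottom of
// the register, followed by two i64 stores on x86-64 (or f64 stores when
// i64 is not legal).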
24852 if (St->isTruncatingStore() && VT.isVector()) {
24853 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24854 unsigned NumElems = VT.getVectorNumElements();
24855 assert(StVT != VT && "Cannot truncate to the same type");
24856 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
24857 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
24859 // From, To sizes and ElemCount must be pow of two
24860 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
24861 // We are going to use the original vector elt for storing.
24862 // Accumulated smaller vector elements must be a multiple of the store size.
24863 if (0 != (NumElems * FromSz) % ToSz) return SDValue();
24865 unsigned SizeRatio = FromSz / ToSz;
24867 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
24869 // Create a type on which we perform the shuffle
24870 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
24871 StVT.getScalarType(), NumElems*SizeRatio);
24873 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
24875 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue());
24876 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
24877 for (unsigned i = 0; i != NumElems; ++i)
24878 ShuffleVec[i] = i * SizeRatio;
24880 // Can't shuffle using an illegal type.
24881 if (!TLI.isTypeLegal(WideVecVT))
24882 return SDValue();
24884 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
24885 DAG.getUNDEF(WideVecVT),
24886 &ShuffleVec[0]);
24887 // At this point all of the data is stored at the bottom of the
24888 // register. We now need to save it to mem.
24890 // Find the largest store unit
24891 MVT StoreType = MVT::i8;
24892 for (MVT Tp : MVT::integer_valuetypes()) {
24893 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
24894 StoreType = Tp;
24895 }
24897 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64.
24898 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
24899 (64 <= NumElems * ToSz))
24900 StoreType = MVT::f64;
24902 // Bitcast the original vector into a vector of store-size units
24903 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
24904 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
24905 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
24906 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff);
24907 SmallVector<SDValue, 8> Chains;
24908 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
24909 TLI.getPointerTy());
24910 SDValue Ptr = St->getBasePtr();
24912 // Perform one or more big stores into memory.
24913 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
24914 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
24915 StoreType, ShuffWide,
24916 DAG.getIntPtrConstant(i));
24917 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr,
24918 St->getPointerInfo(), St->isVolatile(),
24919 St->isNonTemporal(), St->getAlignment());
24920 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
24921 Chains.push_back(Ch);
24922 }
24924 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
24925 }
24927 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
24928 // the FP state in cases where an emms may be missing.
24929 // A preferable solution to the general problem is to figure out the right
24930 // places to insert EMMS. This qualifies as a quick hack.
24932 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
24933 if (VT.getSizeInBits() != 64)
24934 return SDValue();
24936 const Function *F = DAG.getMachineFunction().getFunction();
24937 bool NoImplicitFloatOps = F->getAttributes().
24938 hasAttribute(AttributeSet::FunctionIndex, Attribute::NoImplicitFloat);
24939 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
24940 && Subtarget->hasSSE2();
24941 if ((VT.isVector() ||
24942 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
24943 isa<LoadSDNode>(St->getValue()) &&
24944 !cast<LoadSDNode>(St->getValue())->isVolatile() &&
24945 St->getChain().hasOneUse() && !St->isVolatile()) {
24946 SDNode* LdVal = St->getValue().getNode();
24947 LoadSDNode *Ld = nullptr;
24948 int TokenFactorIndex = -1;
24949 SmallVector<SDValue, 8> Ops;
24950 SDNode* ChainVal = St->getChain().getNode();
24951 // Must be a store of a load. We currently handle two cases: the load
24952 // is a direct child, and it's under an intervening TokenFactor. It is
24953 // possible to dig deeper under nested TokenFactors.
24954 if (ChainVal == LdVal)
24955 Ld = cast<LoadSDNode>(St->getChain());
24956 else if (St->getValue().hasOneUse() &&
24957 ChainVal->getOpcode() == ISD::TokenFactor) {
24958 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
24959 if (ChainVal->getOperand(i).getNode() == LdVal) {
24960 TokenFactorIndex = i;
24961 Ld = cast<LoadSDNode>(St->getValue());
24962 } else
24963 Ops.push_back(ChainVal->getOperand(i));
24964 }
24965 }
24967 if (!Ld || !ISD::isNormalLoad(Ld))
24968 return SDValue();
24970 // If this is not the MMX case, i.e. we are just turning i64 load/store
24971 // into f64 load/store, avoid the transformation if there are multiple
24972 // uses of the loaded value.
24973 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
24974 return SDValue();
24976 SDLoc LdDL(Ld);
24977 SDLoc StDL(N);
24978 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
24979 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
24980 // pair instead.
24981 if (Subtarget->is64Bit() || F64IsLegal) {
24982 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
24983 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
24984 Ld->getPointerInfo(), Ld->isVolatile(),
24985 Ld->isNonTemporal(), Ld->isInvariant(),
24986 Ld->getAlignment());
24987 SDValue NewChain = NewLd.getValue(1);
24988 if (TokenFactorIndex != -1) {
24989 Ops.push_back(NewChain);
24990 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
24992 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
24993 St->getPointerInfo(),
24994 St->isVolatile(), St->isNonTemporal(),
24995 St->getAlignment());
24996 }
24998 // Otherwise, lower to two pairs of 32-bit loads / stores.
24999 SDValue LoAddr = Ld->getBasePtr();
25000 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
25001 DAG.getConstant(4, MVT::i32));
25003 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
25004 Ld->getPointerInfo(),
25005 Ld->isVolatile(), Ld->isNonTemporal(),
25006 Ld->isInvariant(), Ld->getAlignment());
25007 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
25008 Ld->getPointerInfo().getWithOffset(4),
25009 Ld->isVolatile(), Ld->isNonTemporal(),
25010 Ld->isInvariant(),
25011 MinAlign(Ld->getAlignment(), 4));
25013 SDValue NewChain = LoLd.getValue(1);
25014 if (TokenFactorIndex != -1) {
25015 Ops.push_back(LoLd);
25016 Ops.push_back(HiLd);
25017 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25018 }
25020 LoAddr = St->getBasePtr();
25021 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
25022 DAG.getConstant(4, MVT::i32));
25024 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
25025 St->getPointerInfo(),
25026 St->isVolatile(), St->isNonTemporal(),
25027 St->getAlignment());
25028 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
25029 St->getPointerInfo().getWithOffset(4),
25030 St->isVolatile(),
25031 St->isNonTemporal(),
25032 MinAlign(St->getAlignment(), 4));
25033 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
25034 }
25035 return SDValue();
25036 }
25038 /// Return 'true' if this vector operation is "horizontal"
25039 /// and return the operands for the horizontal operation in LHS and RHS. A
25040 /// horizontal operation performs the binary operation on successive elements
25041 /// of its first operand, then on successive elements of its second operand,
25042 /// returning the resulting values in a vector. For example, if
25043 /// A = < float a0, float a1, float a2, float a3 >
25044 /// and
25045 /// B = < float b0, float b1, float b2, float b3 >
25046 /// then the result of doing a horizontal operation on A and B is
25047 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
25048 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
25049 /// A horizontal-op B, for some already available A and B, and if so then LHS is
25050 /// set to A, RHS to B, and the routine returns 'true'.
25051 /// Note that the binary operation should have the property that if one of the
25052 /// operands is UNDEF then the result is UNDEF.
25053 static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
25054 // Look for the following pattern: if
25055 // A = < float a0, float a1, float a2, float a3 >
25056 // B = < float b0, float b1, float b2, float b3 >
25057 // and
25058 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
25059 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
25060 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
25061 // which is A horizontal-op B.
25063 // At least one of the operands should be a vector shuffle.
25064 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
25065 RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
25066 return false;
25068 MVT VT = LHS.getSimpleValueType();
25070 assert((VT.is128BitVector() || VT.is256BitVector()) &&
25071 "Unsupported vector type for horizontal add/sub");
25073 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
25074 // operate independently on 128-bit lanes.
25075 unsigned NumElts = VT.getVectorNumElements();
25076 unsigned NumLanes = VT.getSizeInBits()/128;
25077 unsigned NumLaneElts = NumElts / NumLanes;
25078 assert((NumLaneElts % 2 == 0) &&
25079 "Vector type should have an even number of elements in each lane");
25080 unsigned HalfLaneElts = NumLaneElts/2;
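
  // Worked example (added illustration): for v8f32 with AVX, the per-lane
  // rule above means a horizontal op is matched only when the masks are
  //   LMask = <0, 2, 8, 10, 4, 6, 12, 14>
  //   RMask = <1, 3, 9, 11, 5, 7, 13, 15>
  // i.e. lane 0 pairs elements {0..3} of A and B, and lane 1 independently
  // pairs elements {4..7}, matching how VHADDPS treats its 128-bit lanes.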

  // View LHS in the form
  //   LHS = VECTOR_SHUFFLE A, B, LMask
  // If LHS is not a shuffle then pretend it is the shuffle
  //   LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
  // NOTE: in what follows a default initialized SDValue represents an UNDEF of
  // type VT.
  SDValue A, B;
  SmallVector<int, 16> LMask(NumElts);
  if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
    if (LHS.getOperand(0).getOpcode() != ISD::UNDEF)
      A = LHS.getOperand(0);
    if (LHS.getOperand(1).getOpcode() != ISD::UNDEF)
      B = LHS.getOperand(1);
    ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
    std::copy(Mask.begin(), Mask.end(), LMask.begin());
  } else {
    if (LHS.getOpcode() != ISD::UNDEF)
      A = LHS;
    for (unsigned i = 0; i != NumElts; ++i)
      LMask[i] = i;
  }

  // Likewise, view RHS in the form
  //   RHS = VECTOR_SHUFFLE C, D, RMask
  SDValue C, D;
  SmallVector<int, 16> RMask(NumElts);
  if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
    if (RHS.getOperand(0).getOpcode() != ISD::UNDEF)
      C = RHS.getOperand(0);
    if (RHS.getOperand(1).getOpcode() != ISD::UNDEF)
      D = RHS.getOperand(1);
    ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
    std::copy(Mask.begin(), Mask.end(), RMask.begin());
  } else {
    if (RHS.getOpcode() != ISD::UNDEF)
      C = RHS;
    for (unsigned i = 0; i != NumElts; ++i)
      RMask[i] = i;
  }

  // Check that the shuffles are both shuffling the same vectors.
  if (!(A == C && B == D) && !(A == D && B == C))
    return false;

  // If everything is UNDEF then bail out: it would be better to fold to UNDEF.
  if (!A.getNode() && !B.getNode())
    return false;

  // If A and B occur in reverse order in RHS, then "swap" them (which means
  // rewriting the mask).
  if (A != C)
    CommuteVectorShuffleMask(RMask, NumElts);

  // At this point LHS and RHS are equivalent to
  //   LHS = VECTOR_SHUFFLE A, B, LMask
  //   RHS = VECTOR_SHUFFLE A, B, RMask
  // Check that the masks correspond to performing a horizontal operation.
  for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
    for (unsigned i = 0; i != NumLaneElts; ++i) {
      int LIdx = LMask[i+l], RIdx = RMask[i+l];

      // Ignore any UNDEF components.
      if (LIdx < 0 || RIdx < 0 ||
          (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
          (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
        continue;

      // Check that successive elements are being operated on. If not, this is
      // not a horizontal operation.
      unsigned Src = (i/HalfLaneElts); // each lane is split between srcs
      int Index = 2*(i%HalfLaneElts) + NumElts*Src + l;
      if (!(LIdx == Index && RIdx == Index + 1) &&
          !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
        return false;
    }
  }

  LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
  RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
  return true;
}

/// Do target-specific dag combines on floating point adds.
static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
                                  const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // Try to synthesize horizontal adds from adds of shuffles.
  if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
       (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
      isHorizontalBinOp(LHS, RHS, true))
    return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS);
  return SDValue();
}

/// Do target-specific dag combines on floating point subs.
static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
                                  const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);

  // Try to synthesize horizontal subs from subs of shuffles.
  if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
       (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
      isHorizontalBinOp(LHS, RHS, false))
    return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS);
  return SDValue();
}

/// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);

  // F[X]OR(0.0, x) -> x
  // F[X]OR(x, 0.0) -> x
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  return SDValue();
}

/// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
  assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);

  // Only perform these optimizations if unsafe FP math is enabled.
  if (!DAG.getTarget().Options.UnsafeFPMath)
    return SDValue();

  // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
  // into FMINC and FMAXC, which are commutative operations.
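  // (Added note.) The plain FMIN/FMAX nodes model the asymmetric MINPS/MAXPS
  // semantics: when the comparison is unordered (e.g. one operand is NaN),
  // the hardware returns the *second* operand, so min(x, NaN) != min(NaN, x);
  // signed zeros behave asymmetrically as well. That is why the commutative
  // FMINC/FMAXC forms are only safe under unsafe-fp-math.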
  unsigned NewOp = 0;
  switch (N->getOpcode()) {
  default: llvm_unreachable("unknown opcode");
  case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
  case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
  }

  return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
                     N->getOperand(0), N->getOperand(1));
}

/// Do target-specific dag combines on X86ISD::FAND nodes.
static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
  // FAND(0.0, x) -> 0.0
  // FAND(x, 0.0) -> 0.0
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(0);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  return SDValue();
}

/// Do target-specific dag combines on X86ISD::FANDN nodes.
static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG) {
  // FANDN(x, 0.0) -> 0.0
  // FANDN(0.0, x) -> x
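  // (Added note.) X86ISD::FANDN has ANDNPS-style semantics, i.e. it computes
  // (~Op0) & Op1 bitwise, which is why the two zero folds are asymmetric.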
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
    if (C->getValueAPF().isPosZero())
      return N->getOperand(1);
  return SDValue();
}

static SDValue PerformBTCombine(SDNode *N,
                                SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI) {
  // BT ignores high bits in the bit index operand.
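  // (Added illustration.) For a register operand, "bt" uses the index modulo
  // the operand width, so for i32 only the low Log2_32(32) = 5 index bits are
  // demanded; a mask feeding the index can therefore be dropped, e.g.:
  //   (X86ISD::BT x, (and idx, 31)) --> (X86ISD::BT x, idx)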
  SDValue Op1 = N->getOperand(1);
  if (Op1.hasOneUse()) {
    unsigned BitWidth = Op1.getValueSizeInBits();
    APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
    APInt KnownZero, KnownOne;
    TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
                                          !DCI.isBeforeLegalizeOps());
    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
        TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
      DCI.CommitTargetLoweringOpt(TLO);
  }

  return SDValue();
}

static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
  SDValue Op = N->getOperand(0);
  if (Op.getOpcode() == ISD::BITCAST)
    Op = Op.getOperand(0);
  EVT VT = N->getValueType(0), OpVT = Op.getValueType();
  if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
      VT.getVectorElementType().getSizeInBits() ==
      OpVT.getVectorElementType().getSizeInBits()) {
    return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
  }
  return SDValue();
}

static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
                                               const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  if (!VT.isVector())
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
  SDLoc dl(N);

  // SIGN_EXTEND_INREG to v4i64 is an expensive operation on both SSE and AVX2
  // since there is no sign-extended shift right operation on a vector with
  // 64-bit elements.
  // (sext_in_reg (v4i64 anyext (v4i32 x)), ExtraVT) ->
  //   (v4i64 sext (v4i32 sext_in_reg (v4i32 x, ExtraVT)))
  if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
                           N0.getOpcode() == ISD::SIGN_EXTEND)) {
    SDValue N00 = N0.getOperand(0);

    // EXTLOAD has a better solution on AVX2:
    // it may be replaced with an X86ISD::VSEXT node.
    if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256())
      if (!ISD::isNormalLoad(N00.getNode()))
        return SDValue();

    if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
      SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
                                N00, N1);
      return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
    }
  }
  return SDValue();
}

static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget *Subtarget) {
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  // (i8,i32 sext (sdivrem (i8 x, i8 y))) ->
  // (i8,i32 (sdivrem_sext_hreg (i8 x, i8 y)))
  // This exposes the sext to the sdivrem lowering, so that it directly extends
  // from AH (which we otherwise need to do contortions to access).
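  // (Added illustration.) After an i8 division the quotient is in AL and the
  // remainder in AH; with the combined node the remainder's extension can
  // select directly to something like "movsbl %ah, %eax", instead of first
  // copying AH into a low-8 register and sign-extending in a separate step.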
  if (N0.getOpcode() == ISD::SDIVREM && N0.getResNo() == 1 &&
      N0.getValueType() == MVT::i8 && VT == MVT::i32) {
    SDLoc dl(N);
    SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
    SDValue R = DAG.getNode(X86ISD::SDIVREM8_SEXT_HREG, dl, NodeTys,
                            N0.getOperand(0), N0.getOperand(1));
    DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
    return R.getValue(1);
  }

  if (!DCI.isBeforeLegalizeOps())
    return SDValue();

  if (!Subtarget->hasFp256())
    return SDValue();

  if (VT.isVector() && VT.getSizeInBits() == 256) {
    SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
    if (R.getNode())
      return R;
  }

  return SDValue();
}

static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
                                 const X86Subtarget *Subtarget) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  EVT ScalarVT = VT.getScalarType();
  if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
      (!Subtarget->hasFMA() && !Subtarget->hasFMA4()))
    return SDValue();

  SDValue A = N->getOperand(0);
  SDValue B = N->getOperand(1);
  SDValue C = N->getOperand(2);

  bool NegA = (A.getOpcode() == ISD::FNEG);
  bool NegB = (B.getOpcode() == ISD::FNEG);
  bool NegC = (C.getOpcode() == ISD::FNEG);

  // The multiplication is negated when exactly one of NegA and NegB is set.
  bool NegMul = (NegA != NegB);
  if (NegA)
    A = A.getOperand(0);
  if (NegB)
    B = B.getOperand(0);
  if (NegC)
    C = C.getOperand(0);
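
  // (Added sketch.) The selection below implements the usual sign table:
  //   fma( a, b,  c) -> FMADD      fma( a, b, -c) -> FMSUB
  //   fma(-a, b,  c) -> FNMADD     fma(-a, b, -c) -> FNMSUB
  // and symmetrically when only B is negated, since (-a)*b == a*(-b).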
  unsigned Opcode;
  if (!NegMul)
    Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB;
  else
    Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB;

  return DAG.getNode(Opcode, dl, VT, A, B, C);
}

static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget *Subtarget) {
  // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
  //           (and (i32 x86isd::setcc_carry), 1)
  // This eliminates the zext. This transformation is necessary because
  // ISD::SETCC is always legalized to i8.
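  // (Added illustration.) Without this combine, an i8 SETCC_CARRY result
  // used at i32 would round-trip through a movzbl; producing the SETCC_CARRY
  // at i32 directly leaves the AND as the only remaining operation.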
  SDLoc dl(N);
  SDValue N0 = N->getOperand(0);
  EVT VT = N->getValueType(0);

  if (N0.getOpcode() == ISD::AND &&
      N0.hasOneUse() &&
      N0.getOperand(0).hasOneUse()) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
      if (!C || C->getZExtValue() != 1)
        return SDValue();
      return DAG.getNode(ISD::AND, dl, VT,
                         DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
                                     N00.getOperand(0), N00.getOperand(1)),
                         DAG.getConstant(1, VT));
    }
  }

  if (N0.getOpcode() == ISD::TRUNCATE &&
      N0.hasOneUse() &&
      N0.getOperand(0).hasOneUse()) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
      return DAG.getNode(ISD::AND, dl, VT,
                         DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
                                     N00.getOperand(0), N00.getOperand(1)),
                         DAG.getConstant(1, VT));
    }
  }

  if (VT.is256BitVector()) {
    SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
    if (R.getNode())
      return R;
  }

  // (i8,i32 zext (udivrem (i8 x, i8 y))) ->
  // (i8,i32 (udivrem_zext_hreg (i8 x, i8 y)))
  // This exposes the zext to the udivrem lowering, so that it directly extends
  // from AH (which we otherwise need to do contortions to access).
  if (N0.getOpcode() == ISD::UDIVREM &&
      N0.getResNo() == 1 && N0.getValueType() == MVT::i8 &&
      (VT == MVT::i32 || VT == MVT::i64)) {
    SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
    SDValue R = DAG.getNode(X86ISD::UDIVREM8_ZEXT_HREG, dl, NodeTys,
                            N0.getOperand(0), N0.getOperand(1));
    DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
    return R.getValue(1);
  }

  return SDValue();
}

// Optimize  x == -y  -->  x+y == 0
//           x != -y  -->  x+y != 0
static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget *Subtarget) {
  ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  EVT VT = N->getValueType(0);
  SDLoc DL(N);

  if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
      if (C->getAPIntValue() == 0 && LHS.hasOneUse()) {
        SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
                                   LHS.getValueType(), RHS, LHS.getOperand(1));
        return DAG.getSetCC(SDLoc(N), N->getValueType(0),
                            addV, DAG.getConstant(0, addV.getValueType()), CC);
      }
  if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0)))
      if (C->getAPIntValue() == 0 && RHS.hasOneUse()) {
        SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
                                   RHS.getValueType(), LHS, RHS.getOperand(1));
        return DAG.getSetCC(SDLoc(N), N->getValueType(0),
                            addV, DAG.getConstant(0, addV.getValueType()), CC);
      }

  if (VT.getScalarType() == MVT::i1) {
    bool IsSEXT0 = (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
                   (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
    bool IsVZero0 = ISD::isBuildVectorAllZeros(LHS.getNode());

    if (!IsSEXT0 && !IsVZero0)
      return SDValue();

    bool IsSEXT1 = (RHS.getOpcode() == ISD::SIGN_EXTEND) &&
                   (RHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
    bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());

    if (!IsSEXT1 && !IsVZero1)
      return SDValue();

    if (IsSEXT0 && IsVZero1) {
      assert(VT == LHS.getOperand(0).getValueType() &&
             "Unexpected operand type");
      if (CC == ISD::SETEQ)
        return DAG.getNOT(DL, LHS.getOperand(0), VT);
      return LHS.getOperand(0);
    }
    if (IsSEXT1 && IsVZero0) {
      assert(VT == RHS.getOperand(0).getValueType() &&
             "Unexpected operand type");
      if (CC == ISD::SETEQ)
        return DAG.getNOT(DL, RHS.getOperand(0), VT);
      return RHS.getOperand(0);
    }
  }

  return SDValue();
}

static SDValue PerformINSERTPSCombine(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget *Subtarget) {
  SDLoc dl(N);
  MVT VT = N->getOperand(1)->getSimpleValueType(0);
  assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
         "X86insertps is only defined for v4x32");

  SDValue Ld = N->getOperand(1);
  if (MayFoldLoad(Ld)) {
    // Extract the countS bits from the immediate so we can get the proper
    // address when narrowing the vector load to a specific element.
    // When the second source op is a memory address, insertps doesn't use
    // countS and just gets an f32 from that address.
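    // (Added sketch of the rewrite this enables; operands illustrative.)
    //   (insertps imm, (load <4 x float>* %p), %xmm0)
    //     --> (insertps imm, (scalar_to_vector (load float, %p + countS*4)),
    //          %xmm0)
    // so only the one f32 that insertps actually uses is loaded.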
    unsigned DestIndex =
        cast<ConstantSDNode>(N->getOperand(2))->getZExtValue() >> 6;
    Ld = NarrowVectorLoadToElement(cast<LoadSDNode>(Ld), DestIndex, DAG);
  } else
    return SDValue();

  // Create this as a scalar to vector to match the instruction pattern.
  SDValue LoadScalarToVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Ld);
  // countS bits are ignored when loading from memory on insertps, which
  // means we don't need to explicitly set them to 0.
  return DAG.getNode(X86ISD::INSERTPS, dl, VT, N->getOperand(0),
                     LoadScalarToVector, N->getOperand(2));
}

// Helper function of PerformSETCCCombine. It is to materialize "setb reg"
// as "sbb reg,reg", since it can be extended without a zext and produces
// an all-ones bit which is more useful than 0/1 in some cases.
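// (Added illustration; register choice arbitrary.)
//   sbb %eax, %eax        # EAX = CF ? -1 : 0
//   and $1, %eax          # EAX = CF ?  1 : 0
// The intermediate all-ones value folds well into masks and selects.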
static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
                               MVT VT) {
  if (VT == MVT::i8)
    return DAG.getNode(ISD::AND, DL, VT,
                       DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
                                   DAG.getConstant(X86::COND_B, MVT::i8),
                                   EFLAGS),
                       DAG.getConstant(1, VT));
  assert(VT == MVT::i1 && "Unexpected type for SETCC node");
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1,
                     DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
                                 DAG.getConstant(X86::COND_B, MVT::i8),
                                 EFLAGS));
}

// Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
  SDLoc DL(N);
  X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
  SDValue EFLAGS = N->getOperand(1);

  if (CC == X86::COND_A) {
    // Try to convert COND_A into COND_B in an attempt to facilitate
    // materializing "setb reg".
    //
    // Do not flip "e > c", where "c" is a constant, because the Cmp
    // instruction cannot take an immediate as its first operand.
    //
    if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
        EFLAGS.getValueType().isInteger() &&
        !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
      SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
                                   EFLAGS.getNode()->getVTList(),
                                   EFLAGS.getOperand(1), EFLAGS.getOperand(0));
      SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
      return MaterializeSETB(DL, NewEFLAGS, DAG, N->getSimpleValueType(0));
    }
  }

  // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
  // a zext and produces an all-ones bit which is more useful than 0/1 in some
  // cases.
  if (CC == X86::COND_B)
    return MaterializeSETB(DL, EFLAGS, DAG, N->getSimpleValueType(0));

  SDValue Flags;

  Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
  if (Flags.getNode()) {
    SDValue Cond = DAG.getConstant(CC, MVT::i8);
    return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
  }

  return SDValue();
}

// Optimize branch condition evaluation.
static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const X86Subtarget *Subtarget) {
  SDLoc DL(N);
  SDValue Chain = N->getOperand(0);
  SDValue Dest = N->getOperand(1);
  SDValue EFLAGS = N->getOperand(3);
  X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));

  SDValue Flags;

  Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
  if (Flags.getNode()) {
    SDValue Cond = DAG.getConstant(CC, MVT::i8);
    return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
                       Flags);
  }

  return SDValue();
}

static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
                                                         SelectionDAG &DAG) {
  // Take advantage of vector comparisons producing 0 or -1 in each lane to
  // optimize away the operation when it's from a constant.
  //
  // The general transformation is:
  //    UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
  //       AND(VECTOR_CMP(x,y), constant2)
  //    constant2 = UNARYOP(constant)

  // Early exit if this isn't a vector operation, the operand of the
  // unary operation isn't a bitwise AND, or if the sizes of the operations
  // aren't the same.
  EVT VT = N->getValueType(0);
  if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
      N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
      VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
    return SDValue();

  // Now check that the other operand of the AND is a constant. We could
  // make the transformation for non-constant splats as well, but it's unclear
  // that would be a benefit as it would not eliminate any operations, just
  // perform one more step in scalar code before moving to the vector unit.
  if (BuildVectorSDNode *BV =
          dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
    // Bail out if the vector isn't a constant.
    if (!BV->isConstant())
      return SDValue();

    // Everything checks out. Build up the new and improved node.
    SDLoc DL(N);
    EVT IntVT = BV->getValueType(0);
    // Create a new constant of the appropriate type for the transformed
    // DAG.
    SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
    // The AND node needs bitcasts to/from an integer vector type around it.
    SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
    SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
                                 N->getOperand(0)->getOperand(0), MaskConst);
    SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
    return Res;
  }

  return SDValue();
}

static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
                                        const X86Subtarget *Subtarget) {
  // First try to optimize away the conversion entirely when it's
  // conditionally from a constant. Vectors only.
  SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
  if (Res != SDValue())
    return Res;

  // Now move on to more general possibilities.
  SDValue Op0 = N->getOperand(0);
  EVT InVT = Op0->getValueType(0);

  // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
  if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
    SDLoc dl(N);
    MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
    SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
    return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
  }

  // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
  // a 32-bit target where SSE doesn't support i64->FP operations.
  if (Op0.getOpcode() == ISD::LOAD) {
    LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
    EVT VT = Ld->getValueType(0);
    if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
        ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
        !Subtarget->is64Bit() && VT == MVT::i64) {
      SDValue FILDChain = Subtarget->getTargetLowering()->BuildFILD(
          SDValue(N, 0), Ld->getValueType(0), Ld->getChain(), Op0, DAG);
      DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
      return FILDChain;
    }
  }
  return SDValue();
}

// Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
                                 X86TargetLowering::DAGCombinerInfo &DCI) {
  // If the LHS and RHS of the ADC node are zero, then it can't overflow and
  // the result is either zero or one (depending on the input carry bit).
  // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
  if (X86::isZeroNode(N->getOperand(0)) &&
      X86::isZeroNode(N->getOperand(1)) &&
      // We don't have a good way to replace an EFLAGS use, so only do this when
      // the carry result is dead.
      SDValue(N, 1).use_empty()) {
    SDLoc DL(N);
    EVT VT = N->getValueType(0);
    SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
    SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
                               DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                                           DAG.getConstant(X86::COND_B, MVT::i8),
                                           N->getOperand(2)),
                               DAG.getConstant(1, VT));
    return DCI.CombineTo(N, Res1, CarryOut);
  }

  return SDValue();
}

// fold (add Y, (sete  X, 0)) -> adc  0, Y
//      (add Y, (setne X, 0)) -> sbb -1, Y
//      (sub (sete  X, 0), Y) -> sbb  0, Y
//      (sub (setne X, 0), Y) -> adc -1, Y
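// (Added worked example.) For (add Y, (setne X, 0)) the code below emits,
// in rough asm terms:
//   cmp $1, X             # CF = (X == 0), since X < 1 unsigned iff X == 0
//   sbb $-1, Y            # Y = Y - (-1) - CF = Y + (X != 0)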
static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);

  // Look through ZExts.
  SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
  if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
    return SDValue();

  SDValue SetCC = Ext.getOperand(0);
  if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
    return SDValue();

  X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
  if (CC != X86::COND_E && CC != X86::COND_NE)
    return SDValue();

  SDValue Cmp = SetCC.getOperand(1);
  if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
      !X86::isZeroNode(Cmp.getOperand(1)) ||
      !Cmp.getOperand(0).getValueType().isInteger())
    return SDValue();

  SDValue CmpOp0 = Cmp.getOperand(0);
  SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
                               DAG.getConstant(1, CmpOp0.getValueType()));

  SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
  if (CC == X86::COND_NE)
    return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
                       DL, OtherVal.getValueType(), OtherVal,
                       DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
  return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
                     DL, OtherVal.getValueType(), OtherVal,
                     DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
}

/// PerformAddCombine - Do target-specific dag combines on integer adds.
static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
                                 const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  // Try to synthesize horizontal adds from adds of shuffles.
  if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
       (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
      isHorizontalBinOp(Op0, Op1, true))
    return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);

  return OptimizeConditionalInDecrement(N, DAG);
}

static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
                                 const X86Subtarget *Subtarget) {
  SDValue Op0 = N->getOperand(0);
  SDValue Op1 = N->getOperand(1);

  // X86 can't encode an immediate LHS of a sub. See if we can push the
  // negation into a preceding instruction.
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
    // If the RHS of the sub is a XOR with one use and a constant, invert the
    // immediate. Then add one to the LHS of the sub so we can turn
    // X-Y -> X+~Y+1, saving one register.
    if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
        isa<ConstantSDNode>(Op1.getOperand(1))) {
      APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
      EVT VT = Op0.getValueType();
      SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
                                   Op1.getOperand(0),
                                   DAG.getConstant(~XorC, VT));
      return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
                         DAG.getConstant(C->getAPIntValue()+1, VT));
    }
  }

  // Try to synthesize horizontal subs from subs of shuffles.
  EVT VT = N->getValueType(0);
  if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
       (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
      isHorizontalBinOp(Op0, Op1, true))
    return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);

  return OptimizeConditionalInDecrement(N, DAG);
}

/// performVZEXTCombine - Performs dag combines on X86ISD::VZEXT nodes.
static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
  SDLoc DL(N);
  MVT VT = N->getSimpleValueType(0);
  SDValue Op = N->getOperand(0);
  MVT OpVT = Op.getSimpleValueType();
  MVT OpEltVT = OpVT.getVectorElementType();
  unsigned InputBits = OpEltVT.getSizeInBits() * VT.getVectorNumElements();

  // (vzext (bitcast (vzext x))) -> (vzext x)
  SDValue V = Op;
  while (V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);

  if (V != Op && V.getOpcode() == X86ISD::VZEXT) {
    MVT InnerVT = V.getSimpleValueType();
    MVT InnerEltVT = InnerVT.getVectorElementType();

    // If the element sizes match exactly, we can just do one larger vzext.
    // This is always an exact type match as vzext operates on integer types.
    if (OpEltVT == InnerEltVT) {
      assert(OpVT == InnerVT && "Types must match for vzext!");
      return DAG.getNode(X86ISD::VZEXT, DL, VT, V.getOperand(0));
    }

    // The only other way we can combine them is if only a single element of
    // the inner vzext is used in the input to the outer vzext.
    if (InnerEltVT.getSizeInBits() < InputBits)
      return SDValue();

    // In this case, the inner vzext is completely dead because we're going to
    // only look at bits inside of the low element. Just do the outer vzext on
    // a bitcast of the input to the inner.
    return DAG.getNode(X86ISD::VZEXT, DL, VT,
                       DAG.getNode(ISD::BITCAST, DL, OpVT, V));
  }

  // Check if we can bypass extracting and re-inserting an element of an input
  // vector. Essentially:
  // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
  if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
      V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
      V.getOperand(0).getSimpleValueType().getSizeInBits() == InputBits) {
    SDValue ExtractedV = V.getOperand(0);
    SDValue OrigV = ExtractedV.getOperand(0);
    if (auto *ExtractIdx = dyn_cast<ConstantSDNode>(ExtractedV.getOperand(1)))
      if (ExtractIdx->getZExtValue() == 0) {
        MVT OrigVT = OrigV.getSimpleValueType();
        // Extract a subvector if necessary...
        if (OrigVT.getSizeInBits() > OpVT.getSizeInBits()) {
          int Ratio = OrigVT.getSizeInBits() / OpVT.getSizeInBits();
          OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
                                    OrigVT.getVectorNumElements() / Ratio);
          OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
                              DAG.getIntPtrConstant(0));
        }
        Op = DAG.getNode(ISD::BITCAST, DL, OpVT, OrigV);
        return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
      }
  }

  return SDValue();
}

SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
                                             DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::EXTRACT_VECTOR_ELT:
    return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
  case ISD::VSELECT:
  case ISD::SELECT:
  case X86ISD::SHRUNKBLEND:
    return PerformSELECTCombine(N, DAG, DCI, Subtarget);
  case X86ISD::CMOV:        return PerformCMOVCombine(N, DAG, DCI, Subtarget);
  case ISD::ADD:            return PerformAddCombine(N, DAG, Subtarget);
  case ISD::SUB:            return PerformSubCombine(N, DAG, Subtarget);
  case X86ISD::ADC:         return PerformADCCombine(N, DAG, DCI);
  case ISD::MUL:            return PerformMulCombine(N, DAG, DCI);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:            return PerformShiftCombine(N, DAG, DCI, Subtarget);
  case ISD::AND:            return PerformAndCombine(N, DAG, DCI, Subtarget);
  case ISD::OR:             return PerformOrCombine(N, DAG, DCI, Subtarget);
  case ISD::XOR:            return PerformXorCombine(N, DAG, DCI, Subtarget);
  case ISD::LOAD:           return PerformLOADCombine(N, DAG, DCI, Subtarget);
  case ISD::MLOAD:          return PerformMLOADCombine(N, DAG, DCI, Subtarget);
  case ISD::STORE:          return PerformSTORECombine(N, DAG, Subtarget);
  case ISD::MSTORE:         return PerformMSTORECombine(N, DAG, Subtarget);
  case ISD::SINT_TO_FP:     return PerformSINT_TO_FPCombine(N, DAG, Subtarget);
  case ISD::FADD:           return PerformFADDCombine(N, DAG, Subtarget);
  case ISD::FSUB:           return PerformFSUBCombine(N, DAG, Subtarget);
  case X86ISD::FXOR:
  case X86ISD::FOR:         return PerformFORCombine(N, DAG);
  case X86ISD::FMIN:
  case X86ISD::FMAX:        return PerformFMinFMaxCombine(N, DAG);
  case X86ISD::FAND:        return PerformFANDCombine(N, DAG);
  case X86ISD::FANDN:       return PerformFANDNCombine(N, DAG);
  case X86ISD::BT:          return PerformBTCombine(N, DAG, DCI);
  case X86ISD::VZEXT_MOVL:  return PerformVZEXT_MOVLCombine(N, DAG);
  case ISD::ANY_EXTEND:
  case ISD::ZERO_EXTEND:    return PerformZExtCombine(N, DAG, DCI, Subtarget);
  case ISD::SIGN_EXTEND:    return PerformSExtCombine(N, DAG, DCI, Subtarget);
  case ISD::SIGN_EXTEND_INREG:
    return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
  case ISD::TRUNCATE:       return PerformTruncateCombine(N, DAG, DCI, Subtarget);
  case ISD::SETCC:          return PerformISDSETCCCombine(N, DAG, Subtarget);
  case X86ISD::SETCC:       return PerformSETCCCombine(N, DAG, DCI, Subtarget);
  case X86ISD::BRCOND:      return PerformBrCondCombine(N, DAG, DCI, Subtarget);
  case X86ISD::VZEXT:       return performVZEXTCombine(N, DAG, DCI, Subtarget);
  case X86ISD::SHUFP:       // Handle all target specific shuffles
  case X86ISD::PALIGNR:
  case X86ISD::UNPCKH:
  case X86ISD::UNPCKL:
  case X86ISD::MOVHLPS:
  case X86ISD::MOVLHPS:
  case X86ISD::PSHUFB:
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFHW:
  case X86ISD::PSHUFLW:
  case X86ISD::MOVSS:
  case X86ISD::MOVSD:
  case X86ISD::VPERMILPI:
  case X86ISD::VPERM2X128:
  case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI, Subtarget);
  case ISD::FMA:            return PerformFMACombine(N, DAG, Subtarget);
  case ISD::INTRINSIC_WO_CHAIN:
    return PerformINTRINSIC_WO_CHAINCombine(N, DAG, Subtarget);
  case X86ISD::INSERTPS: {
    if (getTargetMachine().getOptLevel() > CodeGenOpt::None)
      return PerformINSERTPSCombine(N, DAG, Subtarget);
    break;
  }
  case ISD::BUILD_VECTOR:   return PerformBUILD_VECTORCombine(N, DAG, Subtarget);
  }

  return SDValue();
}

/// isTypeDesirableForOp - Return true if the target has native support for
/// the specified value type and it is 'desirable' to use the type for the
/// given node type. e.g. On x86 i16 is legal, but undesirable since i16
/// instruction encodings are longer and some i16 instructions are slow.
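/// For example (added illustration), "addw $0x1234, %ax" carries a 0x66
/// operand-size prefix whose 16-bit immediate makes it a classic
/// length-changing-prefix (LCP) case that stalls the decoders on several
/// Intel cores, whereas the promoted "addl $0x1234, %eax" does not.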
bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
  if (!isTypeLegal(VT))
    return false;
  if (VT != MVT::i16)
    return true;

  switch (Opc) {
  default:
    return true;
  case ISD::LOAD:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
  case ISD::SHL: case ISD::SRL:
  case ISD::SUB: case ISD::ADD: case ISD::MUL:
  case ISD::AND: case ISD::OR: case ISD::XOR:
    return false;
  }
}

/// IsDesirableToPromoteOp - This method queries the target whether it is
/// beneficial for dag combiner to promote the specified node. If true, it
/// should return the desired promotion type by reference.
bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
  EVT VT = Op.getValueType();
  if (VT != MVT::i16)
    return false;

  bool Promote = false;
  bool Commute = false;
  switch (Op.getOpcode()) {
  default: break;
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
    // If the non-extending load has a single use and it's not live out, then
    // it might be folded.
    if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
                                                     Op.hasOneUse()*/) {
      for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
             UE = Op.getNode()->use_end(); UI != UE; ++UI) {
        // The only case where we'd want to promote LOAD (rather than it being
        // promoted as an operand) is when its only use is a liveout.
        if (UI->getOpcode() != ISD::CopyToReg)
          return false;
      }
    }
    Promote = true;
    break;
  }
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    Promote = true;
    break;
  case ISD::SHL:
  case ISD::SRL: {
    SDValue N0 = Op.getOperand(0);
    // Look out for (store (shl (load), x)).
    if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
      return false;
    Promote = true;
    break;
  }
  case ISD::ADD: case ISD::MUL:
  case ISD::AND: case ISD::OR: case ISD::XOR:
    Commute = true;
    // fallthrough
  case ISD::SUB: {
    SDValue N0 = Op.getOperand(0);
    SDValue N1 = Op.getOperand(1);
    if (!Commute && MayFoldLoad(N1))
      return false;
    // Avoid disabling potential load folding opportunities.
    if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
      return false;
    if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
      return false;
    Promote = true;
  }
  }

  PVT = MVT::i32;
  return Promote;
}

//===----------------------------------------------------------------------===//
//                           X86 Inline Assembly Support
//===----------------------------------------------------------------------===//

namespace {
  // Helper to match a string separated by whitespace.
  bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) {
    s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace.

    for (unsigned i = 0, e = args.size(); i != e; ++i) {
      StringRef piece(*args[i]);
      if (!s.startswith(piece)) // Check if the piece matches.
        return false;

      s = s.substr(piece.size());
      StringRef::size_type pos = s.find_first_not_of(" \t");
      if (pos == 0) // We matched a prefix.
        return false;

      s = s.substr(pos);
    }

    return s.empty();
  }
  const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={};
}

static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {

  if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
    if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
        std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
        std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {

      if (AsmPieces.size() == 3)
        return true;
      else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
        return true;
    }
  }
  return false;
}

bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
  InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());

  std::string AsmStr = IA->getAsmString();

  IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
  if (!Ty || Ty->getBitWidth() % 16 != 0)
    return false;

  // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
  SmallVector<StringRef, 4> AsmPieces;
  SplitString(AsmStr, AsmPieces, ";\n");

  switch (AsmPieces.size()) {
  default: return false;
  case 1:
    // FIXME: this should verify that we are targeting a 486 or better. If not,
    // we will turn this bswap into something that will be lowered to logical
    // ops instead of emitting the bswap asm. For now, we don't support 486 or
    // lower so don't worry about this.
    // bswap $0
    if (matchAsm(AsmPieces[0], "bswap", "$0") ||
        matchAsm(AsmPieces[0], "bswapl", "$0") ||
        matchAsm(AsmPieces[0], "bswapq", "$0") ||
        matchAsm(AsmPieces[0], "bswap", "${0:q}") ||
        matchAsm(AsmPieces[0], "bswapl", "${0:q}") ||
        matchAsm(AsmPieces[0], "bswapq", "${0:q}")) {
      // No need to check constraints, nothing other than the equivalent of
      // "=r,0" would be valid here.
      return IntrinsicLowering::LowerToByteSwap(CI);
    }

    // rorw $$8, ${0:w}  -->  llvm.bswap.i16
    if (CI->getType()->isIntegerTy(16) &&
        IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
        (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") ||
         matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) {
      AsmPieces.clear();
      const std::string &ConstraintsStr = IA->getConstraintString();
      SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
      array_pod_sort(AsmPieces.begin(), AsmPieces.end());
      if (clobbersFlagRegisters(AsmPieces))
        return IntrinsicLowering::LowerToByteSwap(CI);
    }
    break;
  case 3:
    if (CI->getType()->isIntegerTy(32) &&
        IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
        matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") &&
        matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") &&
        matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) {
      AsmPieces.clear();
      const std::string &ConstraintsStr = IA->getConstraintString();
      SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
      array_pod_sort(AsmPieces.begin(), AsmPieces.end());
      if (clobbersFlagRegisters(AsmPieces))
        return IntrinsicLowering::LowerToByteSwap(CI);
    }

    if (CI->getType()->isIntegerTy(64)) {
      InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
      if (Constraints.size() >= 2 &&
          Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
          Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
        // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
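        // (Added sketch.) The classic pre-bswap64 idiom matched here looks,
        // at the C level, like (constraint "A" = the EDX:EAX register pair):
        //   asm("bswap %eax\n\tbswap %edx\n\txchgl %eax, %edx"
        //       : "=A"(out) : "0"(in));
        // and is collapsed into a single llvm.bswap.i64 call.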
        if (matchAsm(AsmPieces[0], "bswap", "%eax") &&
            matchAsm(AsmPieces[1], "bswap", "%edx") &&
            matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx"))
          return IntrinsicLowering::LowerToByteSwap(CI);
      }
    }
    break;
  }
  return false;
}

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'R': case 'q': case 'Q': case 'f': case 't': case 'u':
    case 'y': case 'x': case 'Y': case 'l':
      return C_RegisterClass;
    case 'a': case 'b': case 'c': case 'd': case 'S': case 'D': case 'A':
      return C_Register;
    case 'I': case 'J': case 'K': case 'L': case 'M': case 'N':
    case 'G': case 'C': case 'e': case 'Z':
      return C_Other;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
X86TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'R': case 'q': case 'Q': case 'a': case 'b':
  case 'c': case 'd': case 'S': case 'D': case 'A':
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_SpecificReg;
    break;
  case 'f': case 't': case 'u':
    if (type->isFloatingPointTy())
      weight = CW_SpecificReg;
    break;
  case 'y':
    if (type->isX86_MMXTy() && Subtarget->hasMMX())
      weight = CW_SpecificReg;
    break;
  case 'x': case 'Y':
    if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
        ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256()))
      weight = CW_Register;
    break;
  case 'I':
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (C->getZExtValue() <= 31)
        weight = CW_Constant;
    }
    break;
  case 'J':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 63)
        weight = CW_Constant;
    }
    break;
  case 'K':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
        weight = CW_Constant;
    }
    break;
  case 'L':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
        weight = CW_Constant;
    }
    break;
  case 'M':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 3)
        weight = CW_Constant;
    }
    break;
  case 'N':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xff)
        weight = CW_Constant;
    }
    break;
  case 'G': case 'C':
    if (dyn_cast<ConstantFP>(CallOperandVal)) {
      weight = CW_Constant;
    }
    break;
  case 'e':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80000000LL) &&
          (C->getSExtValue() <= 0x7fffffffLL))
        weight = CW_Constant;
    }
    break;
  case 'Z':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xffffffff)
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}

/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
const char *X86TargetLowering::
LowerXConstraint(EVT ConstraintVT) const {
  // FP X constraints get lowered to SSE1/2 registers if available, otherwise
  // 'f' like normal targets.
  if (ConstraintVT.isFloatingPoint()) {
    if (Subtarget->hasSSE2())
      return "Y";
    if (Subtarget->hasSSE1())
      return "x";
  }

  return TargetLowering::LowerXConstraint(ConstraintVT);
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue> &Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 31) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'J':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 63) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'K':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<8>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'L':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
          (Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
        Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'M':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 3) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 255) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'O':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 127) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'e': {
    // 32-bit signed value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getSExtValue())) {
        // Widen to 64 bits here to get it sign extended.
        Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
        break;
      }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    }
    return;
  }
  case 'Z': {
    // 32-bit unsigned value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getZExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    return;
  }
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      // Widen to 64 bits here to get it sign extended.
      Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
      break;
    }

    // In any sort of PIC mode addresses need to be computed at runtime by
    // adding in a register or some sort of table lookup. These can't
    // be used as immediates.
    if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
      return;

    // If we are in non-pic codegen mode, we allow the address of a global
    // (with an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = nullptr;
    int64_t Offset = 0;

    // Match either (GA), (GA+C), (GA+C1+C2), etc.
    while (1) {
      if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
        Offset += GA->getOffset();
        break;
      } else if (Op.getOpcode() == ISD::ADD) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      } else if (Op.getOpcode() == ISD::SUB) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += -C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      }

      // Otherwise, this isn't something we can handle, reject it.
      return;
    }

    const GlobalValue *GV = GA->getGlobal();
    // If we require an extra load to get this address, as in PIC mode, we
    // can't accept it.
    if (isGlobalStubReference(
            Subtarget->ClassifyGlobalReference(GV, DAG.getTarget())))
      return;

    Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
                                        GA->getValueType(0), Offset);
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    // TODO: Slight differences here in allocation order and leaving
    // RIP in the class. Do they matter any more here than they do
    // in the normal allocation?
    case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
      if (Subtarget->is64Bit()) {
        if (VT == MVT::i32 || VT == MVT::f32)
          return std::make_pair(0U, &X86::GR32RegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::GR16RegClass);
        if (VT == MVT::i8 || VT == MVT::i1)
          return std::make_pair(0U, &X86::GR8RegClass);
        if (VT == MVT::i64 || VT == MVT::f64)
          return std::make_pair(0U, &X86::GR64RegClass);
        break;
      }
      // 32-bit fallthrough
    case 'Q':   // Q_REGS
      if (VT == MVT::i32 || VT == MVT::f32)
        return std::make_pair(0U, &X86::GR32_ABCDRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_ABCDRegClass);
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
      if (VT == MVT::i64)
        return std::make_pair(0U, &X86::GR64_ABCDRegClass);
      break;
    case 'r':   // GENERAL_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8RegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16RegClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32RegClass);
      return std::make_pair(0U, &X86::GR64RegClass);
    case 'R':   // LEGACY_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_NOREXRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_NOREXRegClass);
      if (VT == MVT::i32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32_NOREXRegClass);
      return std::make_pair(0U, &X86::GR64_NOREXRegClass);
    case 'f':   // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP32RegClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP64RegClass);
      return std::make_pair(0U, &X86::RFP80RegClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, &X86::VR64RegClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT.SimpleTy) {
      default: break;
      // Scalar SSE types.
      case MVT::f32: case MVT::i32:
        return std::make_pair(0U, &X86::FR32RegClass);
      case MVT::f64: case MVT::i64:
        return std::make_pair(0U, &X86::FR64RegClass);
      // Vector types.
      case MVT::v16i8: case MVT::v8i16: case MVT::v4i32:
      case MVT::v2i64: case MVT::v4f32: case MVT::v2f64:
        return std::make_pair(0U, &X86::VR128RegClass);
      // AVX types.
      case MVT::v32i8: case MVT::v16i16: case MVT::v8i32:
      case MVT::v4i64: case MVT::v8f32:  case MVT::v4f64:
        return std::make_pair(0U, &X86::VR256RegClass);
      // AVX-512 types.
      case MVT::v8f64: case MVT::v16f32: case MVT::v16i32: case MVT::v8i64:
        return std::make_pair(0U, &X86::VR512RegClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (!Res.second) {
    // Map st(0) -> st(7) -> ST0
    if (Constraint.size() == 7 && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 's' &&
        tolower(Constraint[2]) == 't' &&
        Constraint[3] == '(' &&
        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
        Constraint[5] == ')' &&
        Constraint[6] == '}') {
      Res.first = X86::FP0 + Constraint[4] - '0';
      Res.second = &X86::RFP80RegClass;
      return Res;
    }

    // GCC allows "st(0)" to be called just plain "st".
    if (StringRef("{st}").equals_lower(Constraint)) {
      Res.first = X86::FP0;
      Res.second = &X86::RFP80RegClass;
      return Res;
    }

    // flags -> EFLAGS
    if (StringRef("{flags}").equals_lower(Constraint)) {
      Res.first = X86::EFLAGS;
      Res.second = &X86::CCRRegClass;
      return Res;
    }

    // 'A' means EAX + EDX.
    if (Constraint == "A") {
      Res.first = X86::EAX;
      Res.second = &X86::GR32_ADRegClass;
      return Res;
    }
    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
  // turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
  if (Res.second == &X86::GR16RegClass) {
    if (VT == MVT::i8 || VT == MVT::i1) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::AL; break;
      case X86::DX: DestReg = X86::DL; break;
      case X86::CX: DestReg = X86::CL; break;
      case X86::BX: DestReg = X86::BL; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR8RegClass;
      }
    } else if (VT == MVT::i32 || VT == MVT::f32) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::EAX; break;
      case X86::DX: DestReg = X86::EDX; break;
      case X86::CX: DestReg = X86::ECX; break;
      case X86::BX: DestReg = X86::EBX; break;
      case X86::SI: DestReg = X86::ESI; break;
      case X86::DI: DestReg = X86::EDI; break;
      case X86::BP: DestReg = X86::EBP; break;
      case X86::SP: DestReg = X86::ESP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR32RegClass;
      }
    } else if (VT == MVT::i64 || VT == MVT::f64) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::RAX; break;
      case X86::DX: DestReg = X86::RDX; break;
      case X86::CX: DestReg = X86::RCX; break;
      case X86::BX: DestReg = X86::RBX; break;
      case X86::SI: DestReg = X86::RSI; break;
      case X86::DI: DestReg = X86::RDI; break;
      case X86::BP: DestReg = X86::RBP; break;
      case X86::SP: DestReg = X86::RSP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR64RegClass;
      }
    }
  } else if (Res.second == &X86::FR32RegClass ||
             Res.second == &X86::FR64RegClass ||
             Res.second == &X86::VR128RegClass ||
             Res.second == &X86::VR256RegClass ||
             Res.second == &X86::FR32XRegClass ||
             Res.second == &X86::FR64XRegClass ||
             Res.second == &X86::VR128XRegClass ||
             Res.second == &X86::VR256XRegClass ||
             Res.second == &X86::VR512RegClass) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class. This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it can
    // find, ignoring the required type.

    if (VT == MVT::f32 || VT == MVT::i32)
      Res.second = &X86::FR32RegClass;
    else if (VT == MVT::f64 || VT == MVT::i64)
      Res.second = &X86::FR64RegClass;
    else if (X86::VR128RegClass.hasType(VT))
      Res.second = &X86::VR128RegClass;
    else if (X86::VR256RegClass.hasType(VT))
      Res.second = &X86::VR256RegClass;
    else if (X86::VR512RegClass.hasType(VT))
      Res.second = &X86::VR512RegClass;
  }

  return Res;
}

int X86TargetLowering::getScalingFactorCost(const AddrMode &AM,
                                            Type *Ty) const {
  // Scaling factors are not free at all.
  // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
  // will take 2 allocations in the out of order engine instead of 1
  // for plain addressing mode, i.e. inst (reg1).
  // E.g.,
  // vaddps (%rsi,%rdx), %ymm0, %ymm1
  // Requires two allocations (one for the load, one for the computation)
  // whereas:
  // vaddps (%rsi), %ymm0, %ymm1
  // Requires just 1 allocation, i.e., freeing allocations for other operations
  // and having less micro operations to execute.
  //
  // For some X86 architectures, this is even worse because for instance for
  // stores, the complex addressing mode forces the instruction to use the
  // "load" ports instead of the dedicated "store" port.
  // E.g., on Haswell:
  // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
  // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
  if (isLegalAddressingMode(AM, Ty))
    // Scale represents reg2 * scale, thus account for 1
    // as soon as we use a second register.
    return AM.Scale != 0;
  return -1;
}

bool X86TargetLowering::isTargetFTOL() const {
  return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit();