//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/VariadicFunction.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "X86IntrinsicsInfo.h"

using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> ExperimentalVectorWideningLegalization(
    "x86-experimental-vector-widening-legalization", cl::init(false),
    cl::desc("Enable an experimental vector type legalization through widening "
             "rather than promotion."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLowering(
    "x86-experimental-vector-shuffle-lowering", cl::init(true),
    cl::desc("Enable an experimental vector shuffle lowering code path."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLegality(
    "x86-experimental-vector-shuffle-legality", cl::init(false),
    cl::desc("Enable experimental shuffle legality based on the experimental "
             "shuffle lowering. Should only be used with the experimental "
             "shuffle lowering."),
    cl::Hidden);

static cl::opt<int> ReciprocalEstimateRefinementSteps(
    "x86-recip-refinement-steps", cl::init(1),
    cl::desc("Specify the number of Newton-Raphson iterations applied to the "
             "result of the hardware reciprocal estimate instruction."),
    cl::NotHidden);
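
// For reference (illustrative note, not from the original source): each
// Newton-Raphson step refines an estimate e of 1/d as e' = e * (2 - d * e),
// roughly doubling the number of correct bits per iteration.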

// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
                                SelectionDAG &DAG, SDLoc dl,
                                unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits()/vectorWidth;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements()/Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(ResultVT);

  // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR.
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
                               * ElemsPerChunk);

  // If the input is a buildvector just emit a smaller one.
  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
                       makeArrayRef(Vec->op_begin() + NormalizedIdxVal,
                                    ElemsPerChunk));

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
}
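
// Worked example (illustrative, not from the original source): extracting
// from a v8i32 (256-bit) vector with vectorWidth == 128 gives
// ElemsPerChunk == 4, so IdxVal == 5 normalizes to ((5 * 32) / 128) * 4 == 4,
// i.e. the 128-bit chunk holding elements 4..7.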

/// Generate a DAG to grab 128-bits from a vector > 128 bits. This
/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
/// instructions or a simple subregister reference. Idx is an index in the
/// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert((Vec.getValueType().is256BitVector() ||
          Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 128);
}

/// Generate a DAG to grab 256-bits from a 512-bit vector.
static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 256);
}

static SDValue InsertSubVector(SDValue Result, SDValue Vec,
                               unsigned IdxVal, SelectionDAG &DAG,
                               SDLoc dl, unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  // Inserting UNDEF is Result
  if (Vec.getOpcode() == ISD::UNDEF)
    return Result;
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant vectorWidth bits.
  unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/vectorWidth)
                               * ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
}

/// Generate a DAG to put 128-bits into a vector > 128 bits. This
/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
/// simple superregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}

static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
}

/// Concat two 128-bit vectors into a 256 bit vector using VINSERTF128
/// instructions. This is used because creating CONCAT_VECTORS nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
}

static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert256BitVector(V, V2, NumElems/2, DAG, dl);
}

X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  TD = getDataLayout();

  // Set up the TargetLowering object.
  static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
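
  // For example (illustrative, not from the original source): a scalar SETCC
  // materializes 0 or 1 in an i8 register (SETcc %al), while a vector compare
  // such as PCMPEQD writes 0 or all-ones per element, matching
  // ZeroOrNegativeOneBooleanContent.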

  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget->isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Bypass expensive divides on Atom when compiling with O2.
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget->hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget->hasSlowDivide64() && Subtarget->is64Bit())
      addBypassSlowDiv(64, 16);
  }
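
  // Illustrative note (assumption, not from the original source):
  // addBypassSlowDiv(32, 8) asks codegen prepare to emit a run-time check
  // that dispatches a 32-bit divide to the much cheaper 8-bit divider when
  // both operands fit in 8 bits.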

  if (Subtarget->isTargetKnownWindowsMSVC()) {
    // Setup Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);

    // The _ftol2 runtime function has an unusual calling conv, which
    // is modeled by a special pseudo-instruction.
    setLibcallName(RTLIB::FPTOUINT_F64_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F64_I32, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I32, nullptr);
  }

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetWindowsGNU()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i16, MVT::i8,  Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i8   , Promote);
  setOperationAction(ISD::UINT_TO_FP       , MVT::i16  , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP     , MVT::i32  , Promote);
    setOperationAction(ISD::UINT_TO_FP     , MVT::i64  , Custom);
  } else if (!TM.Options.UseSoftFloat) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP     , MVT::i64  , Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP     , MVT::i32  , Custom);
  }
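
  // Sketch of the non-SSE2 i64 path (illustrative, not from the original
  // source): the value is loaded with a 64-bit FILD as if it were signed;
  // if the original unsigned value had its top bit set, a compensating
  // constant of 2^64 is then conditionally added to the result.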

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP       , MVT::i1   , Promote);
  setOperationAction(ISD::SINT_TO_FP       , MVT::i8   , Promote);

  if (!TM.Options.UseSoftFloat) {
    // SSE has no i16 to fp conversion, only i32
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP   , MVT::i16  , Promote);
      // f32 and f64 cases are Legal, f80 case is not
      setOperationAction(ISD::SINT_TO_FP   , MVT::i32  , Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP   , MVT::i16  , Custom);
      setOperationAction(ISD::SINT_TO_FP   , MVT::i32  , Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP     , MVT::i16  , Promote);
    setOperationAction(ISD::SINT_TO_FP     , MVT::i32  , Promote);
  }

  // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT       , MVT::i64  , Custom);
  setOperationAction(ISD::SINT_TO_FP       , MVT::i64  , Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_SINT       , MVT::i8   , Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT     , MVT::i16  , Custom);
    setOperationAction(ISD::FP_TO_SINT     , MVT::i32  , Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT       , MVT::i1   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i8   , Promote);
  setOperationAction(ISD::FP_TO_UINT       , MVT::i16  , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT     , MVT::i64  , Expand);
    setOperationAction(ISD::FP_TO_UINT     , MVT::i32  , Promote);
  } else if (!TM.Options.UseSoftFloat) {
    // Since AVX is a superset of SSE3, only check for SSE here.
    if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT   , MVT::i32  , Custom);
  }

  if (isTargetFTOL()) {
    // Use the _ftol2 runtime function, which has a pseudo-instruction
    // to handle its weird calling convention.
    setOperationAction(ISD::FP_TO_UINT     , MVT::i64  , Custom);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST        , MVT::f32  , Expand);
    setOperationAction(ISD::BITCAST        , MVT::i32  , Expand);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::BITCAST      , MVT::f64  , Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST      , MVT::i64  , Expand);
    }
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction (see the illustrative example below).
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
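  //
  // Illustrative example (not from the original source): for
  //   unsigned q = x / y, r = x % y;
  // a single 32-bit DIV produces both results at once:
  //   xorl %edx, %edx
  //   divl %ecx            # quotient in EAX, remainder in EDX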
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Add/Sub overflow ops with MVT::Glue are lowered to EFLAGS dependences.
    setOperationAction(ISD::ADDC, VT, Custom);
    setOperationAction(ISD::ADDE, VT, Custom);
    setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }

  setOperationAction(ISD::BR_JT            , MVT::Other, Expand);
  setOperationAction(ISD::BRCOND           , MVT::Other, Custom);
  setOperationAction(ISD::BR_CC            , MVT::f32,   Expand);
  setOperationAction(ISD::BR_CC            , MVT::f64,   Expand);
  setOperationAction(ISD::BR_CC            , MVT::f80,   Expand);
  setOperationAction(ISD::BR_CC            , MVT::i8,    Expand);
  setOperationAction(ISD::BR_CC            , MVT::i16,   Expand);
  setOperationAction(ISD::BR_CC            , MVT::i32,   Expand);
  setOperationAction(ISD::BR_CC            , MVT::i64,   Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::f32,   Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::f64,   Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::f80,   Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::i8,    Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::i16,   Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::i32,   Expand);
  setOperationAction(ISD::SELECT_CC        , MVT::i64,   Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16  , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8   , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1   , Expand);
  setOperationAction(ISD::FP_ROUND_INREG   , MVT::f32  , Expand);
  setOperationAction(ISD::FREM             , MVT::f32  , Expand);
  setOperationAction(ISD::FREM             , MVT::f64  , Expand);
  setOperationAction(ISD::FREM             , MVT::f80  , Expand);
  setOperationAction(ISD::FLT_ROUNDS_      , MVT::i32  , Custom);

  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationAction(ISD::CTTZ             , MVT::i8   , Promote);
  AddPromotedToType (ISD::CTTZ             , MVT::i8   , MVT::i32);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF  , MVT::i8   , Promote);
  AddPromotedToType (ISD::CTTZ_ZERO_UNDEF  , MVT::i8   , MVT::i32);
  if (Subtarget->hasBMI()) {
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16  , Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32  , Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTTZ           , MVT::i16  , Custom);
    setOperationAction(ISD::CTTZ           , MVT::i32  , Custom);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ         , MVT::i64  , Custom);
  }
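
  // Worked example (illustrative, not from the original source): a promoted
  // i8 cttz can be computed in 32 bits with a guard bit so a zero input
  // yields 8:
  //   movzbl %dil, %eax
  //   orl    $256, %eax
  //   bsfl   %eax, %eax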

  if (Subtarget->hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationAction(ISD::CTLZ             , MVT::i8   , Promote);
    AddPromotedToType (ISD::CTLZ             , MVT::i8   , MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF  , MVT::i8   , Promote);
    AddPromotedToType (ISD::CTLZ_ZERO_UNDEF  , MVT::i8   , MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF  , MVT::i16  , Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF  , MVT::i32  , Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTLZ             , MVT::i8   , Custom);
    setOperationAction(ISD::CTLZ             , MVT::i16  , Custom);
    setOperationAction(ISD::CTLZ             , MVT::i32  , Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF  , MVT::i8   , Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF  , MVT::i16  , Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF  , MVT::i32  , Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::CTLZ           , MVT::i64  , Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }

  // Special handling for half-precision floating point conversions.
  // If we don't have F16C support, then lower half float conversions
  // into library calls.
  if (TM.Options.UseSoftFloat || !Subtarget->hasF16C()) {
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  }

  // There's never any support for operations beyond MVT::f32.
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f80, MVT::f16, Expand);

  if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP          , MVT::i8   , Promote);
  } else {
    setOperationAction(ISD::CTPOP          , MVT::i8   , Expand);
    setOperationAction(ISD::CTPOP          , MVT::i16  , Expand);
    setOperationAction(ISD::CTPOP          , MVT::i32  , Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP        , MVT::i64  , Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64  , Custom);

  if (!Subtarget->hasMOVBE())
    setOperationAction(ISD::BSWAP          , MVT::i16  , Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT          , MVT::i1   , Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT          , MVT::i8   , Custom);
  setOperationAction(ISD::SELECT          , MVT::i16  , Custom);
  setOperationAction(ISD::SELECT          , MVT::i32  , Custom);
  setOperationAction(ISD::SELECT          , MVT::f32  , Custom);
  setOperationAction(ISD::SELECT          , MVT::f64  , Custom);
  setOperationAction(ISD::SELECT          , MVT::f80  , Custom);
  setOperationAction(ISD::SETCC           , MVT::i8   , Custom);
  setOperationAction(ISD::SETCC           , MVT::i16  , Custom);
  setOperationAction(ISD::SETCC           , MVT::i32  , Custom);
  setOperationAction(ISD::SETCC           , MVT::f32  , Custom);
  setOperationAction(ISD::SETCC           , MVT::f64  , Custom);
  setOperationAction(ISD::SETCC           , MVT::f80  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT        , MVT::i64  , Custom);
    setOperationAction(ISD::SETCC         , MVT::i64  , Custom);
  }
  setOperationAction(ISD::EH_RETURN       , MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuation, user-level threading, etc. As a result, no
  // other SjLj exception interfaces are implemented, and please don't build
  // your own exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  setOperationAction(ISD::ConstantPool    , MVT::i32  , Custom);
  setOperationAction(ISD::JumpTable       , MVT::i32  , Custom);
  setOperationAction(ISD::GlobalAddress   , MVT::i32  , Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32  , Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol  , MVT::i32  , Custom);
  setOperationAction(ISD::BlockAddress    , MVT::i32  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool  , MVT::i64  , Custom);
    setOperationAction(ISD::JumpTable     , MVT::i64  , Custom);
    setOperationAction(ISD::GlobalAddress , MVT::i64  , Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64  , Custom);
    setOperationAction(ISD::BlockAddress  , MVT::i64  , Custom);
  }

  // 64-bit shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS       , MVT::i32  , Custom);
  setOperationAction(ISD::SRA_PARTS       , MVT::i32  , Custom);
  setOperationAction(ISD::SRL_PARTS       , MVT::i32  , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS     , MVT::i64  , Custom);
    setOperationAction(ISD::SRA_PARTS     , MVT::i64  , Custom);
    setOperationAction(ISD::SRL_PARTS     , MVT::i64  , Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH      , MVT::Other, Legal);

  setOperationAction(ISD::ATOMIC_FENCE    , MVT::Other, Custom);

  // Expand certain atomics
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }
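
  // Illustrative note (assumption, not from the original source): the custom
  // lowering for an atomic subtraction typically negates the operand and uses
  // a locked exchange-add, e.g. "negl %ecx; lock xaddl %ecx, (%rdi)".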

  if (Subtarget->hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }

  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWin64()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART           , MVT::Other, Custom);
  setOperationAction(ISD::VAEND             , MVT::Other, Expand);
  if (Subtarget->is64Bit() && !Subtarget->isTargetWin64()) {
    // TargetInfo::X86_64ABIBuiltinVaList
    setOperationAction(ISD::VAARG           , MVT::Other, Custom);
    setOperationAction(ISD::VACOPY          , MVT::Other, Custom);
  } else {
    // TargetInfo::CharPtrBuiltinVaList
    setOperationAction(ISD::VAARG           , MVT::Other, Expand);
    setOperationAction(ISD::VACOPY          , MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE,          MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE,       MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, getPointerTy(), Custom);

  if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::FR64RegClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f64, Custom);
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f64, Custom);
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // Lower this to FGETSIGNx86 plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN   , MVT::f64, Expand);
    setOperationAction(ISD::FCOS   , MVT::f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN   , MVT::f32, Expand);
    setOperationAction(ISD::FCOS   , MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0)); // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN   , MVT::f32, Expand);
    setOperationAction(ISD::FCOS   , MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN   , MVT::f64, Expand);
      setOperationAction(ISD::FCOS   , MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (!TM.Options.UseSoftFloat) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    setOperationAction(ISD::UNDEF,     MVT::f64, Expand);
    setOperationAction(ISD::UNDEF,     MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN   , MVT::f64, Expand);
      setOperationAction(ISD::FSIN   , MVT::f32, Expand);
      setOperationAction(ISD::FCOS   , MVT::f64, Expand);
      setOperationAction(ISD::FCOS   , MVT::f32, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Long double always uses X87.
  if (!TM.Options.UseSoftFloat) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF,     MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
      addLegalFPImmediate(TmpFlt);  // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt);  // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2);  // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2);  // FLD1/FCHS
    }

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN   , MVT::f80, Expand);
      setOperationAction(ISD::FCOS   , MVT::f80, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
    }

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL,  MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT,  MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW             , MVT::f32  , Expand);
  setOperationAction(ISD::FPOW             , MVT::f64  , Expand);
  setOperationAction(ISD::FPOW             , MVT::f80  , Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::ADD , VT, Expand);
    setOperationAction(ISD::SUB , VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::MUL , VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR,  VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR,   VT, Expand);
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FMA,  VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  // MMX-sized vectors (other than x86mmx) are expected to be expanded
  // into smaller operations.
  setOperationAction(ISD::MULHS,              MVT::v8i8,  Expand);
  setOperationAction(ISD::MULHS,              MVT::v4i16, Expand);
  setOperationAction(ISD::MULHS,              MVT::v2i32, Expand);
  setOperationAction(ISD::MULHS,              MVT::v1i64, Expand);
  setOperationAction(ISD::AND,                MVT::v8i8,  Expand);
  setOperationAction(ISD::AND,                MVT::v4i16, Expand);
  setOperationAction(ISD::AND,                MVT::v2i32, Expand);
  setOperationAction(ISD::AND,                MVT::v1i64, Expand);
  setOperationAction(ISD::OR,                 MVT::v8i8,  Expand);
  setOperationAction(ISD::OR,                 MVT::v4i16, Expand);
  setOperationAction(ISD::OR,                 MVT::v2i32, Expand);
  setOperationAction(ISD::OR,                 MVT::v1i64, Expand);
  setOperationAction(ISD::XOR,                MVT::v8i8,  Expand);
  setOperationAction(ISD::XOR,                MVT::v4i16, Expand);
  setOperationAction(ISD::XOR,                MVT::v2i32, Expand);
  setOperationAction(ISD::XOR,                MVT::v1i64, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v8i8,  Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v4i16, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v2i32, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v1i64, Expand);
  setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v1i64, Expand);
  setOperationAction(ISD::SELECT,             MVT::v8i8,  Expand);
  setOperationAction(ISD::SELECT,             MVT::v4i16, Expand);
  setOperationAction(ISD::SELECT,             MVT::v2i32, Expand);
  setOperationAction(ISD::SELECT,             MVT::v1i64, Expand);
  setOperationAction(ISD::BITCAST,            MVT::v8i8,  Expand);
  setOperationAction(ISD::BITCAST,            MVT::v4i16, Expand);
  setOperationAction(ISD::BITCAST,            MVT::v2i32, Expand);
  setOperationAction(ISD::BITCAST,            MVT::v1i64, Expand);

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, &X86::VR128RegClass);

    setOperationAction(ISD::FADD,               MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB,               MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL,               MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV,               MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT,              MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG,               MVT::v4f32, Custom);
    setOperationAction(ISD::FABS,               MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD,               MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT,            MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT,             MVT::v4f32, Custom);
    setOperationAction(ISD::UINT_TO_FP,         MVT::v4i32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, &X86::VR128RegClass);

    setOperationAction(ISD::ADD,                MVT::v16i8, Legal);
    setOperationAction(ISD::ADD,                MVT::v8i16, Legal);
    setOperationAction(ISD::ADD,                MVT::v4i32, Legal);
    setOperationAction(ISD::ADD,                MVT::v2i64, Legal);
    setOperationAction(ISD::MUL,                MVT::v4i32, Custom);
    setOperationAction(ISD::MUL,                MVT::v2i64, Custom);
    setOperationAction(ISD::UMUL_LOHI,          MVT::v4i32, Custom);
    setOperationAction(ISD::SMUL_LOHI,          MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU,              MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS,              MVT::v8i16, Legal);
    setOperationAction(ISD::SUB,                MVT::v16i8, Legal);
    setOperationAction(ISD::SUB,                MVT::v8i16, Legal);
    setOperationAction(ISD::SUB,                MVT::v4i32, Legal);
    setOperationAction(ISD::SUB,                MVT::v2i64, Legal);
    setOperationAction(ISD::MUL,                MVT::v8i16, Legal);
    setOperationAction(ISD::FADD,               MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB,               MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL,               MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV,               MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT,              MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG,               MVT::v2f64, Custom);
    setOperationAction(ISD::FABS,               MVT::v2f64, Custom);

    setOperationAction(ISD::SETCC,              MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC,              MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC,              MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC,              MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR,   MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4f32, Custom);

    // Only provide customized ctpop vector bit twiddling for vector types we
    // know to perform better than using the popcnt instructions on each vector
    // element. If popcnt isn't supported, always provide the custom version.
    if (!Subtarget->hasPOPCNT()) {
      setOperationAction(ISD::CTPOP,            MVT::v4i32, Custom);
      setOperationAction(ISD::CTPOP,            MVT::v2i64, Custom);
    }
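
    // Sketch of the bit-twiddling idea (illustrative, not from the original
    // source), shown for one 32-bit lane; the vector lowering applies the
    // same steps with SIMD and/shift/add operations:
    //   v = v - ((v >> 1) & 0x55555555);
    //   v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
    //   v = (((v + (v >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24;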

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      // Do not attempt to custom lower non-128-bit vectors
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::BUILD_VECTOR,       VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE,     VT, Custom);
      setOperationAction(ISD::VSELECT,            VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    // We support custom legalizing of sext and anyext loads for specific
    // memory vector types which we can load as a scalar (or sequence of
    // scalars) and extend in-register to a legal 128-bit vector type. For sext
    // loads these must work with a single scalar load.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
    }
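
    // Illustrative example (not from the original source): a sext load of
    // v4i8 to v4i32 can be one 32-bit scalar load plus in-register sign
    // extension, e.g.
    //   movd      (%rdi), %xmm0
    //   punpcklbw %xmm0, %xmm0
    //   punpcklwd %xmm0, %xmm0
    //   psrad     $24, %xmm0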

    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR,       MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE,     MVT::v2i64, Custom);
    setOperationAction(ISD::VSELECT,            MVT::v2f64, Custom);
    setOperationAction(ISD::VSELECT,            MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-128-bit vectors
      if (!VT.is128BitVector())
        continue;

      setOperationAction(ISD::AND,    VT, Promote);
      AddPromotedToType (ISD::AND,    VT, MVT::v2i64);
      setOperationAction(ISD::OR,     VT, Promote);
      AddPromotedToType (ISD::OR,     VT, MVT::v2i64);
      setOperationAction(ISD::XOR,    VT, Promote);
      AddPromotedToType (ISD::XOR,    VT, MVT::v2i64);
      setOperationAction(ISD::LOAD,   VT, Promote);
      AddPromotedToType (ISD::LOAD,   VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v2i64);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD,               MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD,               MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT,             MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT,             MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT,         MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP,         MVT::v4i32, Legal);

    setOperationAction(ISD::UINT_TO_FP,         MVT::v4i8,  Custom);
    setOperationAction(ISD::UINT_TO_FP,         MVT::v4i16, Custom);
    // As there is no 64-bit GPR available, we need to build a special custom
    // sequence to convert from v2i32 to v2f32.
    if (!Subtarget->is64Bit())
      setOperationAction(ISD::UINT_TO_FP,       MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND,          MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND,           MVT::v2f32, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);

    setOperationAction(ISD::BITCAST,            MVT::v2i32, Custom);
    setOperationAction(ISD::BITCAST,            MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST,            MVT::v8i8,  Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
    setOperationAction(ISD::FFLOOR,             MVT::f32,   Legal);
    setOperationAction(ISD::FCEIL,              MVT::f32,   Legal);
    setOperationAction(ISD::FTRUNC,             MVT::f32,   Legal);
    setOperationAction(ISD::FRINT,              MVT::f32,   Legal);
    setOperationAction(ISD::FNEARBYINT,         MVT::f32,   Legal);
    setOperationAction(ISD::FFLOOR,             MVT::f64,   Legal);
    setOperationAction(ISD::FCEIL,              MVT::f64,   Legal);
    setOperationAction(ISD::FTRUNC,             MVT::f64,   Legal);
    setOperationAction(ISD::FRINT,              MVT::f64,   Legal);
    setOperationAction(ISD::FNEARBYINT,         MVT::f64,   Legal);

    setOperationAction(ISD::FFLOOR,             MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL,              MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC,             MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT,              MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT,         MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR,             MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL,              MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC,             MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT,              MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT,         MVT::v2f64, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL,                MVT::v4i32, Legal);

    // We directly match byte blends in the backend as they match the VSELECT
    // condition form.
    setOperationAction(ISD::VSELECT,            MVT::v16i8, Legal);

    // SSE41 brings specific instructions for doing vector sign extend even in
    // cases where we don't have SRA.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
    }

    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
    setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, MVT::v8i8,  Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8,  Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i8,  Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);

    setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i16, MVT::v8i8,  Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8,  Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i8,  Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);
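
    // For example (illustrative, not from the original source):
    //   pmovsxbd (%rdi), %xmm0
    // loads four bytes and sign-extends them to a v4i32 in one instruction.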

    // i8 and i16 vectors are custom because the source register and source
    // memory operand types are not the same width. f32 vectors are custom
    // since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    // FIXME: these should be Legal, but that's only for the case where
    // the index is constant. For now custom expand to deal with that.
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT,  MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }

  if (Subtarget->hasSSE2()) {
    setOperationAction(ISD::SRL,               MVT::v8i16, Custom);
    setOperationAction(ISD::SRL,               MVT::v16i8, Custom);

    setOperationAction(ISD::SHL,               MVT::v8i16, Custom);
    setOperationAction(ISD::SHL,               MVT::v16i8, Custom);

    setOperationAction(ISD::SRA,               MVT::v8i16, Custom);
    setOperationAction(ISD::SRA,               MVT::v16i8, Custom);

    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
    setOperationAction(ISD::SRL,               MVT::v2i64, Custom);
    setOperationAction(ISD::SRL,               MVT::v4i32, Custom);

    setOperationAction(ISD::SHL,               MVT::v2i64, Custom);
    setOperationAction(ISD::SHL,               MVT::v4i32, Custom);

    setOperationAction(ISD::SRA,               MVT::v4i32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasFp256()) {
    addRegisterClass(MVT::v32i8,  &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32,  &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32,  &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64,  &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64,  &X86::VR256RegClass);

    setOperationAction(ISD::LOAD,               MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD,               MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD,               MVT::v4i64, Legal);

    setOperationAction(ISD::FADD,               MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB,               MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL,               MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV,               MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT,              MVT::v8f32, Legal);
    setOperationAction(ISD::FFLOOR,             MVT::v8f32, Legal);
    setOperationAction(ISD::FCEIL,              MVT::v8f32, Legal);
    setOperationAction(ISD::FTRUNC,             MVT::v8f32, Legal);
    setOperationAction(ISD::FRINT,              MVT::v8f32, Legal);
    setOperationAction(ISD::FNEARBYINT,         MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG,               MVT::v8f32, Custom);
    setOperationAction(ISD::FABS,               MVT::v8f32, Custom);

    setOperationAction(ISD::FADD,               MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB,               MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL,               MVT::v4f64, Legal);
    setOperationAction(ISD::FDIV,               MVT::v4f64, Legal);
    setOperationAction(ISD::FSQRT,              MVT::v4f64, Legal);
    setOperationAction(ISD::FFLOOR,             MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL,              MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC,             MVT::v4f64, Legal);
    setOperationAction(ISD::FRINT,              MVT::v4f64, Legal);
    setOperationAction(ISD::FNEARBYINT,         MVT::v4f64, Legal);
    setOperationAction(ISD::FNEG,               MVT::v4f64, Custom);
    setOperationAction(ISD::FABS,               MVT::v4f64, Custom);

    // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
    // even though v8i16 is a legal type.
    setOperationAction(ISD::FP_TO_SINT,         MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_UINT,         MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_SINT,         MVT::v8i32, Legal);

    setOperationAction(ISD::SINT_TO_FP,         MVT::v8i16, Promote);
    setOperationAction(ISD::SINT_TO_FP,         MVT::v8i32, Legal);
    setOperationAction(ISD::FP_ROUND,           MVT::v4f32, Legal);

    setOperationAction(ISD::UINT_TO_FP,         MVT::v8i8,  Custom);
    setOperationAction(ISD::UINT_TO_FP,         MVT::v8i16, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);

    setOperationAction(ISD::SRL,               MVT::v16i16, Custom);
    setOperationAction(ISD::SRL,               MVT::v32i8, Custom);

    setOperationAction(ISD::SHL,               MVT::v16i16, Custom);
    setOperationAction(ISD::SHL,               MVT::v32i8, Custom);

    setOperationAction(ISD::SRA,               MVT::v16i16, Custom);
    setOperationAction(ISD::SRA,               MVT::v32i8, Custom);

    setOperationAction(ISD::SETCC,             MVT::v32i8, Custom);
    setOperationAction(ISD::SETCC,             MVT::v16i16, Custom);
    setOperationAction(ISD::SETCC,             MVT::v8i32, Custom);
    setOperationAction(ISD::SETCC,             MVT::v4i64, Custom);

    setOperationAction(ISD::SELECT,            MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT,            MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT,            MVT::v8f32, Custom);

    setOperationAction(ISD::SIGN_EXTEND,       MVT::v4i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND,       MVT::v8i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND,       MVT::v16i16, Custom);
    setOperationAction(ISD::ZERO_EXTEND,       MVT::v4i64, Custom);
    setOperationAction(ISD::ZERO_EXTEND,       MVT::v8i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND,       MVT::v16i16, Custom);
    setOperationAction(ISD::ANY_EXTEND,        MVT::v4i64, Custom);
    setOperationAction(ISD::ANY_EXTEND,        MVT::v8i32, Custom);
    setOperationAction(ISD::ANY_EXTEND,        MVT::v16i16, Custom);
    setOperationAction(ISD::TRUNCATE,          MVT::v16i8, Custom);
    setOperationAction(ISD::TRUNCATE,          MVT::v8i16, Custom);
    setOperationAction(ISD::TRUNCATE,          MVT::v4i32, Custom);

    if (Subtarget->hasFMA() || Subtarget->hasFMA4()) {
      setOperationAction(ISD::FMA, MVT::v8f32, Legal);
      setOperationAction(ISD::FMA, MVT::v4f64, Legal);
      setOperationAction(ISD::FMA, MVT::v4f32, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::f32, Legal);
      setOperationAction(ISD::FMA, MVT::f64, Legal);
    }
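
    // For example (illustrative, not from the original source): with FMA,
    // (fadd (fmul a, b), c) on v8f32 can select to a single vfmadd213ps
    // instead of separate vmulps + vaddps.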

    if (Subtarget->hasInt256()) {
      setOperationAction(ISD::ADD,             MVT::v4i64, Legal);
      setOperationAction(ISD::ADD,             MVT::v8i32, Legal);
      setOperationAction(ISD::ADD,             MVT::v16i16, Legal);
      setOperationAction(ISD::ADD,             MVT::v32i8, Legal);

      setOperationAction(ISD::SUB,             MVT::v4i64, Legal);
      setOperationAction(ISD::SUB,             MVT::v8i32, Legal);
      setOperationAction(ISD::SUB,             MVT::v16i16, Legal);
      setOperationAction(ISD::SUB,             MVT::v32i8, Legal);

      setOperationAction(ISD::MUL,             MVT::v4i64, Custom);
      setOperationAction(ISD::MUL,             MVT::v8i32, Legal);
      setOperationAction(ISD::MUL,             MVT::v16i16, Legal);
      // Don't lower v32i8 because there is no 128-bit byte mul

      setOperationAction(ISD::UMUL_LOHI,       MVT::v8i32, Custom);
      setOperationAction(ISD::SMUL_LOHI,       MVT::v8i32, Custom);
      setOperationAction(ISD::MULHU,           MVT::v16i16, Legal);
      setOperationAction(ISD::MULHS,           MVT::v16i16, Legal);

      // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
      // when we have a 256bit-wide blend with immediate.
      setOperationAction(ISD::UINT_TO_FP,      MVT::v8i32, Custom);
1294 // Only provide customized ctpop vector bit twiddling for vector types we
1295 // know to perform better than using the popcnt instructions on each
1296 // vector element. If popcnt isn't supported, always provide the custom
1298 if (!Subtarget->hasPOPCNT())
1299 setOperationAction(ISD::CTPOP, MVT::v4i64, Custom);
1301 // Custom CTPOP always performs better on natively supported v8i32
1302 setOperationAction(ISD::CTPOP, MVT::v8i32, Custom);
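1303 // (Even with POPCNT available, eight scalar popcnts tend to lose to one vectorized bit-count sequence.)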
1304 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1305 setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1306 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1307 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1308 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1309 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1310 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1312 setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1313 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1314 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1315 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1316 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1317 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1318 } else {
1319 setOperationAction(ISD::ADD, MVT::v4i64, Custom);
1320 setOperationAction(ISD::ADD, MVT::v8i32, Custom);
1321 setOperationAction(ISD::ADD, MVT::v16i16, Custom);
1322 setOperationAction(ISD::ADD, MVT::v32i8, Custom);
1324 setOperationAction(ISD::SUB, MVT::v4i64, Custom);
1325 setOperationAction(ISD::SUB, MVT::v8i32, Custom);
1326 setOperationAction(ISD::SUB, MVT::v16i16, Custom);
1327 setOperationAction(ISD::SUB, MVT::v32i8, Custom);
1329 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1330 setOperationAction(ISD::MUL, MVT::v8i32, Custom);
1331 setOperationAction(ISD::MUL, MVT::v16i16, Custom);
1332 // Don't lower v32i8 because there is no 128-bit byte mul
1333 }
1335 // In the customized shift lowering, the legal cases in AVX2 will be
1336 // recognized.
1337 setOperationAction(ISD::SRL, MVT::v4i64, Custom);
1338 setOperationAction(ISD::SRL, MVT::v8i32, Custom);
1340 setOperationAction(ISD::SHL, MVT::v4i64, Custom);
1341 setOperationAction(ISD::SHL, MVT::v8i32, Custom);
1343 setOperationAction(ISD::SRA, MVT::v8i32, Custom);
1345 // Custom lower several nodes for 256-bit types.
1346 for (MVT VT : MVT::vector_valuetypes()) {
1347 if (VT.getScalarSizeInBits() >= 32) {
1348 setOperationAction(ISD::MLOAD, VT, Legal);
1349 setOperationAction(ISD::MSTORE, VT, Legal);
1350 }
1351 // Extract subvector is special because the value type
1352 // (result) is 128-bit but the source is 256-bit wide.
1353 if (VT.is128BitVector()) {
1354 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1355 }
1356 // Do not attempt to custom lower other non-256-bit vectors
1357 if (!VT.is256BitVector())
1358 continue;
1360 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1361 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1362 setOperationAction(ISD::VSELECT, VT, Custom);
1363 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1364 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1365 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1366 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1367 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1368 }
1370 if (Subtarget->hasInt256())
1371 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
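1372 // (With AVX2 a v32i8 vselect maps directly onto VPBLENDVB.)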
1374 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
1375 for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
1376 MVT VT = (MVT::SimpleValueType)i;
1378 // Do not attempt to promote non-256-bit vectors
1379 if (!VT.is256BitVector())
1380 continue;
1382 setOperationAction(ISD::AND, VT, Promote);
1383 AddPromotedToType (ISD::AND, VT, MVT::v4i64);
1384 setOperationAction(ISD::OR, VT, Promote);
1385 AddPromotedToType (ISD::OR, VT, MVT::v4i64);
1386 setOperationAction(ISD::XOR, VT, Promote);
1387 AddPromotedToType (ISD::XOR, VT, MVT::v4i64);
1388 setOperationAction(ISD::LOAD, VT, Promote);
1389 AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
1390 setOperationAction(ISD::SELECT, VT, Promote);
1391 AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
1392 }
1393 }
1395 if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512()) {
1396 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1397 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1398 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1399 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1401 addRegisterClass(MVT::i1, &X86::VK1RegClass);
1402 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1403 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1405 for (MVT VT : MVT::fp_vector_valuetypes())
1406 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
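1407 // (A v8f32 extending load folds into the memory form of VCVTPS2PD.)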
1408 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
1409 setOperationAction(ISD::SETCC, MVT::i1, Custom);
1410 setOperationAction(ISD::XOR, MVT::i1, Legal);
1411 setOperationAction(ISD::OR, MVT::i1, Legal);
1412 setOperationAction(ISD::AND, MVT::i1, Legal);
1413 setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
1414 setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
1415 setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
1416 setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
1417 setOperationAction(ISD::LOAD, MVT::v16i1, Legal);
1419 setOperationAction(ISD::FADD, MVT::v16f32, Legal);
1420 setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
1421 setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
1422 setOperationAction(ISD::FDIV, MVT::v16f32, Legal);
1423 setOperationAction(ISD::FSQRT, MVT::v16f32, Legal);
1424 setOperationAction(ISD::FNEG, MVT::v16f32, Custom);
1426 setOperationAction(ISD::FADD, MVT::v8f64, Legal);
1427 setOperationAction(ISD::FSUB, MVT::v8f64, Legal);
1428 setOperationAction(ISD::FMUL, MVT::v8f64, Legal);
1429 setOperationAction(ISD::FDIV, MVT::v8f64, Legal);
1430 setOperationAction(ISD::FSQRT, MVT::v8f64, Legal);
1431 setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
1432 setOperationAction(ISD::FMA, MVT::v8f64, Legal);
1433 setOperationAction(ISD::FMA, MVT::v16f32, Legal);
1435 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
1436 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
1437 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
1438 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
1439 if (Subtarget->is64Bit()) {
1440 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
1441 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
1442 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
1443 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
1444 }
1445 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1446 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1447 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1448 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1449 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1450 setOperationAction(ISD::SINT_TO_FP, MVT::v8i1, Custom);
1451 setOperationAction(ISD::SINT_TO_FP, MVT::v16i1, Custom);
1452 setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Promote);
1453 setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Promote);
1454 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1455 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1456 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1457 setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
1458 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
1460 setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
1461 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1462 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1463 setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
1464 setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
1465 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1466 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1467 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1468 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1469 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1470 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
1471 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
1472 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
1474 setOperationAction(ISD::FFLOOR, MVT::v16f32, Legal);
1475 setOperationAction(ISD::FFLOOR, MVT::v8f64, Legal);
1476 setOperationAction(ISD::FCEIL, MVT::v16f32, Legal);
1477 setOperationAction(ISD::FCEIL, MVT::v8f64, Legal);
1478 setOperationAction(ISD::FTRUNC, MVT::v16f32, Legal);
1479 setOperationAction(ISD::FTRUNC, MVT::v8f64, Legal);
1480 setOperationAction(ISD::FRINT, MVT::v16f32, Legal);
1481 setOperationAction(ISD::FRINT, MVT::v8f64, Legal);
1482 setOperationAction(ISD::FNEARBYINT, MVT::v16f32, Legal);
1483 setOperationAction(ISD::FNEARBYINT, MVT::v8f64, Legal);
1485 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
1486 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
1487 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
1488 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
1489 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
1490 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Legal);
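1491 // (Two v8i1 masks concatenate into one v16i1 with a single KUNPCKBW, hence Legal rather than Custom.)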
1492 setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
1493 setOperationAction(ISD::SETCC, MVT::v8i1, Custom);
1495 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1497 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
1498 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
1499 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
1500 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
1501 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
1502 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
1503 setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
1504 setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
1505 setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
1507 setOperationAction(ISD::ADD, MVT::v8i64, Legal);
1508 setOperationAction(ISD::ADD, MVT::v16i32, Legal);
1510 setOperationAction(ISD::SUB, MVT::v8i64, Legal);
1511 setOperationAction(ISD::SUB, MVT::v16i32, Legal);
1513 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1515 setOperationAction(ISD::SRL, MVT::v8i64, Custom);
1516 setOperationAction(ISD::SRL, MVT::v16i32, Custom);
1518 setOperationAction(ISD::SHL, MVT::v8i64, Custom);
1519 setOperationAction(ISD::SHL, MVT::v16i32, Custom);
1521 setOperationAction(ISD::SRA, MVT::v8i64, Custom);
1522 setOperationAction(ISD::SRA, MVT::v16i32, Custom);
1524 setOperationAction(ISD::AND, MVT::v8i64, Legal);
1525 setOperationAction(ISD::OR, MVT::v8i64, Legal);
1526 setOperationAction(ISD::XOR, MVT::v8i64, Legal);
1527 setOperationAction(ISD::AND, MVT::v16i32, Legal);
1528 setOperationAction(ISD::OR, MVT::v16i32, Legal);
1529 setOperationAction(ISD::XOR, MVT::v16i32, Legal);
1531 if (Subtarget->hasCDI()) {
1532 setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
1533 setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
1534 }
1536 // Custom lower several nodes.
1537 for (MVT VT : MVT::vector_valuetypes()) {
1538 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1539 // Extract subvector is special because the value type
1540 // (result) is 256/128-bit but the source is 512-bit wide.
1541 if (VT.is128BitVector() || VT.is256BitVector()) {
1542 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1543 }
1544 if (VT.getVectorElementType() == MVT::i1)
1545 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1547 // Do not attempt to custom lower other non-512-bit vectors
1548 if (!VT.is512BitVector())
1549 continue;
1551 if (EltSize >= 32) {
1552 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1553 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1554 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1555 setOperationAction(ISD::VSELECT, VT, Legal);
1556 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1557 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1558 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1559 setOperationAction(ISD::MLOAD, VT, Legal);
1560 setOperationAction(ISD::MSTORE, VT, Legal);
1561 }
1562 }
1563 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1564 MVT VT = (MVT::SimpleValueType)i;
1566 // Do not attempt to promote non-512-bit vectors.
1567 if (!VT.is512BitVector())
1568 continue;
1570 setOperationAction(ISD::SELECT, VT, Promote);
1571 AddPromotedToType (ISD::SELECT, VT, MVT::v8i64);
1572 }
1573 }
1575 if (!TM.Options.UseSoftFloat && Subtarget->hasBWI()) {
1576 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1577 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1579 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1580 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1582 setOperationAction(ISD::LOAD, MVT::v32i16, Legal);
1583 setOperationAction(ISD::LOAD, MVT::v64i8, Legal);
1584 setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
1585 setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
1586 setOperationAction(ISD::ADD, MVT::v32i16, Legal);
1587 setOperationAction(ISD::ADD, MVT::v64i8, Legal);
1588 setOperationAction(ISD::SUB, MVT::v32i16, Legal);
1589 setOperationAction(ISD::SUB, MVT::v64i8, Legal);
1590 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1592 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1593 const MVT VT = (MVT::SimpleValueType)i;
1595 const unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1597 // Do not attempt to promote non-512-bit vectors.
1598 if (!VT.is512BitVector())
1599 continue;
1601 if (EltSize < 32) {
1602 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1603 setOperationAction(ISD::VSELECT, VT, Legal);
1604 }
1605 }
1606 }
1608 if (!TM.Options.UseSoftFloat && Subtarget->hasVLX()) {
1609 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1610 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1612 setOperationAction(ISD::SETCC, MVT::v4i1, Custom);
1613 setOperationAction(ISD::SETCC, MVT::v2i1, Custom);
1614 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Legal);
1616 setOperationAction(ISD::AND, MVT::v8i32, Legal);
1617 setOperationAction(ISD::OR, MVT::v8i32, Legal);
1618 setOperationAction(ISD::XOR, MVT::v8i32, Legal);
1619 setOperationAction(ISD::AND, MVT::v4i32, Legal);
1620 setOperationAction(ISD::OR, MVT::v4i32, Legal);
1621 setOperationAction(ISD::XOR, MVT::v4i32, Legal);
1622 }
1624 // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
1625 // of this type with custom code.
1626 for (MVT VT : MVT::vector_valuetypes())
1627 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
1629 // We want to custom lower some of our intrinsics.
1630 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1631 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1632 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1633 if (!Subtarget->is64Bit())
1634 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1636 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1637 // handle type legalization for these operations here.
1639 // FIXME: We really should do custom legalization for addition and
1640 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1641 // than generic legalization for 64-bit multiplication-with-overflow, though.
1642 for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
1643 // Add/Sub/Mul with overflow operations are custom lowered.
1644 MVT VT = IntVTs[i];
1645 setOperationAction(ISD::SADDO, VT, Custom);
1646 setOperationAction(ISD::UADDO, VT, Custom);
1647 setOperationAction(ISD::SSUBO, VT, Custom);
1648 setOperationAction(ISD::USUBO, VT, Custom);
1649 setOperationAction(ISD::SMULO, VT, Custom);
1650 setOperationAction(ISD::UMULO, VT, Custom);
1651 }
1654 if (!Subtarget->is64Bit()) {
1655 // These libcalls are not available in 32-bit.
1656 setLibcallName(RTLIB::SHL_I128, nullptr);
1657 setLibcallName(RTLIB::SRL_I128, nullptr);
1658 setLibcallName(RTLIB::SRA_I128, nullptr);
1659 }
1661 // Combine sin / cos into one node or libcall if possible.
1662 if (Subtarget->hasSinCos()) {
1663 setLibcallName(RTLIB::SINCOS_F32, "sincosf");
1664 setLibcallName(RTLIB::SINCOS_F64, "sincos");
1665 if (Subtarget->isTargetDarwin()) {
1666 // For MacOSX, we don't want the normal expansion of a libcall to sincos.
1667 // We want to issue a libcall to __sincos_stret to avoid memory traffic.
1668 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1669 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1670 }
1671 }
1673 if (Subtarget->isTargetWin64()) {
1674 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1675 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1676 setOperationAction(ISD::SREM, MVT::i128, Custom);
1677 setOperationAction(ISD::UREM, MVT::i128, Custom);
1678 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1679 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
1680 }
1682 // We have target-specific dag combine patterns for the following nodes:
1683 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1684 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1685 setTargetDAGCombine(ISD::BITCAST);
1686 setTargetDAGCombine(ISD::VSELECT);
1687 setTargetDAGCombine(ISD::SELECT);
1688 setTargetDAGCombine(ISD::SHL);
1689 setTargetDAGCombine(ISD::SRA);
1690 setTargetDAGCombine(ISD::SRL);
1691 setTargetDAGCombine(ISD::OR);
1692 setTargetDAGCombine(ISD::AND);
1693 setTargetDAGCombine(ISD::ADD);
1694 setTargetDAGCombine(ISD::FADD);
1695 setTargetDAGCombine(ISD::FSUB);
1696 setTargetDAGCombine(ISD::FMA);
1697 setTargetDAGCombine(ISD::SUB);
1698 setTargetDAGCombine(ISD::LOAD);
1699 setTargetDAGCombine(ISD::MLOAD);
1700 setTargetDAGCombine(ISD::STORE);
1701 setTargetDAGCombine(ISD::MSTORE);
1702 setTargetDAGCombine(ISD::ZERO_EXTEND);
1703 setTargetDAGCombine(ISD::ANY_EXTEND);
1704 setTargetDAGCombine(ISD::SIGN_EXTEND);
1705 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1706 setTargetDAGCombine(ISD::TRUNCATE);
1707 setTargetDAGCombine(ISD::SINT_TO_FP);
1708 setTargetDAGCombine(ISD::SETCC);
1709 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1710 setTargetDAGCombine(ISD::BUILD_VECTOR);
1711 setTargetDAGCombine(ISD::MUL);
1712 setTargetDAGCombine(ISD::XOR);
1714 computeRegisterProperties();
1716 // On Darwin, -Os means optimize for size without hurting performance, so
1717 // do not reduce the limit.
1718 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
1719 MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
1720 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
1721 MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1722 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
1723 MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1724 setPrefLoopAlignment(4); // 2^4 bytes.
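1725 // (The alignment arguments here are log2 values, so 4 means 16 bytes.)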
1726 // Predictable cmovs don't hurt on Atom because it's in-order.
1727 PredictableSelectIsExpensive = !Subtarget->isAtom();
1728 EnableExtLdPromotion = true;
1729 setPrefFunctionAlignment(4); // 2^4 bytes.
1731 verifyIntrinsicTables();
1732 }
1734 // This has so far only been implemented for 64-bit MachO.
1735 bool X86TargetLowering::useLoadStackGuardNode() const {
1736 return Subtarget->isTargetMachO() && Subtarget->is64Bit();
1737 }
1739 TargetLoweringBase::LegalizeTypeAction
1740 X86TargetLowering::getPreferredVectorAction(EVT VT) const {
1741 if (ExperimentalVectorWideningLegalization &&
1742 VT.getVectorNumElements() != 1 &&
1743 VT.getVectorElementType().getSimpleVT() != MVT::i1)
1744 return TypeWidenVector;
1746 return TargetLoweringBase::getPreferredVectorAction(VT);
1747 }
1749 EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
1750 if (!VT.isVector())
1751 return Subtarget->hasAVX512() ? MVT::i1: MVT::i8;
1753 const unsigned NumElts = VT.getVectorNumElements();
1754 const EVT EltVT = VT.getVectorElementType();
1755 if (VT.is512BitVector()) {
1756 if (Subtarget->hasAVX512())
1757 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1758 EltVT == MVT::f32 || EltVT == MVT::f64)
1759 switch (NumElts) {
1760 case 8: return MVT::v8i1;
1761 case 16: return MVT::v16i1;
1762 }
1763 if (Subtarget->hasBWI())
1764 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1765 switch (NumElts) {
1766 case 32: return MVT::v32i1;
1767 case 64: return MVT::v64i1;
1768 }
1769 }
1771 if (VT.is256BitVector() || VT.is128BitVector()) {
1772 if (Subtarget->hasVLX())
1773 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1774 EltVT == MVT::f32 || EltVT == MVT::f64)
1775 switch (NumElts) {
1776 case 2: return MVT::v2i1;
1777 case 4: return MVT::v4i1;
1778 case 8: return MVT::v8i1;
1779 }
1780 if (Subtarget->hasBWI() && Subtarget->hasVLX())
1781 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1782 switch (NumElts) {
1783 case 8: return MVT::v8i1;
1784 case 16: return MVT::v16i1;
1785 case 32: return MVT::v32i1;
1786 }
1787 }
1789 return VT.changeVectorElementTypeToInteger();
1790 }
1792 /// Helper for getByValTypeAlignment to determine
1793 /// the desired ByVal argument alignment.
1794 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
1795 if (MaxAlign == 16)
1796 return;
1797 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1798 if (VTy->getBitWidth() == 128)
1799 MaxAlign = 16;
1800 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1801 unsigned EltAlign = 0;
1802 getMaxByValAlign(ATy->getElementType(), EltAlign);
1803 if (EltAlign > MaxAlign)
1804 MaxAlign = EltAlign;
1805 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1806 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1807 unsigned EltAlign = 0;
1808 getMaxByValAlign(STy->getElementType(i), EltAlign);
1809 if (EltAlign > MaxAlign)
1810 MaxAlign = EltAlign;
1811 if (MaxAlign == 16)
1812 break;
1813 }
1814 }
1815 }
1817 /// Return the desired alignment for ByVal aggregate
1818 /// function arguments in the caller parameter area. For X86, aggregates
1819 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
1820 /// are at 4-byte boundaries.
1821 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
1822 if (Subtarget->is64Bit()) {
1823 // Max of 8 and alignment of type.
1824 unsigned TyAlign = TD->getABITypeAlignment(Ty);
1825 if (TyAlign > 8)
1826 return TyAlign;
1827 return 8;
1828 }
1830 unsigned Align = 4;
1831 if (Subtarget->hasSSE1())
1832 getMaxByValAlign(Ty, Align);
1833 return Align;
1834 }
1836 /// Returns the target specific optimal type for load
1837 /// and store operations as a result of memset, memcpy, and memmove
1838 /// lowering. If DstAlign is zero, the destination alignment can satisfy any
1839 /// constraint, so there is no need to check it. Similarly, if SrcAlign is
1840 /// zero there is no need to check it against an alignment requirement,
1841 /// probably because the source does not need to be loaded. If 'IsMemset' is
1842 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
1843 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
1844 /// source is constant so it does not need to be loaded.
1845 /// It returns EVT::Other if the type should be determined using generic
1846 /// target-independent logic.
1847 EVT
1848 X86TargetLowering::getOptimalMemOpType(uint64_t Size,
1849 unsigned DstAlign, unsigned SrcAlign,
1850 bool IsMemset, bool ZeroMemset,
1851 bool MemcpyStrSrc,
1852 MachineFunction &MF) const {
1853 const Function *F = MF.getFunction();
1854 if ((!IsMemset || ZeroMemset) &&
1855 !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
1856 if (Size >= 16 &&
1857 (Subtarget->isUnalignedMemAccessFast() ||
1858 ((DstAlign == 0 || DstAlign >= 16) &&
1859 (SrcAlign == 0 || SrcAlign >= 16)))) {
1860 if (Size >= 32) {
1861 if (Subtarget->hasInt256())
1862 return MVT::v8i32;
1863 if (Subtarget->hasFp256())
1864 return MVT::v8f32;
1865 }
1866 if (Subtarget->hasSSE2())
1867 return MVT::v4i32;
1868 if (Subtarget->hasSSE1())
1869 return MVT::v4f32;
1870 } else if (!MemcpyStrSrc && Size >= 8 &&
1871 !Subtarget->is64Bit() &&
1872 Subtarget->hasSSE2()) {
1873 // Do not use f64 to lower memcpy if source is string constant. It's
1874 // better to use i32 to avoid the loads.
1875 return MVT::f64;
1876 }
1877 }
1878 if (Subtarget->is64Bit() && Size >= 8)
1879 return MVT::i64;
1880 return MVT::i32;
1881 }
1883 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
1884 if (VT == MVT::f32)
1885 return X86ScalarSSEf32;
1886 else if (VT == MVT::f64)
1887 return X86ScalarSSEf64;
1888 return true;
1889 }
1891 bool
1892 X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
1893 unsigned,
1894 unsigned,
1895 bool *Fast) const {
1896 if (Fast)
1897 *Fast = Subtarget->isUnalignedMemAccessFast();
1898 return true;
1899 }
1901 /// Return the entry encoding for a jump table in the
1902 /// current function. The returned value is a member of the
1903 /// MachineJumpTableInfo::JTEntryKind enum.
1904 unsigned X86TargetLowering::getJumpTableEncoding() const {
1905 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF
1906 // symbol.
1907 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1908 Subtarget->isPICStyleGOT())
1909 return MachineJumpTableInfo::EK_Custom32;
1911 // Otherwise, use the normal jump table encoding heuristics.
1912 return TargetLowering::getJumpTableEncoding();
1913 }
1915 const MCExpr *
1916 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1917 const MachineBasicBlock *MBB,
1918 unsigned uid, MCContext &Ctx) const {
1919 assert(MBB->getParent()->getTarget().getRelocationModel() == Reloc::PIC_ &&
1920 Subtarget->isPICStyleGOT());
1921 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF
1922 // entries.
1923 return MCSymbolRefExpr::Create(MBB->getSymbol(),
1924 MCSymbolRefExpr::VK_GOTOFF, Ctx);
1925 }
1927 /// Returns relocation base for the given PIC jumptable.
1928 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1929 SelectionDAG &DAG) const {
1930 if (!Subtarget->is64Bit())
1931 // This doesn't have SDLoc associated with it, but is not really the
1932 // same as a Register.
1933 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy());
1934 return Table;
1935 }
1937 /// This returns the relocation base for the given PIC jumptable,
1938 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
1939 const MCExpr *X86TargetLowering::
1940 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1941 MCContext &Ctx) const {
1942 // X86-64 uses RIP relative addressing based on the jump table label.
1943 if (Subtarget->isPICStyleRIPRel())
1944 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1946 // Otherwise, the reference is relative to the PIC base.
1947 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
1948 }
1950 // FIXME: Why is this routine here? Move to RegInfo!
1951 std::pair<const TargetRegisterClass*, uint8_t>
1952 X86TargetLowering::findRepresentativeClass(MVT VT) const {
1953 const TargetRegisterClass *RRC = nullptr;
1954 uint8_t Cost = 1;
1955 switch (VT.SimpleTy) {
1956 default:
1957 return TargetLowering::findRepresentativeClass(VT);
1958 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1959 RRC = Subtarget->is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
1960 break;
1961 case MVT::x86mmx:
1962 RRC = &X86::VR64RegClass;
1963 break;
1964 case MVT::f32: case MVT::f64:
1965 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1966 case MVT::v4f32: case MVT::v2f64:
1967 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
1968 case MVT::v4f64:
1969 RRC = &X86::VR128RegClass;
1970 break;
1971 }
1972 return std::make_pair(RRC, Cost);
1973 }
1975 bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
1976 unsigned &Offset) const {
1977 if (!Subtarget->isTargetLinux())
1978 return false;
1980 if (Subtarget->is64Bit()) {
1981 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
1982 Offset = 0x28;
1983 if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
1984 AddressSpace = 256;
1985 else
1986 AddressSpace = 257;
1987 } else {
1988 // %gs:0x14 on i386
1989 Offset = 0x14;
1990 AddressSpace = 256;
1991 }
1992 return true;
1993 }
1995 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1996 unsigned DestAS) const {
1997 assert(SrcAS != DestAS && "Expected different address spaces!");
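1998 // (Address spaces 256 and up model the x86 segment overrides such as %gs and %fs, which do change semantics.)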
1999 return SrcAS < 256 && DestAS < 256;
2000 }
2002 //===----------------------------------------------------------------------===//
2003 // Return Value Calling Convention Implementation
2004 //===----------------------------------------------------------------------===//
2006 #include "X86GenCallingConv.inc"
2008 bool
2009 X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2010 MachineFunction &MF, bool isVarArg,
2011 const SmallVectorImpl<ISD::OutputArg> &Outs,
2012 LLVMContext &Context) const {
2013 SmallVector<CCValAssign, 16> RVLocs;
2014 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2015 return CCInfo.CheckReturn(Outs, RetCC_X86);
2016 }
2018 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2019 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2020 return ScratchRegs;
2021 }
2023 SDValue
2024 X86TargetLowering::LowerReturn(SDValue Chain,
2025 CallingConv::ID CallConv, bool isVarArg,
2026 const SmallVectorImpl<ISD::OutputArg> &Outs,
2027 const SmallVectorImpl<SDValue> &OutVals,
2028 SDLoc dl, SelectionDAG &DAG) const {
2029 MachineFunction &MF = DAG.getMachineFunction();
2030 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2032 SmallVector<CCValAssign, 16> RVLocs;
2033 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2034 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2036 SDValue Flag;
2037 SmallVector<SDValue, 6> RetOps;
2038 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2039 // Operand #1 = Bytes To Pop
2040 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
2041 MVT::i16));
2043 // Copy the result values into the output registers.
2044 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2045 CCValAssign &VA = RVLocs[i];
2046 assert(VA.isRegLoc() && "Can only return in registers!");
2047 SDValue ValToCopy = OutVals[i];
2048 EVT ValVT = ValToCopy.getValueType();
2050 // Promote values to the appropriate types.
2051 if (VA.getLocInfo() == CCValAssign::SExt)
2052 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2053 else if (VA.getLocInfo() == CCValAssign::ZExt)
2054 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2055 else if (VA.getLocInfo() == CCValAssign::AExt)
2056 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2057 else if (VA.getLocInfo() == CCValAssign::BCvt)
2058 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy);
2060 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2061 "Unexpected FP-extend for return value.");
2063 // If this is x86-64, and we disabled SSE, we can't return FP values,
2064 // or SSE or MMX vectors.
2065 if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
2066 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
2067 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
2068 report_fatal_error("SSE register return with SSE disabled");
2069 }
2070 // Likewise we can't return F64 values with SSE1 only. gcc does so, but
2071 // llvm-gcc has never done it right and no one has noticed, so this
2072 // should be OK for now.
2073 if (ValVT == MVT::f64 &&
2074 (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
2075 report_fatal_error("SSE2 register return with SSE2 disabled");
2077 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2078 // the RET instruction and handled by the FP Stackifier.
2079 if (VA.getLocReg() == X86::FP0 ||
2080 VA.getLocReg() == X86::FP1) {
2081 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2082 // change the value to the FP stack register class.
2083 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2084 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2085 RetOps.push_back(ValToCopy);
2086 // Don't emit a copytoreg.
2087 continue;
2088 }
2090 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2091 // which is returned in RAX / RDX.
2092 if (Subtarget->is64Bit()) {
2093 if (ValVT == MVT::x86mmx) {
2094 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2095 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy);
2096 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2097 ValToCopy);
2098 // If we don't have SSE2 available, convert to v4f32 so the generated
2099 // register is legal.
2100 if (!Subtarget->hasSSE2())
2101 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, ValToCopy);
2102 }
2103 }
2104 }
2106 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
2107 Flag = Chain.getValue(1);
2108 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2109 }
2111 // The x86-64 ABIs require that for returning structs by value we copy
2112 // the sret argument into %rax/%eax (depending on ABI) for the return.
2113 // Win32 requires us to put the sret argument to %eax as well.
2114 // We saved the argument into a virtual register in the entry block,
2115 // so now we copy the value out and into %rax/%eax.
2117 // Checking Function.hasStructRetAttr() here is insufficient because the IR
2118 // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
2119 // false, then an sret argument may be implicitly inserted in the SelDAG. In
2120 // either case FuncInfo->setSRetReturnReg() will have been called.
2121 if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
2122 assert((Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) &&
2123 "No need for an sret register");
2124 SDValue Val = DAG.getCopyFromReg(Chain, dl, SRetReg, getPointerTy());
2126 unsigned RetValReg
2127 = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ?
2128 X86::RAX : X86::EAX;
2129 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2130 Flag = Chain.getValue(1);
2132 // RAX/EAX now acts like a return value.
2133 RetOps.push_back(DAG.getRegister(RetValReg, getPointerTy()));
2134 }
2136 RetOps[0] = Chain; // Update chain.
2138 // Add the flag if we have it.
2139 if (Flag.getNode())
2140 RetOps.push_back(Flag);
2142 return DAG.getNode(X86ISD::RET_FLAG, dl, MVT::Other, RetOps);
2143 }
2145 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2146 if (N->getNumValues() != 1)
2147 return false;
2148 if (!N->hasNUsesOfValue(1, 0))
2149 return false;
2151 SDValue TCChain = Chain;
2152 SDNode *Copy = *N->use_begin();
2153 if (Copy->getOpcode() == ISD::CopyToReg) {
2154 // If the copy has a glue operand, we conservatively assume it isn't safe to
2155 // perform a tail call.
2156 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2157 return false;
2158 TCChain = Copy->getOperand(0);
2159 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2160 return false;
2162 bool HasRet = false;
2163 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2164 UI != UE; ++UI) {
2165 if (UI->getOpcode() != X86ISD::RET_FLAG)
2166 return false;
2167 // If we are returning more than one value, we can definitely
2168 // not make a tail call; see PR19530.
2169 if (UI->getNumOperands() > 4)
2170 return false;
2171 if (UI->getNumOperands() == 4 &&
2172 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2173 return false;
2174 HasRet = true;
2175 }
2177 if (!HasRet)
2178 return false;
2180 Chain = TCChain;
2181 return true;
2182 }
2184 EVT
2185 X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
2186 ISD::NodeType ExtendKind) const {
2187 MVT ReturnMVT;
2188 // TODO: Is this also valid on 32-bit?
2189 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
2190 ReturnMVT = MVT::i8;
2191 else
2192 ReturnMVT = MVT::i32;
2194 EVT MinVT = getRegisterType(Context, ReturnMVT);
2195 return VT.bitsLT(MinVT) ? MinVT : VT;
2196 }
2198 /// Lower the result values of a call into the
2199 /// appropriate copies out of appropriate physical registers.
2201 SDValue
2202 X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
2203 CallingConv::ID CallConv, bool isVarArg,
2204 const SmallVectorImpl<ISD::InputArg> &Ins,
2205 SDLoc dl, SelectionDAG &DAG,
2206 SmallVectorImpl<SDValue> &InVals) const {
2208 // Assign locations to each value returned by this call.
2209 SmallVector<CCValAssign, 16> RVLocs;
2210 bool Is64Bit = Subtarget->is64Bit();
2211 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2212 *DAG.getContext());
2213 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2215 // Copy all of the result registers out of their specified physreg.
2216 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2217 CCValAssign &VA = RVLocs[i];
2218 EVT CopyVT = VA.getValVT();
2220 // If this is x86-64, and we disabled SSE, we can't return FP values
2221 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
2222 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
2223 report_fatal_error("SSE register return with SSE disabled");
2224 }
2226 // If we prefer to use the value in xmm registers, copy it out as f80 and
2227 // use a truncate to move it from fp stack reg to xmm reg.
2228 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
2229 isScalarFPTypeInSSEReg(VA.getValVT()))
2230 CopyVT = MVT::f80;
2232 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
2233 CopyVT, InFlag).getValue(1);
2234 SDValue Val = Chain.getValue(0);
2236 if (CopyVT != VA.getValVT())
2237 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
2238 // This truncation won't change the value.
2239 DAG.getIntPtrConstant(1));
2241 InFlag = Chain.getValue(2);
2242 InVals.push_back(Val);
2243 }
2245 return Chain;
2246 }
2248 //===----------------------------------------------------------------------===//
2249 // C & StdCall & Fast Calling Convention implementation
2250 //===----------------------------------------------------------------------===//
2251 // The StdCall calling convention is the standard for many Windows API
2252 // routines. It differs from the C calling convention only a little: the
2253 // callee cleans up the stack instead of the caller, and symbols are
2254 // decorated in some fancy way :) It doesn't support any vector arguments.
2255 // For info on fast calling convention see Fast Calling Convention (tail call)
2256 // implementation LowerX86_32FastCCCallTo.
2258 /// CallIsStructReturn - Determines whether a call uses struct return
2259 /// semantics.
2260 enum StructReturnType {
2261 NotStructReturn,
2262 RegStructReturn,
2263 StackStructReturn
2264 };
2265 static StructReturnType
2266 callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
2267 if (Outs.empty())
2268 return NotStructReturn;
2270 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
2271 if (!Flags.isSRet())
2272 return NotStructReturn;
2273 if (Flags.isInReg())
2274 return RegStructReturn;
2275 return StackStructReturn;
2276 }
2278 /// Determines whether a function uses struct return semantics.
2279 static StructReturnType
2280 argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
2281 if (Ins.empty())
2282 return NotStructReturn;
2284 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
2285 if (!Flags.isSRet())
2286 return NotStructReturn;
2287 if (Flags.isInReg())
2288 return RegStructReturn;
2289 return StackStructReturn;
2290 }
2292 /// Make a copy of an aggregate at address specified by "Src" to address
2293 /// "Dst" with size and alignment information specified by the specific
2294 /// parameter attribute. The copy will be passed as a byval function parameter.
2295 static SDValue
2296 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
2297 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
2298 SDLoc dl) {
2299 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
2301 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2302 /*isVolatile*/false, /*AlwaysInline=*/true,
2303 MachinePointerInfo(), MachinePointerInfo());
2304 }
2306 /// Return true if the calling convention is one that
2307 /// supports tail call optimization.
2308 static bool IsTailCallConvention(CallingConv::ID CC) {
2309 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
2310 CC == CallingConv::HiPE);
2311 }
2313 /// \brief Return true if the calling convention is a C calling convention.
2314 static bool IsCCallConvention(CallingConv::ID CC) {
2315 return (CC == CallingConv::C || CC == CallingConv::X86_64_Win64 ||
2316 CC == CallingConv::X86_64_SysV);
2317 }
2319 bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
2320 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
2321 return false;
2323 CallSite CS(CI);
2324 CallingConv::ID CalleeCC = CS.getCallingConv();
2325 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
2326 return false;
2328 return true;
2329 }
2331 /// Return true if the function is being made into
2332 /// a tailcall target by changing its ABI.
2333 static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
2334 bool GuaranteedTailCallOpt) {
2335 return GuaranteedTailCallOpt && IsTailCallConvention(CC);
2336 }
2338 SDValue
2339 X86TargetLowering::LowerMemArgument(SDValue Chain,
2340 CallingConv::ID CallConv,
2341 const SmallVectorImpl<ISD::InputArg> &Ins,
2342 SDLoc dl, SelectionDAG &DAG,
2343 const CCValAssign &VA,
2344 MachineFrameInfo *MFI,
2345 unsigned i) const {
2346 // Create the nodes corresponding to a load from this parameter slot.
2347 ISD::ArgFlagsTy Flags = Ins[i].Flags;
2348 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(
2349 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
2350 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
2352 EVT ValVT;
2353 // If value is passed by pointer we have address passed instead of the value
2354 // itself.
2355 if (VA.getLocInfo() == CCValAssign::Indirect)
2356 ValVT = VA.getLocVT();
2357 else
2358 ValVT = VA.getValVT();
2360 // FIXME: For now, all byval parameter objects are marked mutable. This can be
2361 // changed with more analysis.
2362 // In case of tail call optimization, mark all arguments mutable, since they
2363 // could be overwritten by the lowering of arguments in case of a tail call.
2364 if (Flags.isByVal()) {
2365 unsigned Bytes = Flags.getByValSize();
2366 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
2367 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
2368 return DAG.getFrameIndex(FI, getPointerTy());
2369 } else {
2370 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
2371 VA.getLocMemOffset(), isImmutable);
2372 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2373 return DAG.getLoad(ValVT, dl, Chain, FIN,
2374 MachinePointerInfo::getFixedStack(FI),
2375 false, false, false, 0);
2376 }
2377 }
2379 // FIXME: Get this from tablegen.
2380 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
2381 const X86Subtarget *Subtarget) {
2382 assert(Subtarget->is64Bit());
2384 if (Subtarget->isCallingConvWin64(CallConv)) {
2385 static const MCPhysReg GPR64ArgRegsWin64[] = {
2386 X86::RCX, X86::RDX, X86::R8, X86::R9
2387 };
2388 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
2389 }
2391 static const MCPhysReg GPR64ArgRegs64Bit[] = {
2392 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
2393 };
2394 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
2395 }
2397 // FIXME: Get this from tablegen.
2398 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
2399 CallingConv::ID CallConv,
2400 const X86Subtarget *Subtarget) {
2401 assert(Subtarget->is64Bit());
2402 if (Subtarget->isCallingConvWin64(CallConv)) {
2403 // The XMM registers which might contain var arg parameters are shadowed
2404 // in their paired GPR. So we only need to save the GPR to their home
2405 // slots.
2406 // TODO: __vectorcall will change this.
2407 return None;
2408 }
2410 const Function *Fn = MF.getFunction();
2411 bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
2412 assert(!(MF.getTarget().Options.UseSoftFloat && NoImplicitFloatOps) &&
2413 "SSE register cannot be used when SSE is disabled!");
2414 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps ||
2415 !Subtarget->hasSSE1())
2416 // Kernel mode asks for SSE to be disabled, so there are no XMM argument
2417 // registers.
2418 return None;
2420 static const MCPhysReg XMMArgRegs64Bit[] = {
2421 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2422 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2423 };
2424 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
2425 }
2427 SDValue
2428 X86TargetLowering::LowerFormalArguments(SDValue Chain,
2429 CallingConv::ID CallConv,
2430 bool isVarArg,
2431 const SmallVectorImpl<ISD::InputArg> &Ins,
2432 SDLoc dl,
2433 SelectionDAG &DAG,
2434 SmallVectorImpl<SDValue> &InVals)
2435 const {
2436 MachineFunction &MF = DAG.getMachineFunction();
2437 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2439 const Function* Fn = MF.getFunction();
2440 if (Fn->hasExternalLinkage() &&
2441 Subtarget->isTargetCygMing() &&
2442 Fn->getName() == "main")
2443 FuncInfo->setForceFramePointer(true);
2445 MachineFrameInfo *MFI = MF.getFrameInfo();
2446 bool Is64Bit = Subtarget->is64Bit();
2447 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2449 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2450 "Var args not supported with calling convention fastcc, ghc or hipe");
2452 // Assign locations to all of the incoming arguments.
2453 SmallVector<CCValAssign, 16> ArgLocs;
2454 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2456 // Allocate shadow area for Win64
2457 if (IsWin64)
2458 CCInfo.AllocateStack(32, 8);
2460 CCInfo.AnalyzeFormalArguments(Ins, CC_X86);
2462 unsigned LastVal = ~0U;
2463 SDValue ArgValue;
2464 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2465 CCValAssign &VA = ArgLocs[i];
2466 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
2467 // places.
2468 assert(VA.getValNo() != LastVal &&
2469 "Don't support value assigned to multiple locs yet");
2471 LastVal = VA.getValNo();
2473 if (VA.isRegLoc()) {
2474 EVT RegVT = VA.getLocVT();
2475 const TargetRegisterClass *RC;
2476 if (RegVT == MVT::i32)
2477 RC = &X86::GR32RegClass;
2478 else if (Is64Bit && RegVT == MVT::i64)
2479 RC = &X86::GR64RegClass;
2480 else if (RegVT == MVT::f32)
2481 RC = &X86::FR32RegClass;
2482 else if (RegVT == MVT::f64)
2483 RC = &X86::FR64RegClass;
2484 else if (RegVT.is512BitVector())
2485 RC = &X86::VR512RegClass;
2486 else if (RegVT.is256BitVector())
2487 RC = &X86::VR256RegClass;
2488 else if (RegVT.is128BitVector())
2489 RC = &X86::VR128RegClass;
2490 else if (RegVT == MVT::x86mmx)
2491 RC = &X86::VR64RegClass;
2492 else if (RegVT == MVT::i1)
2493 RC = &X86::VK1RegClass;
2494 else if (RegVT == MVT::v8i1)
2495 RC = &X86::VK8RegClass;
2496 else if (RegVT == MVT::v16i1)
2497 RC = &X86::VK16RegClass;
2498 else if (RegVT == MVT::v32i1)
2499 RC = &X86::VK32RegClass;
2500 else if (RegVT == MVT::v64i1)
2501 RC = &X86::VK64RegClass;
2503 llvm_unreachable("Unknown argument type!");
2505 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2506 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
2508 // If this is an 8 or 16-bit value, it is really passed promoted to 32
2509 // bits. Insert an assert[sz]ext to capture this, then truncate to the
2510 // right size.
2511 if (VA.getLocInfo() == CCValAssign::SExt)
2512 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2513 DAG.getValueType(VA.getValVT()));
2514 else if (VA.getLocInfo() == CCValAssign::ZExt)
2515 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2516 DAG.getValueType(VA.getValVT()));
2517 else if (VA.getLocInfo() == CCValAssign::BCvt)
2518 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
2520 if (VA.isExtInLoc()) {
2521 // Handle MMX values passed in XMM regs.
2522 if (RegVT.isVector())
2523 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
2524 else
2525 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2526 }
2527 } else {
2528 assert(VA.isMemLoc());
2529 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i);
2530 }
2532 // If value is passed via pointer - do a load.
2533 if (VA.getLocInfo() == CCValAssign::Indirect)
2534 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
2535 MachinePointerInfo(), false, false, false, 0);
2537 InVals.push_back(ArgValue);
2538 }
2540 if (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) {
2541 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2542 // The x86-64 ABIs require that for returning structs by value we copy
2543 // the sret argument into %rax/%eax (depending on ABI) for the return.
2544 // Win32 requires us to put the sret argument to %eax as well.
2545 // Save the argument into a virtual register so that we can access it
2546 // from the return points.
2547 if (Ins[i].Flags.isSRet()) {
2548 unsigned Reg = FuncInfo->getSRetReturnReg();
2549 if (!Reg) {
2550 MVT PtrTy = getPointerTy();
2551 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
2552 FuncInfo->setSRetReturnReg(Reg);
2553 }
2554 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[i]);
2555 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
2556 break;
2557 }
2558 }
2559 }
2561 unsigned StackSize = CCInfo.getNextStackOffset();
2562 // Align stack specially for tail calls.
2563 if (FuncIsMadeTailCallSafe(CallConv,
2564 MF.getTarget().Options.GuaranteedTailCallOpt))
2565 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
2567 // If the function takes variable number of arguments, make a frame index for
2568 // the start of the first vararg value... for expansion of llvm.va_start. We
2569 // can skip this if there are no va_start calls.
2570 if (MFI->hasVAStart() &&
2571 (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
2572 CallConv != CallingConv::X86_ThisCall))) {
2573 FuncInfo->setVarArgsFrameIndex(
2574 MFI->CreateFixedObject(1, StackSize, true));
2575 }
2577 // Figure out if XMM registers are in use.
2578 assert(!(MF.getTarget().Options.UseSoftFloat &&
2579 Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
2580 "SSE register cannot be used when SSE is disabled!");
2582 // 64-bit calling conventions support varargs and register parameters, so we
2583 // have to do extra work to spill them in the prologue.
2584 if (Is64Bit && isVarArg && MFI->hasVAStart()) {
2585 // Find the first unallocated argument registers.
2586 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
2587 ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
2588 unsigned NumIntRegs =
2589 CCInfo.getFirstUnallocated(ArgGPRs.data(), ArgGPRs.size());
2590 unsigned NumXMMRegs =
2591 CCInfo.getFirstUnallocated(ArgXMMs.data(), ArgXMMs.size());
2592 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
2593 "SSE register cannot be used when SSE is disabled!");
2595 // Gather all the live in physical registers.
2596 SmallVector<SDValue, 6> LiveGPRs;
2597 SmallVector<SDValue, 8> LiveXMMRegs;
2598 SDValue ALVal;
2599 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
2600 unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
2601 LiveGPRs.push_back(
2602 DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
2603 }
2604 if (!ArgXMMs.empty()) {
2605 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2606 ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
2607 for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
2608 unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
2609 LiveXMMRegs.push_back(
2610 DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
2611 }
2612 }
2614 if (IsWin64) {
2615 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
2616 // Get to the caller-allocated home save location. Add 8 to account
2617 // for the return address.
2618 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
2619 FuncInfo->setRegSaveFrameIndex(
2620 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
2621 // Fixup to set vararg frame on shadow area (4 x i64).
2622 if (NumIntRegs < 4)
2623 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
2624 } else {
2625 // For X86-64, if there are vararg parameters that are passed via
2626 // registers, then we must store them to their spots on the stack so
2627 // they may be loaded by dereferencing the result of va_next.
2628 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
2629 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
2630 FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
2631 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
2632 }
2634 // Store the integer parameter registers.
2635 SmallVector<SDValue, 8> MemOps;
2636 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
2637 getPointerTy());
2638 unsigned Offset = FuncInfo->getVarArgsGPOffset();
2639 for (SDValue Val : LiveGPRs) {
2640 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
2641 DAG.getIntPtrConstant(Offset));
2642 SDValue Store =
2643 DAG.getStore(Val.getValue(1), dl, Val, FIN,
2644 MachinePointerInfo::getFixedStack(
2645 FuncInfo->getRegSaveFrameIndex(), Offset),
2646 false, false, 0);
2647 MemOps.push_back(Store);
2648 Offset += 8;
2649 }
2651 if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
2652 // Now store the XMM (fp + vector) parameter registers.
2653 SmallVector<SDValue, 12> SaveXMMOps;
2654 SaveXMMOps.push_back(Chain);
2655 SaveXMMOps.push_back(ALVal);
2656 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2657 FuncInfo->getRegSaveFrameIndex()));
2658 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2659 FuncInfo->getVarArgsFPOffset()));
2660 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
2661 LiveXMMRegs.end());
2662 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
2663 MVT::Other, SaveXMMOps));
2664 }
2666 if (!MemOps.empty())
2667 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
2668 }
2670 if (isVarArg && MFI->hasMustTailInVarArgFunc()) {
2671 // Find the largest legal vector type.
2672 MVT VecVT = MVT::Other;
2673 // FIXME: Only some x86_32 calling conventions support AVX512.
2674 if (Subtarget->hasAVX512() &&
2675 (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
2676 CallConv == CallingConv::Intel_OCL_BI)))
2677 VecVT = MVT::v16f32;
2678 else if (Subtarget->hasAVX())
2679 VecVT = MVT::v8f32;
2680 else if (Subtarget->hasSSE2())
2681 VecVT = MVT::v4f32;
2683 // We forward some GPRs and some vector types.
2684 SmallVector<MVT, 2> RegParmTypes;
2685 MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
2686 RegParmTypes.push_back(IntVT);
2687 if (VecVT != MVT::Other)
2688 RegParmTypes.push_back(VecVT);
2690 // Compute the set of forwarded registers. The rest are scratch.
2691 SmallVectorImpl<ForwardedRegister> &Forwards =
2692 FuncInfo->getForwardedMustTailRegParms();
2693 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
2695 // Conservatively forward AL on x86_64, since it might be used for varargs.
2696 if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
2697 unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2698 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
2699 }
2701 // Copy all forwards from physical to virtual registers.
2702 for (ForwardedRegister &F : Forwards) {
2703 // FIXME: Can we use a less constrained schedule?
2704 SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
2705 F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
2706 Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
2707 }
2708 }
2710 // Some CCs need callee pop.
2711 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
2712 MF.getTarget().Options.GuaranteedTailCallOpt)) {
2713 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
2714 } else {
2715 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
2716 // If this is an sret function, the return should pop the hidden pointer.
2717 if (!Is64Bit && !IsTailCallConvention(CallConv) &&
2718 !Subtarget->getTargetTriple().isOSMSVCRT() &&
2719 argsAreStructReturn(Ins) == StackStructReturn)
2720 FuncInfo->setBytesToPopOnReturn(4);
2721 }
2723 if (!Is64Bit) {
2724 // RegSaveFrameIndex is X86-64 only.
2725 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
2726 if (CallConv == CallingConv::X86_FastCall ||
2727 CallConv == CallingConv::X86_ThisCall)
2728 // fastcc functions can't have varargs.
2729 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
2730 }
2732 FuncInfo->setArgumentStackSize(StackSize);
2734 return Chain;
2735 }
2737 SDValue
2738 X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
2739 SDValue StackPtr, SDValue Arg,
2740 SDLoc dl, SelectionDAG &DAG,
2741 const CCValAssign &VA,
2742 ISD::ArgFlagsTy Flags) const {
2743 unsigned LocMemOffset = VA.getLocMemOffset();
2744 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
2745 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
2746 if (Flags.isByVal())
2747 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
2749 return DAG.getStore(Chain, dl, Arg, PtrOff,
2750 MachinePointerInfo::getStack(LocMemOffset),
2751 false, false, 0);
2752 }
2754 /// Emit a load of return address if tail call
2755 /// optimization is performed and it is required.
2756 SDValue
2757 X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
2758 SDValue &OutRetAddr, SDValue Chain,
2759 bool IsTailCall, bool Is64Bit,
2760 int FPDiff, SDLoc dl) const {
2761 // Adjust the Return address stack slot.
2762 EVT VT = getPointerTy();
2763 OutRetAddr = getReturnAddressFrameIndex(DAG);
2765 // Load the "old" Return address.
2766 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
2767 false, false, false, 0);
2768 return SDValue(OutRetAddr.getNode(), 1);
2769 }
2771 /// Emit a store of the return address if tail call
2772 /// optimization is performed and it is required (FPDiff!=0).
2773 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
2774 SDValue Chain, SDValue RetAddrFrIdx,
2775 EVT PtrVT, unsigned SlotSize,
2776 int FPDiff, SDLoc dl) {
2777 // Store the return address to the appropriate stack slot.
2778 if (!FPDiff) return Chain;
2779 // Calculate the new stack slot for the return address.
2780 int NewReturnAddrFI =
2781 MF.getFrameInfo()->CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
2782 false);
2783 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
2784 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
2785 MachinePointerInfo::getFixedStack(NewReturnAddrFI),
2786 false, false, 0);
2787 return Chain;
2788 }
2790 SDValue
2791 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2792 SmallVectorImpl<SDValue> &InVals) const {
2793 SelectionDAG &DAG = CLI.DAG;
2794 SDLoc &dl = CLI.DL;
2795 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2796 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2797 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2798 SDValue Chain = CLI.Chain;
2799 SDValue Callee = CLI.Callee;
2800 CallingConv::ID CallConv = CLI.CallConv;
2801 bool &isTailCall = CLI.IsTailCall;
2802 bool isVarArg = CLI.IsVarArg;
2804 MachineFunction &MF = DAG.getMachineFunction();
2805 bool Is64Bit = Subtarget->is64Bit();
2806 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2807 StructReturnType SR = callIsStructReturn(Outs);
2808 bool IsSibcall = false;
2809 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
2811 if (MF.getTarget().Options.DisableTailCalls)
2812 isTailCall = false;
2814 bool IsMustTail = CLI.CS && CLI.CS->isMustTailCall();
2815 if (IsMustTail) {
2816 // Force this to be a tail call. The verifier rules are enough to ensure
2817 // that we can lower this successfully without moving the return address
2818 // around.
2819 isTailCall = true;
2820 } else if (isTailCall) {
2821 // Check if it's really possible to do a tail call.
2822 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
2823 isVarArg, SR != NotStructReturn,
2824 MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
2825 Outs, OutVals, Ins, DAG);
2827 // Sibcalls are automatically detected tailcalls which do not require
2828 // ABI changes.
2829 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
2836 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2837 "Var args not supported with calling convention fastcc, ghc or hipe");
2839 // Analyze operands of the call, assigning locations to each operand.
2840 SmallVector<CCValAssign, 16> ArgLocs;
2841 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2843 // Allocate shadow area for Win64
2844 if (IsWin64)
2845 CCInfo.AllocateStack(32, 8);
2847 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2849 // Get a count of how many bytes are to be pushed on the stack.
2850 unsigned NumBytes = CCInfo.getNextStackOffset();
2851 if (IsSibcall)
2852 // This is a sibcall. The memory operands are available in the caller's
2853 // own caller's stack.
2854 NumBytes = 0;
2855 else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
2856 IsTailCallConvention(CallConv))
2857 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
2859 int FPDiff = 0;
2860 if (isTailCall && !IsSibcall && !IsMustTail) {
2861 // Lower arguments at fp - stackoffset + fpdiff.
2862 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
2864 FPDiff = NumBytesCallerPushed - NumBytes;
2866 // Set the delta of movement of the return address stack slot,
2867 // but only if this delta is greater than any previous delta.
2868 if (FPDiff < X86Info->getTCReturnAddrDelta())
2869 X86Info->setTCReturnAddrDelta(FPDiff);
2870 }
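// Worked example (assumed values): a fastcc caller that pops 12 bytes of its
// own arguments on return tail-calls a callee needing 20 bytes of arguments,
// so FPDiff = 12 - 20 = -8. The most negative delta seen is recorded so the
// prologue can reserve enough room to slide the return address down.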
2872 unsigned NumBytesToPush = NumBytes;
2873 unsigned NumBytesToPop = NumBytes;
2875 // If we have an inalloca argument, all stack space has already been allocated
2876 // for us and is right at the top of the stack. We don't support multiple
2877 // arguments passed in memory when using inalloca.
2878 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
2879 NumBytesToPush = 0;
2880 if (!ArgLocs.back().isMemLoc())
2881 report_fatal_error("cannot use inalloca attribute on a register "
2882 "parameter");
2883 if (ArgLocs.back().getLocMemOffset() != 0)
2884 report_fatal_error("any parameter with the inalloca attribute must be "
2885 "the only memory argument");
2889 Chain = DAG.getCALLSEQ_START(
2890 Chain, DAG.getIntPtrConstant(NumBytesToPush, true), dl);
2892 SDValue RetAddrFrIdx;
2893 // Load return address for tail calls.
2894 if (isTailCall && FPDiff)
2895 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2896 Is64Bit, FPDiff, dl);
2898 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2899 SmallVector<SDValue, 8> MemOpChains;
2900 SDValue StackPtr;
2902 // Walk the register/memloc assignments, inserting copies/loads. In the case
2903 // of tail call optimization, arguments are handled later.
2904 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2905 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2906 // Skip inalloca arguments, they have already been written.
2907 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2908 if (Flags.isInAlloca())
2909 continue;
2911 CCValAssign &VA = ArgLocs[i];
2912 EVT RegVT = VA.getLocVT();
2913 SDValue Arg = OutVals[i];
2914 bool isByVal = Flags.isByVal();
2916 // Promote the value if needed.
2917 switch (VA.getLocInfo()) {
2918 default: llvm_unreachable("Unknown loc info!");
2919 case CCValAssign::Full: break;
2920 case CCValAssign::SExt:
2921 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2922 break;
2923 case CCValAssign::ZExt:
2924 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2925 break;
2926 case CCValAssign::AExt:
2927 if (RegVT.is128BitVector()) {
2928 // Special case: passing MMX values in XMM registers.
2929 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
2930 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
2931 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
2932 } else
2933 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
2934 break;
2935 case CCValAssign::BCvt:
2936 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
2937 break;
2938 case CCValAssign::Indirect: {
2939 // Store the argument.
2940 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2941 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2942 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
2943 MachinePointerInfo::getFixedStack(FI),
2944 false, false, 0);
2945 Arg = SpillSlot;
2946 break;
2947 }
2948 }
2950 if (VA.isRegLoc()) {
2951 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2952 if (isVarArg && IsWin64) {
2953 // Win64 ABI requires argument XMM reg to be copied to the corresponding
2954 // shadow reg if callee is a varargs function.
2955 unsigned ShadowReg = 0;
2956 switch (VA.getLocReg()) {
2957 case X86::XMM0: ShadowReg = X86::RCX; break;
2958 case X86::XMM1: ShadowReg = X86::RDX; break;
2959 case X86::XMM2: ShadowReg = X86::R8; break;
2960 case X86::XMM3: ShadowReg = X86::R9; break;
2961 }
2962 if (ShadowReg)
2963 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2965 } else if (!IsSibcall && (!isTailCall || isByVal)) {
2966 assert(VA.isMemLoc());
2967 if (!StackPtr.getNode())
2968 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
2969 getPointerTy());
2970 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2971 dl, DAG, VA, Flags));
2972 }
2973 }
2975 if (!MemOpChains.empty())
2976 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2978 if (Subtarget->isPICStyleGOT()) {
2979 // ELF / PIC requires GOT in the EBX register before function calls via PLT
2980 // GOT pointer.
2981 if (!isTailCall) {
2982 RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
2983 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy())));
2984 } else {
2985 // If we are tail calling and generating PIC/GOT style code, load the
2986 // address of the callee into ECX. The value in ecx is used as target of
2987 // the tail jump. This is done to circumvent the ebx/callee-saved problem
2988 // for tail calls on PIC/GOT architectures. Normally we would just put the
2989 // address of GOT into ebx and then call target@PLT. But for tail calls
2990 // ebx would be restored (since ebx is callee saved) before jumping to the
2991 // callee.
2993 // Note: The actual moving to ECX is done further down.
2994 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
2995 if (G && !G->getGlobal()->hasHiddenVisibility() &&
2996 !G->getGlobal()->hasProtectedVisibility())
2997 Callee = LowerGlobalAddress(Callee, DAG);
2998 else if (isa<ExternalSymbolSDNode>(Callee))
2999 Callee = LowerExternalSymbol(Callee, DAG);
3000 }
3001 }
3003 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
3004 // From AMD64 ABI document:
3005 // For calls that may call functions that use varargs or stdargs
3006 // (prototype-less calls or calls to functions containing ellipsis (...) in
3007 // the declaration) %al is used as hidden argument to specify the number
3008 // of SSE registers used. The contents of %al do not need to match exactly
3009 // the number of registers, but must be an upper bound on the number of SSE
3010 // registers used and is in the range 0 - 8 inclusive.
3012 // Count the number of XMM registers allocated.
3013 static const MCPhysReg XMMArgRegs[] = {
3014 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3015 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3016 };
3017 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
3018 assert((Subtarget->hasSSE1() || !NumXMMRegs)
3019 && "SSE registers cannot be used when SSE is disabled");
3021 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
3022 DAG.getConstant(NumXMMRegs, MVT::i8)));
3023 }
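// Illustrative sketch (assumed C source, not from this file): for
//   double d = 1.0; printf("%f\n", d);
// one XMM register (XMM0) carries an argument, so the code above emits the
// equivalent of "movb $1, %al" immediately before the call to the varargs
// function.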
3025 if (isVarArg && IsMustTail) {
3026 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
3027 for (const auto &F : Forwards) {
3028 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3029 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
3030 }
3031 }
3033 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
3034 // don't need this because the eligibility check rejects calls that require
3035 // shuffling arguments passed in memory.
3036 if (!IsSibcall && isTailCall) {
3037 // Force all the incoming stack arguments to be loaded from the stack
3038 // before any new outgoing arguments are stored to the stack, because the
3039 // outgoing stack slots may alias the incoming argument stack slots, and
3040 // the alias isn't otherwise explicit. This is slightly more conservative
3041 // than necessary, because it means that each store effectively depends
3042 // on every argument instead of just those arguments it would clobber.
3043 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
3045 SmallVector<SDValue, 8> MemOpChains2;
3046 SDValue FIN;
3047 int FI = 0;
3048 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3049 CCValAssign &VA = ArgLocs[i];
3050 if (VA.isRegLoc())
3051 continue;
3052 assert(VA.isMemLoc());
3053 SDValue Arg = OutVals[i];
3054 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3055 // Skip inalloca arguments. They don't require any work.
3056 if (Flags.isInAlloca())
3057 continue;
3058 // Create frame index.
3059 int32_t Offset = VA.getLocMemOffset()+FPDiff;
3060 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
3061 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
3062 FIN = DAG.getFrameIndex(FI, getPointerTy());
3064 if (Flags.isByVal()) {
3065 // Copy relative to framepointer.
3066 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
3067 if (!StackPtr.getNode())
3068 StackPtr = DAG.getCopyFromReg(Chain, dl,
3069 RegInfo->getStackRegister(),
3070 getPointerTy());
3071 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
3073 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
3074 ArgChain, Flags, DAG, dl));
3075 } else {
3077 // Store relative to framepointer.
3078 MemOpChains2.push_back(
3079 DAG.getStore(ArgChain, dl, Arg, FIN,
3080 MachinePointerInfo::getFixedStack(FI),
3081 false, false, 0));
3082 }
3083 }
3085 if (!MemOpChains2.empty())
3086 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
3088 // Store the return address to the appropriate stack slot.
3089 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
3090 getPointerTy(), RegInfo->getSlotSize(),
3091 FPDiff, dl);
3092 }
3094 // Build a sequence of copy-to-reg nodes chained together with token chain
3095 // and flag operands which copy the outgoing args into registers.
3096 SDValue InFlag;
3097 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3098 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3099 RegsToPass[i].second, InFlag);
3100 InFlag = Chain.getValue(1);
3103 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
3104 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
3105 // In the 64-bit large code model, we have to make all calls
3106 // through a register, since the call instruction's 32-bit
3107 // pc-relative offset may not be large enough to hold the whole
3108 // address.
3109 } else if (Callee->getOpcode() == ISD::GlobalAddress) {
3110 // If the callee is a GlobalAddress node (quite common, every direct call
3111 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
3112 // it.
3113 GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee);
3115 // We should use an extra load for direct calls to dllimported functions in
3116 // non-JIT mode.
3117 const GlobalValue *GV = G->getGlobal();
3118 if (!GV->hasDLLImportStorageClass()) {
3119 unsigned char OpFlags = 0;
3120 bool ExtraLoad = false;
3121 unsigned WrapperKind = ISD::DELETED_NODE;
3123 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
3124 // external symbols must go through the PLT in PIC mode. If the symbol
3125 // has hidden or protected visibility, or if it is static or local, then
3126 // we don't need to use the PLT - we can directly call it.
3127 if (Subtarget->isTargetELF() &&
3128 DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
3129 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
3130 OpFlags = X86II::MO_PLT;
3131 } else if (Subtarget->isPICStyleStubAny() &&
3132 (GV->isDeclaration() || GV->isWeakForLinker()) &&
3133 (!Subtarget->getTargetTriple().isMacOSX() ||
3134 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3135 // PC-relative references to external symbols should go through $stub,
3136 // unless we're building with the leopard linker or later, which
3137 // automatically synthesizes these stubs.
3138 OpFlags = X86II::MO_DARWIN_STUB;
3139 } else if (Subtarget->isPICStyleRIPRel() && isa<Function>(GV) &&
3140 cast<Function>(GV)->hasFnAttribute(Attribute::NonLazyBind)) {
3141 // If the function is marked as non-lazy, generate an indirect call
3142 // which loads from the GOT directly. This avoids runtime overhead
3143 // at the cost of eager binding (and one extra byte of encoding).
3144 OpFlags = X86II::MO_GOTPCREL;
3145 WrapperKind = X86ISD::WrapperRIP;
3146 ExtraLoad = true;
3147 }
3149 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
3150 G->getOffset(), OpFlags);
3152 // Add a wrapper if needed.
3153 if (WrapperKind != ISD::DELETED_NODE)
3154 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee);
3155 // Add extra indirection if needed.
3156 if (ExtraLoad)
3157 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
3158 MachinePointerInfo::getGOT(),
3159 false, false, false, 0);
3160 }
3161 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3162 unsigned char OpFlags = 0;
3164 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
3165 // external symbols should go through the PLT.
3166 if (Subtarget->isTargetELF() &&
3167 DAG.getTarget().getRelocationModel() == Reloc::PIC_) {
3168 OpFlags = X86II::MO_PLT;
3169 } else if (Subtarget->isPICStyleStubAny() &&
3170 (!Subtarget->getTargetTriple().isMacOSX() ||
3171 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3172 // PC-relative references to external symbols should go through $stub,
3173 // unless we're building with the leopard linker or later, which
3174 // automatically synthesizes these stubs.
3175 OpFlags = X86II::MO_DARWIN_STUB;
3176 }
3178 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
3179 OpFlags);
3180 } else if (Subtarget->isTarget64BitILP32() &&
3181 Callee->getValueType(0) == MVT::i32) {
3182 // Zero-extend the 32-bit Callee address into a 64-bit one, per the x32 ABI.
3183 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
3184 }
3186 // Returns a chain & a flag for retval copy to use.
3187 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3188 SmallVector<SDValue, 8> Ops;
3190 if (!IsSibcall && isTailCall) {
3191 Chain = DAG.getCALLSEQ_END(Chain,
3192 DAG.getIntPtrConstant(NumBytesToPop, true),
3193 DAG.getIntPtrConstant(0, true), InFlag, dl);
3194 InFlag = Chain.getValue(1);
3195 }
3197 Ops.push_back(Chain);
3198 Ops.push_back(Callee);
3200 if (isTailCall)
3201 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
3203 // Add argument registers to the end of the list so that they are known live
3205 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3206 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
3207 RegsToPass[i].second.getValueType()));
3209 // Add a register mask operand representing the call-preserved registers.
3210 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
3211 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
3212 assert(Mask && "Missing call preserved mask for calling convention");
3213 Ops.push_back(DAG.getRegisterMask(Mask));
3215 if (InFlag.getNode())
3216 Ops.push_back(InFlag);
3218 if (isTailCall) {
3219 // We used to do:
3220 //// If this is the first return lowered for this function, add the regs
3221 //// to the liveout set for the function.
3222 // This isn't right, although it's probably harmless on x86; liveouts
3223 // should be computed from returns not tail calls. Consider a void
3224 // function making a tail call to a function returning int.
3225 return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
3226 }
3228 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
3229 InFlag = Chain.getValue(1);
3231 // Create the CALLSEQ_END node.
3232 unsigned NumBytesForCalleeToPop;
3233 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3234 DAG.getTarget().Options.GuaranteedTailCallOpt))
3235 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
3236 else if (!Is64Bit && !IsTailCallConvention(CallConv) &&
3237 !Subtarget->getTargetTriple().isOSMSVCRT() &&
3238 SR == StackStructReturn)
3239 // If this is a call to a struct-return function, the callee
3240 // pops the hidden struct pointer, so we have to push it back.
3241 // This is common for Darwin/X86, Linux & Mingw32 targets.
3242 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
3243 NumBytesForCalleeToPop = 4;
3244 else
3245 NumBytesForCalleeToPop = 0; // Callee pops nothing.
3247 // Returns a flag for retval copy to use.
3248 if (!IsSibcall) {
3249 Chain = DAG.getCALLSEQ_END(Chain,
3250 DAG.getIntPtrConstant(NumBytesToPop, true),
3251 DAG.getIntPtrConstant(NumBytesForCalleeToPop,
3252 true),
3253 InFlag, dl);
3254 InFlag = Chain.getValue(1);
3255 }
3257 // Handle result values, copying them out of physregs into vregs that we
3258 // return.
3259 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
3260 Ins, dl, DAG, InVals);
3261 }
3263 //===----------------------------------------------------------------------===//
3264 // Fast Calling Convention (tail call) implementation
3265 //===----------------------------------------------------------------------===//
3267 // Like std call, the callee cleans up the arguments, except that ECX is
3268 // reserved for storing the tail-called function address. Only 2 registers are
3269 // free for argument passing (inreg). Tail call optimization is performed
3270 // provided:
3271 // * tailcallopt is enabled
3272 // * caller/callee are fastcc
3273 // On X86_64 architecture with GOT-style position independent code only local
3274 // (within module) calls are supported at the moment.
3275 // To keep the stack aligned according to platform abi the function
3276 // GetAlignedArgumentStackSize ensures that argument delta is always multiples
3277 // of stack alignment. (Dynamic linkers need this - darwin's dyld for example)
3278 // If a tail called function callee has more arguments than the caller the
3279 // caller needs to make sure that there is room to move the RETADDR to. This is
3280 // achieved by reserving an area the size of the argument delta right after the
3281 // original RETADDR, but before the saved framepointer or the spilled registers
3282 // e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
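// An illustrative stack layout for this example (sketch, top of stack last):
//   arg1
//   arg2
//   RETADDR
//   [ new RETADDR / move area, sized by the argument delta ]
//   (possible saved EBP)
//   callee-saved registers, locals ...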
3294 /// GetAlignedArgumentStackSize - Align the stack size, e.g. to 16n + 12 for
3295 /// a 16-byte alignment requirement.
3296 unsigned
3297 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
3298 SelectionDAG& DAG) const {
3299 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3300 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
3301 unsigned StackAlignment = TFI.getStackAlignment();
3302 uint64_t AlignMask = StackAlignment - 1;
3303 int64_t Offset = StackSize;
3304 unsigned SlotSize = RegInfo->getSlotSize();
3305 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
3306 // Number smaller than 12 so just add the difference.
3307 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
3308 } else {
3309 // Mask out the lower bits, add the stack alignment once, plus the 12 bytes.
3310 Offset = ((~AlignMask) & Offset) + StackAlignment +
3311 (StackAlignment-SlotSize);
3312 }
3313 return Offset;
3314 }
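// Worked example (assumed values): StackSize = 20, StackAlignment = 16,
// SlotSize = 4. Then 20 & 15 = 4 <= 12, so Offset += 12 - 4, yielding
// 28 = 16 + 12: once the 4-byte return address is pushed on top, the stack
// is 16-byte aligned again.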
3316 /// MatchingStackOffset - Return true if the given stack call argument is
3317 /// already available in the same position (relatively) of the caller's
3318 /// incoming argument stack.
3319 static
3320 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
3321 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
3322 const X86InstrInfo *TII) {
3323 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
3324 int FI = INT_MAX;
3325 if (Arg.getOpcode() == ISD::CopyFromReg) {
3326 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
3327 if (!TargetRegisterInfo::isVirtualRegister(VR))
3328 return false;
3329 MachineInstr *Def = MRI->getVRegDef(VR);
3330 if (!Def)
3331 return false;
3332 if (!Flags.isByVal()) {
3333 if (!TII->isLoadFromStackSlot(Def, FI))
3334 return false;
3335 } else {
3336 unsigned Opcode = Def->getOpcode();
3337 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
3338 Opcode == X86::LEA64_32r) &&
3339 Def->getOperand(1).isFI()) {
3340 FI = Def->getOperand(1).getIndex();
3341 Bytes = Flags.getByValSize();
3342 } else
3343 return false;
3344 }
3345 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
3346 if (Flags.isByVal())
3347 // ByVal argument is passed in as a pointer but it's now being
3348 // dereferenced. e.g.
3349 // define @foo(%struct.X* %A) {
3350 // tail call @bar(%struct.X* byval %A)
3351 // }
3352 return false;
3353 SDValue Ptr = Ld->getBasePtr();
3354 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
3355 if (!FINode)
3356 return false;
3357 FI = FINode->getIndex();
3358 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
3359 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
3360 FI = FINode->getIndex();
3361 Bytes = Flags.getByValSize();
3362 } else
3363 return false;
3365 assert(FI != INT_MAX);
3366 if (!MFI->isFixedObjectIndex(FI))
3367 return false;
3368 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
3369 }
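// Illustrative use (assumed scenario): in "int f(int a, int b) { return
// g(a, b); }" each outgoing argument already sits at the caller's matching
// fixed stack offset, so this returns true for every argument and the
// sibcall needs no argument stores at all.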
3371 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3372 /// for tail call optimization. Targets which want to do tail call
3373 /// optimization should implement this function.
3375 X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
3376 CallingConv::ID CalleeCC,
3377 bool isVarArg,
3378 bool isCalleeStructRet,
3379 bool isCallerStructRet,
3380 Type *RetTy,
3381 const SmallVectorImpl<ISD::OutputArg> &Outs,
3382 const SmallVectorImpl<SDValue> &OutVals,
3383 const SmallVectorImpl<ISD::InputArg> &Ins,
3384 SelectionDAG &DAG) const {
3385 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
3386 return false;
3388 // If -tailcallopt is specified, make fastcc functions tail-callable.
3389 const MachineFunction &MF = DAG.getMachineFunction();
3390 const Function *CallerF = MF.getFunction();
3392 // If the function return type is x86_fp80 and the callee return type is not,
3393 // then the FP_EXTEND of the call result is not a nop. It's not safe to
3394 // perform a tailcall optimization here.
3395 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
3396 return false;
3398 CallingConv::ID CallerCC = CallerF->getCallingConv();
3399 bool CCMatch = CallerCC == CalleeCC;
3400 bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
3401 bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
3403 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
3404 if (IsTailCallConvention(CalleeCC) && CCMatch)
3405 return true;
3406 return false;
3407 }
3409 // Look for obvious safe cases to perform tail call optimization that do not
3410 // require ABI changes. This is what gcc calls sibcall.
3412 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
3413 // emit a special epilogue.
3414 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3415 if (RegInfo->needsStackRealignment(MF))
3416 return false;
3418 // Also avoid sibcall optimization if either caller or callee uses struct
3419 // return semantics.
3420 if (isCalleeStructRet || isCallerStructRet)
3421 return false;
3423 // An stdcall/thiscall caller is expected to clean up its arguments; the
3424 // callee isn't going to do that.
3425 // FIXME: this is more restrictive than needed. We could produce a tailcall
3426 // when the stack adjustment matches. For example, with a thiscall that takes
3427 // only one argument.
3428 if (!CCMatch && (CallerCC == CallingConv::X86_StdCall ||
3429 CallerCC == CallingConv::X86_ThisCall))
3430 return false;
3432 // Do not sibcall optimize vararg calls unless all arguments are passed via
3433 // registers.
3434 if (isVarArg && !Outs.empty()) {
3436 // Optimizing for varargs on Win64 is unlikely to be safe without
3437 // additional testing.
3438 if (IsCalleeWin64 || IsCallerWin64)
3439 return false;
3441 SmallVector<CCValAssign, 16> ArgLocs;
3442 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3443 *DAG.getContext());
3445 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3446 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
3447 if (!ArgLocs[i].isRegLoc())
3448 return false;
3449 }
3451 // If the call result is in ST0 / ST1, it needs to be popped off the x87
3452 // stack. Therefore, if it's not used by the call it is not safe to optimize
3453 // this into a sibcall.
3454 bool Unused = false;
3455 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3456 if (!Ins[i].Used) {
3457 Unused = true;
3458 break;
3459 }
3460 }
3461 if (Unused) {
3462 SmallVector<CCValAssign, 16> RVLocs;
3463 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), RVLocs,
3464 *DAG.getContext());
3465 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3466 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
3467 CCValAssign &VA = RVLocs[i];
3468 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
3469 return false;
3470 }
3471 }
3473 // If the calling conventions do not match, then we'd better make sure the
3474 // results are returned in the same way as what the caller expects.
3475 if (!CCMatch) {
3476 SmallVector<CCValAssign, 16> RVLocs1;
3477 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
3478 *DAG.getContext());
3479 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
3481 SmallVector<CCValAssign, 16> RVLocs2;
3482 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
3483 *DAG.getContext());
3484 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
3486 if (RVLocs1.size() != RVLocs2.size())
3487 return false;
3488 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
3489 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
3490 return false;
3491 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
3492 return false;
3493 if (RVLocs1[i].isRegLoc()) {
3494 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
3495 return false;
3496 } else {
3497 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
3498 return false;
3499 }
3500 }
3501 }
3503 // If the callee takes no arguments then go on to check the results of the
3504 // call.
3505 if (!Outs.empty()) {
3506 // Check if stack adjustment is needed. For now, do not do this if any
3507 // argument is passed on the stack.
3508 SmallVector<CCValAssign, 16> ArgLocs;
3509 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3510 *DAG.getContext());
3512 // Allocate shadow area for Win64
3513 if (IsCalleeWin64)
3514 CCInfo.AllocateStack(32, 8);
3516 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3517 if (CCInfo.getNextStackOffset()) {
3518 MachineFunction &MF = DAG.getMachineFunction();
3519 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
3520 return false;
3522 // Check if the arguments are already laid out in the right way as
3523 // the caller's fixed stack objects.
3524 MachineFrameInfo *MFI = MF.getFrameInfo();
3525 const MachineRegisterInfo *MRI = &MF.getRegInfo();
3526 const X86InstrInfo *TII = Subtarget->getInstrInfo();
3527 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3528 CCValAssign &VA = ArgLocs[i];
3529 SDValue Arg = OutVals[i];
3530 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3531 if (VA.getLocInfo() == CCValAssign::Indirect)
3532 return false;
3533 if (!VA.isRegLoc()) {
3534 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
3535 MFI, MRI, TII))
3536 return false;
3537 }
3538 }
3539 }
3541 // If the tailcall address may be in a register, then make sure it's
3542 // possible to register allocate for it. In 32-bit, the call address can
3543 // only target EAX, EDX, or ECX since the tail call must be scheduled after
3544 // callee-saved registers are restored. These happen to be the same
3545 // registers used to pass 'inreg' arguments so watch out for those.
3546 if (!Subtarget->is64Bit() &&
3547 ((!isa<GlobalAddressSDNode>(Callee) &&
3548 !isa<ExternalSymbolSDNode>(Callee)) ||
3549 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
3550 unsigned NumInRegs = 0;
3551 // In PIC we need an extra register to formulate the address computation
3553 unsigned MaxInRegs =
3554 (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
3556 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3557 CCValAssign &VA = ArgLocs[i];
3558 if (!VA.isRegLoc())
3559 continue;
3560 unsigned Reg = VA.getLocReg();
3561 switch (Reg) {
3562 default: break;
3563 case X86::EAX: case X86::EDX: case X86::ECX:
3564 if (++NumInRegs == MaxInRegs)
3565 return false;
3566 break;
3567 }
3568 }
3569 }
3571 return true;
3572 }
3575 FastISel *
3576 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
3577 const TargetLibraryInfo *libInfo) const {
3578 return X86::createFastISel(funcInfo, libInfo);
3579 }
3581 //===----------------------------------------------------------------------===//
3582 // Other Lowering Hooks
3583 //===----------------------------------------------------------------------===//
3585 static bool MayFoldLoad(SDValue Op) {
3586 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
3587 }
3589 static bool MayFoldIntoStore(SDValue Op) {
3590 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
3591 }
3593 static bool isTargetShuffle(unsigned Opcode) {
3594 switch(Opcode) {
3595 default: return false;
3596 case X86ISD::BLENDI:
3597 case X86ISD::PSHUFB:
3598 case X86ISD::PSHUFD:
3599 case X86ISD::PSHUFHW:
3600 case X86ISD::PSHUFLW:
3601 case X86ISD::SHUFP:
3602 case X86ISD::PALIGNR:
3603 case X86ISD::MOVLHPS:
3604 case X86ISD::MOVLHPD:
3605 case X86ISD::MOVHLPS:
3606 case X86ISD::MOVLPS:
3607 case X86ISD::MOVLPD:
3608 case X86ISD::MOVSHDUP:
3609 case X86ISD::MOVSLDUP:
3610 case X86ISD::MOVDDUP:
3611 case X86ISD::MOVSS:
3612 case X86ISD::MOVSD:
3613 case X86ISD::UNPCKL:
3614 case X86ISD::UNPCKH:
3615 case X86ISD::VPERMILPI:
3616 case X86ISD::VPERM2X128:
3617 case X86ISD::VPERMI:
3618 return true;
3619 }
3620 }
3622 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3623 SDValue V1, SelectionDAG &DAG) {
3624 switch(Opc) {
3625 default: llvm_unreachable("Unknown x86 shuffle node");
3626 case X86ISD::MOVSHDUP:
3627 case X86ISD::MOVSLDUP:
3628 case X86ISD::MOVDDUP:
3629 return DAG.getNode(Opc, dl, VT, V1);
3630 }
3631 }
3633 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3634 SDValue V1, unsigned TargetMask,
3635 SelectionDAG &DAG) {
3636 switch(Opc) {
3637 default: llvm_unreachable("Unknown x86 shuffle node");
3638 case X86ISD::PSHUFD:
3639 case X86ISD::PSHUFHW:
3640 case X86ISD::PSHUFLW:
3641 case X86ISD::VPERMILPI:
3642 case X86ISD::VPERMI:
3643 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
3644 }
3645 }
3647 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3648 SDValue V1, SDValue V2, unsigned TargetMask,
3649 SelectionDAG &DAG) {
3650 switch(Opc) {
3651 default: llvm_unreachable("Unknown x86 shuffle node");
3652 case X86ISD::PALIGNR:
3653 case X86ISD::VALIGN:
3655 case X86ISD::VPERM2X128:
3656 return DAG.getNode(Opc, dl, VT, V1, V2,
3657 DAG.getConstant(TargetMask, MVT::i8));
3658 }
3659 }
3661 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3662 SDValue V1, SDValue V2, SelectionDAG &DAG) {
3663 switch(Opc) {
3664 default: llvm_unreachable("Unknown x86 shuffle node");
3665 case X86ISD::MOVLHPS:
3666 case X86ISD::MOVLHPD:
3667 case X86ISD::MOVHLPS:
3668 case X86ISD::MOVLPS:
3669 case X86ISD::MOVLPD:
3670 case X86ISD::MOVSS:
3671 case X86ISD::MOVSD:
3672 case X86ISD::UNPCKL:
3673 case X86ISD::UNPCKH:
3674 return DAG.getNode(Opc, dl, VT, V1, V2);
3675 }
3676 }
3678 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
3679 MachineFunction &MF = DAG.getMachineFunction();
3680 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3681 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3682 int ReturnAddrIndex = FuncInfo->getRAIndex();
3684 if (ReturnAddrIndex == 0) {
3685 // Set up a frame object for the return address.
3686 unsigned SlotSize = RegInfo->getSlotSize();
3687 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize,
3688 -(int64_t)SlotSize,
3689 false);
3690 FuncInfo->setRAIndex(ReturnAddrIndex);
3691 }
3693 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
3694 }
3696 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
3697 bool hasSymbolicDisplacement) {
3698 // Offset should fit into 32 bit immediate field.
3699 if (!isInt<32>(Offset))
3700 return false;
3702 // If we don't have a symbolic displacement - we don't have any extra
3703 // restrictions.
3704 if (!hasSymbolicDisplacement)
3705 return true;
3707 // FIXME: Some tweaks might be needed for medium code model.
3708 if (M != CodeModel::Small && M != CodeModel::Kernel)
3709 return false;
3711 // For the small code model we assume that the latest object is 16MB before
3712 // the end of the 31-bit boundary. We may also accept pretty large negative
3713 // constants, knowing that all objects are in the positive half of the
3714 // address space.
3714 if (M == CodeModel::Small && Offset < 16*1024*1024)
3715 return true;
3717 // For the kernel code model we know that all objects reside in the negative
3718 // half of the 32-bit address space. We may not accept negative offsets, since
3719 // they may be just off, but we may accept pretty large positive ones.
3720 if (M == CodeModel::Kernel && Offset >= 0)
3721 return true;
3723 return false;
3724 }
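// Illustrative examples (assumed values): with a symbolic displacement in the
// small code model, Offset = 0x00F00000 (15MB) is accepted while 0x7F000000
// is not; in the kernel code model Offset = 0x1000 is accepted but -8 is not.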
3726 /// isCalleePop - Determines whether the callee is required to pop its
3727 /// own arguments. Callee pop is necessary to support tail calls.
3728 bool X86::isCalleePop(CallingConv::ID CallingConv,
3729 bool is64Bit, bool IsVarArg, bool TailCallOpt) {
3730 switch (CallingConv) {
3731 default:
3732 return false;
3733 case CallingConv::X86_StdCall:
3734 case CallingConv::X86_FastCall:
3735 case CallingConv::X86_ThisCall:
3736 return !is64Bit;
3737 case CallingConv::Fast:
3738 case CallingConv::GHC:
3739 case CallingConv::HiPE:
3740 if (IsVarArg)
3741 return false;
3742 return TailCallOpt;
3743 }
3744 }
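// Illustrative example: a 32-bit X86_StdCall callee taking a single i32
// returns with "ret $4", popping its own argument, so this returns true for
// it; a vararg fastcc callee returns false even under -tailcallopt.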
3746 /// \brief Return true if the condition is an unsigned comparison operation.
3747 static bool isX86CCUnsigned(unsigned X86CC) {
3748 switch (X86CC) {
3749 default: llvm_unreachable("Invalid integer condition!");
3750 case X86::COND_E: return true;
3751 case X86::COND_G: return false;
3752 case X86::COND_GE: return false;
3753 case X86::COND_L: return false;
3754 case X86::COND_LE: return false;
3755 case X86::COND_NE: return true;
3756 case X86::COND_B: return true;
3757 case X86::COND_A: return true;
3758 case X86::COND_BE: return true;
3759 case X86::COND_AE: return true;
3760 }
3761 llvm_unreachable("covered switch fell through?!");
3762 }
3764 /// TranslateX86CC - do a one-to-one translation of an ISD::CondCode to the X86
3765 /// specific condition code, returning the condition code and the LHS/RHS of the
3766 /// comparison to make.
3767 static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
3768 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
3769 if (!isFP) {
3770 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
3771 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
3772 // X > -1 -> X == 0, jump !sign.
3773 RHS = DAG.getConstant(0, RHS.getValueType());
3774 return X86::COND_NS;
3775 }
3776 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
3777 // X < 0 -> X == 0, jump on sign.
3778 return X86::COND_S;
3779 }
3780 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
3781 // X < 1 -> X <= 0
3782 RHS = DAG.getConstant(0, RHS.getValueType());
3783 return X86::COND_LE;
3784 }
3785 }
3787 switch (SetCCOpcode) {
3788 default: llvm_unreachable("Invalid integer condition!");
3789 case ISD::SETEQ: return X86::COND_E;
3790 case ISD::SETGT: return X86::COND_G;
3791 case ISD::SETGE: return X86::COND_GE;
3792 case ISD::SETLT: return X86::COND_L;
3793 case ISD::SETLE: return X86::COND_LE;
3794 case ISD::SETNE: return X86::COND_NE;
3795 case ISD::SETULT: return X86::COND_B;
3796 case ISD::SETUGT: return X86::COND_A;
3797 case ISD::SETULE: return X86::COND_BE;
3798 case ISD::SETUGE: return X86::COND_AE;
3799 }
3800 }
3802 // First determine if it is required or is profitable to flip the operands.
3804 // If LHS is a foldable load, but RHS is not, flip the condition.
3805 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
3806 !ISD::isNON_EXTLoad(RHS.getNode())) {
3807 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
3808 std::swap(LHS, RHS);
3811 switch (SetCCOpcode) {
3812 default: break;
3813 case ISD::SETOLT:
3814 case ISD::SETOLE:
3815 case ISD::SETUGT:
3816 case ISD::SETUGE:
3817 std::swap(LHS, RHS);
3818 break;
3819 }
3821 // On a floating point condition, the flags are set as follows:
3822 //  ZF  PF  CF   op
3823 // 0 | 0 | 0 | X > Y
3824 // 0 | 0 | 1 | X < Y
3825 // 1 | 0 | 0 | X == Y
3826 // 1 | 1 | 1 | unordered
3827 switch (SetCCOpcode) {
3828 default: llvm_unreachable("Condcode should be pre-legalized away");
3829 case ISD::SETUEQ:
3830 case ISD::SETEQ: return X86::COND_E;
3831 case ISD::SETOLT: // flipped
3832 case ISD::SETOGT:
3833 case ISD::SETGT: return X86::COND_A;
3834 case ISD::SETOLE: // flipped
3835 case ISD::SETOGE:
3836 case ISD::SETGE: return X86::COND_AE;
3837 case ISD::SETUGT: // flipped
3838 case ISD::SETULT:
3839 case ISD::SETLT: return X86::COND_B;
3840 case ISD::SETUGE: // flipped
3841 case ISD::SETULE:
3842 case ISD::SETLE: return X86::COND_BE;
3843 case ISD::SETONE:
3844 case ISD::SETNE: return X86::COND_NE;
3845 case ISD::SETUO: return X86::COND_P;
3846 case ISD::SETO: return X86::COND_NP;
3847 case ISD::SETOEQ:
3848 case ISD::SETUNE: return X86::COND_INVALID;
3849 }
3850 }
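// Illustrative example: SETOLT (x < y) is handled by swapping the operands
// above and returning X86::COND_A, i.e. compare y against x and take "ja";
// this is false for unordered inputs since unordered sets ZF = PF = CF = 1.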
3852 /// hasFPCMov - is there a floating point cmov for the specific X86 condition
3853 /// code. Current x86 isa includes the following FP cmov instructions:
3854 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
3855 static bool hasFPCMov(unsigned X86CC) {
3856 switch (X86CC) {
3857 default:
3858 return false;
3859 case X86::COND_B:
3860 case X86::COND_BE:
3861 case X86::COND_E:
3862 case X86::COND_P:
3863 case X86::COND_A:
3864 case X86::COND_AE:
3865 case X86::COND_NE:
3866 case X86::COND_NP:
3867 return true;
3868 }
3869 }
3871 /// isFPImmLegal - Returns true if the target can instruction select the
3872 /// specified FP immediate natively. If false, the legalizer will
3873 /// materialize the FP immediate as a load from a constant pool.
3874 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
3875 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
3876 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
3877 return true;
3878 }
3879 return false;
3880 }
3882 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
3883 ISD::LoadExtType ExtTy,
3884 EVT NewVT) const {
3885 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
3886 // relocation target a movq or addq instruction: don't let the load shrink.
3887 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
3888 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
3889 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
3890 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
3892 return true;
3893 }
3894 /// \brief Returns true if it is beneficial to convert a load of a constant
3895 /// to just the constant itself.
3896 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
3897 Type *Ty) const {
3898 assert(Ty->isIntegerTy());
3900 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3901 if (BitSize == 0 || BitSize > 64)
3902 return false;
3904 return true;
3905 }
3906 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
3907 unsigned Index) const {
3908 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
3909 return false;
3911 return (Index == 0 || Index == ResVT.getVectorNumElements());
3912 }
3914 bool X86TargetLowering::isCheapToSpeculateCttz() const {
3915 // Speculate cttz only if we can directly use TZCNT.
3916 return Subtarget->hasBMI();
3917 }
3919 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
3920 // Speculate ctlz only if we can directly use LZCNT.
3921 return Subtarget->hasLZCNT();
3922 }
3924 /// isUndefOrInRange - Return true if Val is undef or if its value falls within
3925 /// the specified half-open range [Low, Hi).
3926 static bool isUndefOrInRange(int Val, int Low, int Hi) {
3927 return (Val < 0) || (Val >= Low && Val < Hi);
3928 }
3930 /// isUndefOrEqual - Val is either less than zero (undef) or equal to the
3931 /// specified value.
3932 static bool isUndefOrEqual(int Val, int CmpVal) {
3933 return (Val < 0 || Val == CmpVal);
3934 }
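// For example (illustrative): with the -1 == undef convention used in
// shuffle masks, isUndefOrEqual(-1, 3) and isUndefOrEqual(3, 3) are both
// true, while isUndefOrEqual(2, 3) is false.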
3936 /// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
3937 /// from position Pos and ending in Pos+Size, falls within the specified
3938 /// sequential half-open range [Low, Low+Size), or is undef.
3939 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
3940 unsigned Pos, unsigned Size, int Low) {
3941 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
3942 if (!isUndefOrEqual(Mask[i], Low))
3943 return false;
3944 return true;
3945 }
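// For example (illustrative): Mask = <4, -1, 6, 7> with Pos = 0, Size = 4 and
// Low = 4 returns true, since every non-undef element equals 4 + i.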
3947 /// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
3948 /// is suitable for input to PSHUFD. That is, it doesn't reference the other
3949 /// operand - by default it matches the first operand.
3950 static bool isPSHUFDMask(ArrayRef<int> Mask, MVT VT,
3951 bool TestSecondOperand = false) {
3952 if (VT != MVT::v4f32 && VT != MVT::v4i32 &&
3953 VT != MVT::v2f64 && VT != MVT::v2i64)
3954 return false;
3956 unsigned NumElems = VT.getVectorNumElements();
3957 unsigned Lo = TestSecondOperand ? NumElems : 0;
3958 unsigned Hi = Lo + NumElems;
3960 for (unsigned i = 0; i < NumElems; ++i)
3961 if (!isUndefOrInRange(Mask[i], (int)Lo, (int)Hi))
3962 return false;
3964 return true;
3965 }
3967 /// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
3968 /// is suitable for input to PSHUFHW.
3969 static bool isPSHUFHWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3970 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3971 return false;
3973 // Lower quadword copied in order or undef.
3974 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0))
3975 return false;
3977 // Upper quadword shuffled.
3978 for (unsigned i = 4; i != 8; ++i)
3979 if (!isUndefOrInRange(Mask[i], 4, 8))
3980 return false;
3982 if (VT == MVT::v16i16) {
3983 // Lower quadword copied in order or undef.
3984 if (!isSequentialOrUndefInRange(Mask, 8, 4, 8))
3985 return false;
3987 // Upper quadword shuffled.
3988 for (unsigned i = 12; i != 16; ++i)
3989 if (!isUndefOrInRange(Mask[i], 12, 16))
3990 return false;
3991 }
3993 return true;
3994 }
3996 /// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
3997 /// is suitable for input to PSHUFLW.
3998 static bool isPSHUFLWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3999 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
4000 return false;
4002 // Upper quadword copied in order.
4003 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4))
4004 return false;
4006 // Lower quadword shuffled.
4007 for (unsigned i = 0; i != 4; ++i)
4008 if (!isUndefOrInRange(Mask[i], 0, 4))
4009 return false;
4011 if (VT == MVT::v16i16) {
4012 // Upper quadword copied in order.
4013 if (!isSequentialOrUndefInRange(Mask, 12, 4, 12))
4014 return false;
4016 // Lower quadword shuffled.
4017 for (unsigned i = 8; i != 12; ++i)
4018 if (!isUndefOrInRange(Mask[i], 8, 12))
4019 return false;
4020 }
4022 return true;
4023 }
4025 /// \brief Return true if the mask specifies a shuffle of elements that is
4026 /// suitable for input to intralane (palignr) or interlane (valign) vector
4027 /// shift.
4028 static bool isAlignrMask(ArrayRef<int> Mask, MVT VT, bool InterLane) {
4029 unsigned NumElts = VT.getVectorNumElements();
4030 unsigned NumLanes = InterLane ? 1: VT.getSizeInBits()/128;
4031 unsigned NumLaneElts = NumElts/NumLanes;
4033 // Do not handle 64-bit element shuffles with palignr.
4034 if (NumLaneElts == 2)
4035 return false;
4037 for (unsigned l = 0; l != NumElts; l+=NumLaneElts) {
4038 unsigned i;
4039 for (i = 0; i != NumLaneElts; ++i) {
4040 if (Mask[i+l] >= 0)
4041 break;
4042 }
4044 // Lane is all undef, go to next lane
4045 if (i == NumLaneElts)
4046 continue;
4048 int Start = Mask[i+l];
4050 // Make sure its in this lane in one of the sources
4051 if (!isUndefOrInRange(Start, l, l+NumLaneElts) &&
4052 !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts))
4053 return false;
4055 // If not lane 0, then we must match lane 0
4056 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l))
4057 return false;
4059 // Correct second source to be contiguous with first source
4060 if (Start >= (int)NumElts)
4061 Start -= NumElts - NumLaneElts;
4063 // Make sure we're shifting in the right direction.
4064 if (Start <= (int)(i+l))
4065 return false;
4069 // Check the rest of the elements to see if they are consecutive.
4070 for (++i; i != NumLaneElts; ++i) {
4071 int Idx = Mask[i+l];
4073 // Make sure its in this lane
4074 if (!isUndefOrInRange(Idx, l, l+NumLaneElts) &&
4075 !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts))
4076 return false;
4078 // If not lane 0, then we must match lane 0
4079 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l))
4080 return false;
4082 if (Idx >= (int)NumElts)
4083 Idx -= NumElts - NumLaneElts;
4085 if (!isUndefOrEqual(Idx, Start+i))
4086 return false;
4087 }
4088 }
4090 return true;
4091 }
4094 /// \brief Return true if the node specifies a shuffle of elements that is
4095 /// suitable for input to PALIGNR.
4096 static bool isPALIGNRMask(ArrayRef<int> Mask, MVT VT,
4097 const X86Subtarget *Subtarget) {
4098 if ((VT.is128BitVector() && !Subtarget->hasSSSE3()) ||
4099 (VT.is256BitVector() && !Subtarget->hasInt256()) ||
4100 VT.is512BitVector())
4101 // FIXME: Add AVX512BW.
4102 return false;
4104 return isAlignrMask(Mask, VT, false);
4105 }
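// Illustrative example: on v8i16, Mask = <1, 2, 3, 4, 5, 6, 7, 8> is a
// PALIGNR mask: elements 1..7 of the first source followed by element 0 of
// the second, i.e. the two-source concatenation shifted by one element.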
4107 /// \brief Return true if the node specifies a shuffle of elements that is
4108 /// suitable for input to VALIGN.
4109 static bool isVALIGNMask(ArrayRef<int> Mask, MVT VT,
4110 const X86Subtarget *Subtarget) {
4111 // FIXME: Add AVX512VL.
4112 if (!VT.is512BitVector() || !Subtarget->hasAVX512())
4113 return false;
4114 return isAlignrMask(Mask, VT, true);
4115 }
4117 /// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
4118 /// the two vector operands have swapped position.
4119 static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
4120 unsigned NumElems) {
4121 for (unsigned i = 0; i != NumElems; ++i) {
4122 int idx = Mask[i];
4123 if (idx < 0)
4124 continue;
4125 else if (idx < (int)NumElems)
4126 Mask[i] = idx + NumElems;
4127 else
4128 Mask[i] = idx - NumElems;
4129 }
4130 }
4132 /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
4133 /// specifies a shuffle of elements that is suitable for input to 128/256-bit
4134 /// SHUFPS and SHUFPD. If Commuted is true, then it checks for sources to be
4135 /// reverse of what x86 shuffles want.
4136 static bool isSHUFPMask(ArrayRef<int> Mask, MVT VT, bool Commuted = false) {
4138 unsigned NumElems = VT.getVectorNumElements();
4139 unsigned NumLanes = VT.getSizeInBits()/128;
4140 unsigned NumLaneElems = NumElems/NumLanes;
4142 if (NumLaneElems != 2 && NumLaneElems != 4)
4143 return false;
4145 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4146 bool symmetricMaskRequired =
4147 (VT.getSizeInBits() >= 256) && (EltSize == 32);
4149 // VSHUFPSY divides the resulting vector into 4 chunks.
4150 // The sources are also split into 4 chunks, and each destination
4151 // chunk must come from a different source chunk.
4153 // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0
4154 // SRC2 => Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
4156 // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4,
4157 // Y3..Y0, Y3..Y0, X3..X0, X3..X0
4159 // VSHUFPDY divides the resulting vector into 4 chunks.
4160 // The sources are also split into 4 chunks, and each destination
4161 // chunk must come from a different source chunk.
4163 // SRC1 => X3 X2 X1 X0
4164 // SRC2 => Y3 Y2 Y1 Y0
4166 // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0
4168 SmallVector<int, 4> MaskVal(NumLaneElems, -1);
4169 unsigned HalfLaneElems = NumLaneElems/2;
4170 for (unsigned l = 0; l != NumElems; l += NumLaneElems) {
4171 for (unsigned i = 0; i != NumLaneElems; ++i) {
4172 int Idx = Mask[i+l];
4173 unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0);
4174 if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems))
4175 return false;
4176 // For VSHUFPSY, the mask of the second half must be the same as the
4177 // first but with the appropriate offsets. This works in the same way as
4178 // VPERMILPS works with masks.
4179 if (!symmetricMaskRequired || Idx < 0)
4180 continue;
4181 if (MaskVal[i] < 0) {
4182 MaskVal[i] = Idx - l;
4183 continue;
4184 }
4185 if ((signed)(Idx - l) != MaskVal[i])
4186 return false;
4187 }
4188 }
4190 return true;
4191 }
4193 /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
4194 /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
4195 static bool isMOVHLPSMask(ArrayRef<int> Mask, MVT VT) {
4196 if (!VT.is128BitVector())
4197 return false;
4199 unsigned NumElems = VT.getVectorNumElements();
4201 if (NumElems != 4)
4202 return false;
4204 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
4205 return isUndefOrEqual(Mask[0], 6) &&
4206 isUndefOrEqual(Mask[1], 7) &&
4207 isUndefOrEqual(Mask[2], 2) &&
4208 isUndefOrEqual(Mask[3], 3);
4209 }
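// Illustrative example: for v4f32, Mask = <6, 7, 2, 3> matches MOVHLPS: the
// low half of the result is the high half of V2 and the high half of the
// result is the high half of V1.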
4211 /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
4212 /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
4214 static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, MVT VT) {
4215 if (!VT.is128BitVector())
4216 return false;
4218 unsigned NumElems = VT.getVectorNumElements();
4220 if (NumElems != 4)
4221 return false;
4223 return isUndefOrEqual(Mask[0], 2) &&
4224 isUndefOrEqual(Mask[1], 3) &&
4225 isUndefOrEqual(Mask[2], 2) &&
4226 isUndefOrEqual(Mask[3], 3);
4227 }
4229 /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
4230 /// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
4231 static bool isMOVLPMask(ArrayRef<int> Mask, MVT VT) {
4232 if (!VT.is128BitVector())
4233 return false;
4235 unsigned NumElems = VT.getVectorNumElements();
4237 if (NumElems != 2 && NumElems != 4)
4238 return false;
4240 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4241 if (!isUndefOrEqual(Mask[i], i + NumElems))
4242 return false;
4244 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
4245 if (!isUndefOrEqual(Mask[i], i))
4246 return false;
4248 return true;
4249 }
4251 /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
4252 /// specifies a shuffle of elements that is suitable for input to MOVLHPS.
4253 static bool isMOVLHPSMask(ArrayRef<int> Mask, MVT VT) {
4254 if (!VT.is128BitVector())
4255 return false;
4257 unsigned NumElems = VT.getVectorNumElements();
4259 if (NumElems != 2 && NumElems != 4)
4260 return false;
4262 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4263 if (!isUndefOrEqual(Mask[i], i))
4264 return false;
4266 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4267 if (!isUndefOrEqual(Mask[i + e], i + NumElems))
4268 return false;
4270 return true;
4271 }
4273 /// isINSERTPSMask - Return true if the specified VECTOR_SHUFFLE operand
4274 /// specifies a shuffle of elements that is suitable for input to INSERTPS.
4275 /// i.e. all but one element come from the same vector.
4276 static bool isINSERTPSMask(ArrayRef<int> Mask, MVT VT) {
4277 // TODO: Deal with AVX's VINSERTPS
4278 if (!VT.is128BitVector() || (VT != MVT::v4f32 && VT != MVT::v4i32))
4279 return false;
4281 unsigned CorrectPosV1 = 0;
4282 unsigned CorrectPosV2 = 0;
4283 for (int i = 0, e = (int)VT.getVectorNumElements(); i != e; ++i) {
4284 if (Mask[i] == -1) {
4285 ++CorrectPosV1;
4286 ++CorrectPosV2;
4287 continue;
4288 }
4290 if (Mask[i] == i)
4291 ++CorrectPosV1;
4292 else if (Mask[i] == i + 4)
4293 ++CorrectPosV2;
4294 }
4296 if (CorrectPosV1 == 3 || CorrectPosV2 == 3)
4297 // We have 3 elements (undefs count as elements from any vector) from one
4298 // vector, and one from another.
4299 return true;
4301 return false;
4302 }
4305 // Some special combinations that can be optimized.
4307 static
4308 SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp,
4309 SelectionDAG &DAG) {
4310 MVT VT = SVOp->getSimpleValueType(0);
4311 SDLoc dl(SVOp);
4313 if (VT != MVT::v8i32 && VT != MVT::v8f32)
4314 return SDValue();
4316 ArrayRef<int> Mask = SVOp->getMask();
4318 // These are the special masks that may be optimized.
4319 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14};
4320 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15};
4321 bool MatchEvenMask = true;
4322 bool MatchOddMask = true;
4323 for (int i=0; i<8; ++i) {
4324 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i]))
4325 MatchEvenMask = false;
4326 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i]))
4327 MatchOddMask = false;
4330 if (!MatchEvenMask && !MatchOddMask)
4331 return SDValue();
4333 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT);
4335 SDValue Op0 = SVOp->getOperand(0);
4336 SDValue Op1 = SVOp->getOperand(1);
4338 if (MatchEvenMask) {
4339 // Shift the second operand right by 32 bits.
4340 static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 };
4341 Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask);
4342 } else {
4343 // Shift the first operand left by 32 bits.
4344 static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 };
4345 Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask);
4346 }
4347 static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15};
4348 return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask);
4349 }
4351 /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
4352 /// specifies a shuffle of elements that is suitable for input to UNPCKL.
4353 static bool isUNPCKLMask(ArrayRef<int> Mask, MVT VT,
4354 bool HasInt256, bool V2IsSplat = false) {
4356 assert(VT.getSizeInBits() >= 128 &&
4357 "Unsupported vector type for unpckl");
4359 unsigned NumElts = VT.getVectorNumElements();
4360 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4361 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4362 return false;
4364 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4365 "Unsupported vector type for unpckh");
4367 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4368 unsigned NumLanes = VT.getSizeInBits()/128;
4369 unsigned NumLaneElts = NumElts/NumLanes;
4371 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4372 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4373 int BitI = Mask[l+i];
4374 int BitI1 = Mask[l+i+1];
4375 if (!isUndefOrEqual(BitI, j))
4376 return false;
4377 if (V2IsSplat) {
4378 if (!isUndefOrEqual(BitI1, NumElts))
4379 return false;
4380 } else {
4381 if (!isUndefOrEqual(BitI1, j + NumElts))
4382 return false;
4383 }
4384 }
4385 }
4387 return true;
4388 }
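// Illustrative example: for v4i32, Mask = <0, 4, 1, 5> interleaves the low
// halves of the two sources, exactly what punpckldq produces.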
4390 /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
4391 /// specifies a shuffle of elements that is suitable for input to UNPCKH.
4392 static bool isUNPCKHMask(ArrayRef<int> Mask, MVT VT,
4393 bool HasInt256, bool V2IsSplat = false) {
4394 assert(VT.getSizeInBits() >= 128 &&
4395 "Unsupported vector type for unpckh");
4397 unsigned NumElts = VT.getVectorNumElements();
4398 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4399 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4400 return false;
4402 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4403 "Unsupported vector type for unpckh");
4405 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4406 unsigned NumLanes = VT.getSizeInBits()/128;
4407 unsigned NumLaneElts = NumElts/NumLanes;
4409 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4410 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4411 int BitI = Mask[l+i];
4412 int BitI1 = Mask[l+i+1];
4413 if (!isUndefOrEqual(BitI, j))
4414 return false;
4415 if (V2IsSplat) {
4416 if (!isUndefOrEqual(BitI1, NumElts))
4417 return false;
4418 } else {
4419 if (!isUndefOrEqual(BitI1, j+NumElts))
4420 return false;
4421 }
4422 }
4423 }
4425 return true;
4426 }
4427 /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
4428 /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
4430 static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4431 unsigned NumElts = VT.getVectorNumElements();
4432 bool Is256BitVec = VT.is256BitVector();
4434 if (VT.is512BitVector())
4435 return false;
4436 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4437 "Unsupported vector type for unpckh");
4439 if (Is256BitVec && NumElts != 4 && NumElts != 8 &&
4440 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4441 return false;
4443 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern
4444 // FIXME: Need a better way to get rid of this, there's no latency difference
4445 // between UNPCKLPD and MOVDDUP; the latter should always be checked first and
4446 // the former later. We should also remove the "_undef" special mask.
4447 if (NumElts == 4 && Is256BitVec)
4448 return false;
4450 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4451 // independently on 128-bit lanes.
4452 unsigned NumLanes = VT.getSizeInBits()/128;
4453 unsigned NumLaneElts = NumElts/NumLanes;
4455 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4456 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4457 int BitI = Mask[l+i];
4458 int BitI1 = Mask[l+i+1];
4460 if (!isUndefOrEqual(BitI, j))
4461 return false;
4462 if (!isUndefOrEqual(BitI1, j))
4463 return false;
4464 }
4465 }
4467 return true;
4468 }
4470 /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
4471 /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
4473 static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4474 unsigned NumElts = VT.getVectorNumElements();
4476 if (VT.is512BitVector())
4477 return false;
4479 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4480 "Unsupported vector type for unpckh");
4482 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4483 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4484 return false;
4486 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4487 // independently on 128-bit lanes.
4488 unsigned NumLanes = VT.getSizeInBits()/128;
4489 unsigned NumLaneElts = NumElts/NumLanes;
4491 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4492 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4493 int BitI = Mask[l+i];
4494 int BitI1 = Mask[l+i+1];
4495 if (!isUndefOrEqual(BitI, j))
4496 return false;
4497 if (!isUndefOrEqual(BitI1, j))
4498 return false;
4499 }
4500 }
4502 return true;
4503 }
4504 // Match for INSERTI64x4 INSERTF64x4 instructions (src0[0], src1[0]) or
4505 // (src1[0], src0[1]), manipulation with 256-bit sub-vectors
4506 static bool isINSERT64x4Mask(ArrayRef<int> Mask, MVT VT, unsigned int *Imm) {
4507 if (!VT.is512BitVector())
4508 return false;
4510 unsigned NumElts = VT.getVectorNumElements();
4511 unsigned HalfSize = NumElts/2;
4512 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, 0)) {
4513 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, NumElts)) {
4514 *Imm = 1;
4515 return true;
4516 }
4517 }
4518 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, NumElts)) {
4519 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, HalfSize)) {
4520 *Imm = 0;
4521 return true;
4522 }
4523 }
4524 return false;
4525 }
4527 /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
4528 /// specifies a shuffle of elements that is suitable for input to MOVSS,
4529 /// MOVSD, and MOVD, i.e. setting the lowest element.
4530 static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
4531 if (VT.getVectorElementType().getSizeInBits() < 32)
4532 return false;
4533 if (!VT.is128BitVector())
4534 return false;
4536 unsigned NumElts = VT.getVectorNumElements();
4538 if (!isUndefOrEqual(Mask[0], NumElts))
4539 return false;
4541 for (unsigned i = 1; i != NumElts; ++i)
4542 if (!isUndefOrEqual(Mask[i], i))
4543 return false;
4545 return true;
4546 }
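// Illustrative example: for v4i32, Mask = <4, 1, 2, 3> is a MOVL mask:
// element 0 comes from the second operand and the remaining elements come
// from the first in order, matching movss semantics.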
4548 /// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered
4549 /// as permutations between 128-bit chunks or halves. As an example: this
4551 /// vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
4552 /// The first half comes from the second half of V1 and the second half from
4553 /// the second half of V2.
4554 static bool isVPERM2X128Mask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4555 if (!HasFp256 || !VT.is256BitVector())
4556 return false;
4558 // The shuffle result is divided into half A and half B. In total the two
4559 // sources have 4 halves, namely: C, D, E, F. The final values of A and
4560 // B must come from C, D, E or F.
4561 unsigned HalfSize = VT.getVectorNumElements()/2;
4562 bool MatchA = false, MatchB = false;
4564 // Check if A comes from one of C, D, E, F.
4565 for (unsigned Half = 0; Half != 4; ++Half) {
4566 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
4567 MatchA = true;
4568 break;
4569 }
4570 }
4572 // Check if B comes from one of C, D, E, F.
4573 for (unsigned Half = 0; Half != 4; ++Half) {
4574 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
4575 MatchB = true;
4576 break;
4577 }
4578 }
4580 return MatchA && MatchB;
4581 }
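// Illustrative example: for v8i32, Mask = <4, 5, 6, 7, 12, 13, 14, 15> takes
// half A from the high half of V1 and half B from the high half of V2; the
// immediate computed below is then 1 | (3 << 4) = 0x31.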
4583 /// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
4584 /// the specified VECTOR_MASK mask with VPERM2F128/VPERM2I128 instructions.
4585 static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
4586 MVT VT = SVOp->getSimpleValueType(0);
4588 unsigned HalfSize = VT.getVectorNumElements()/2;
4590 unsigned FstHalf = 0, SndHalf = 0;
4591 for (unsigned i = 0; i < HalfSize; ++i) {
4592 if (SVOp->getMaskElt(i) > 0) {
4593 FstHalf = SVOp->getMaskElt(i)/HalfSize;
4594 break;
4595 }
4596 }
4597 for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
4598 if (SVOp->getMaskElt(i) > 0) {
4599 SndHalf = SVOp->getMaskElt(i)/HalfSize;
4600 break;
4601 }
4602 }
4604 return (FstHalf | (SndHalf << 4));
4605 }
4607 // Symmetric in-lane mask. Each lane has 4 elements (for imm8)
4608 static bool isPermImmMask(ArrayRef<int> Mask, MVT VT, unsigned& Imm8) {
4609 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4610 if (EltSize < 32)
4611 return false;
4613 unsigned NumElts = VT.getVectorNumElements();
4615 if (VT.is128BitVector() || (VT.is256BitVector() && EltSize == 64)) {
4616 for (unsigned i = 0; i != NumElts; ++i) {
4619 Imm8 |= Mask[i] << (i*2);
4624 unsigned LaneSize = 4;
4625 SmallVector<int, 4> MaskVal(LaneSize, -1);
4627 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4628 for (unsigned i = 0; i != LaneSize; ++i) {
4629 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4633 if (MaskVal[i] < 0) {
4634 MaskVal[i] = Mask[i+l] - l;
4635 Imm8 |= MaskVal[i] << (i*2);
4638 if (Mask[i+l] != (signed)(MaskVal[i]+l))
4645 /// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
4646 /// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
4647 /// Note that VPERMIL mask matching differs depending on whether the underlying
4648 /// type is 32- or 64-bit. For VPERMILPS the high half of the mask should point
4649 /// to the same elements as the low half, but within the higher half of the source.
4650 /// For VPERMILPD the two lanes can be shuffled independently of each other,
4651 /// with the same restriction that lanes can't be crossed. Also handles PSHUFDY.
4652 static bool isVPERMILPMask(ArrayRef<int> Mask, MVT VT) {
4653 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4654 if (VT.getSizeInBits() < 256 || EltSize < 32)
4656 bool symmetricMaskRequired = (EltSize == 32);
4657 unsigned NumElts = VT.getVectorNumElements();
4659 unsigned NumLanes = VT.getSizeInBits()/128;
4660 unsigned LaneSize = NumElts/NumLanes;
4661 // 2 or 4 elements in one lane
4663 SmallVector<int, 4> ExpectedMaskVal(LaneSize, -1);
4664 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4665 for (unsigned i = 0; i != LaneSize; ++i) {
4666 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4668 if (symmetricMaskRequired) {
4669 if (ExpectedMaskVal[i] < 0 && Mask[i+l] >= 0) {
4670 ExpectedMaskVal[i] = Mask[i+l] - l;
4673 if (!isUndefOrEqual(Mask[i+l], ExpectedMaskVal[i]+l))
4681 /// isCommutedMOVLMask - Returns true if the shuffle mask is the reverse of
4682 /// what x86 MOVSS wants: the lowest element of the result must come from the
4683 /// lowest element of vector 2, and the other elements from vector 1 in order.
4684 static bool isCommutedMOVLMask(ArrayRef<int> Mask, MVT VT,
4685 bool V2IsSplat = false, bool V2IsUndef = false) {
4686 if (!VT.is128BitVector())
4689 unsigned NumOps = VT.getVectorNumElements();
4690 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
4693 if (!isUndefOrEqual(Mask[0], 0))
4696 for (unsigned i = 1; i != NumOps; ++i)
4697 if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
4698 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
4699 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
4705 /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4706 /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
4707 /// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
4708 static bool isMOVSHDUPMask(ArrayRef<int> Mask, MVT VT,
4709 const X86Subtarget *Subtarget) {
4710 if (!Subtarget->hasSSE3())
4713 unsigned NumElems = VT.getVectorNumElements();
4715 if ((VT.is128BitVector() && NumElems != 4) ||
4716 (VT.is256BitVector() && NumElems != 8) ||
4717 (VT.is512BitVector() && NumElems != 16))
4720 // "i+1" is the value the indexed mask element must have
4721 for (unsigned i = 0; i != NumElems; i += 2)
4722 if (!isUndefOrEqual(Mask[i], i+1) ||
4723 !isUndefOrEqual(Mask[i+1], i+1))
4729 /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4730 /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
4731 /// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
4732 static bool isMOVSLDUPMask(ArrayRef<int> Mask, MVT VT,
4733 const X86Subtarget *Subtarget) {
4734 if (!Subtarget->hasSSE3())
4737 unsigned NumElems = VT.getVectorNumElements();
4739 if ((VT.is128BitVector() && NumElems != 4) ||
4740 (VT.is256BitVector() && NumElems != 8) ||
4741 (VT.is512BitVector() && NumElems != 16))
4744 // "i" is the value the indexed mask element must have
4745 for (unsigned i = 0; i != NumElems; i += 2)
4746 if (!isUndefOrEqual(Mask[i], i) ||
4747 !isUndefOrEqual(Mask[i+1], i))
4753 /// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand
4754 /// specifies a shuffle of elements that is suitable for input to 256-bit
4755 /// version of MOVDDUP.
4756 static bool isMOVDDUPYMask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4757 if (!HasFp256 || !VT.is256BitVector())
4760 unsigned NumElts = VT.getVectorNumElements();
4764 for (unsigned i = 0; i != NumElts/2; ++i)
4765 if (!isUndefOrEqual(Mask[i], 0))
4767 for (unsigned i = NumElts/2; i != NumElts; ++i)
4768 if (!isUndefOrEqual(Mask[i], NumElts/2))
4773 /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4774 /// specifies a shuffle of elements that is suitable for input to 128-bit
4775 /// version of MOVDDUP.
4776 static bool isMOVDDUPMask(ArrayRef<int> Mask, MVT VT) {
4777 if (!VT.is128BitVector())
4780 unsigned e = VT.getVectorNumElements() / 2;
4781 for (unsigned i = 0; i != e; ++i)
4782 if (!isUndefOrEqual(Mask[i], i))
4784 for (unsigned i = 0; i != e; ++i)
4785 if (!isUndefOrEqual(Mask[e+i], i))
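// Illustrative example (not part of the original source): for v2f64,
// e == 1, so the only accepted masks are <0, 0> and its undef variants
// <u, 0> / <0, u> -- the low element duplicated, as MOVDDUP produces.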
4790 /// isVEXTRACTIndex - Return true if the specified
4791 /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
4792 /// suitable for instruction that extract 128 or 256 bit vectors
4793 static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
4794 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4795 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4798 // The index should be aligned on a vecWidth-bit boundary.
4800 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4802 MVT VT = N->getSimpleValueType(0);
4803 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4804 bool Result = (Index * ElSize) % vecWidth == 0;
4809 /// isVINSERTIndex - Return true if the specified INSERT_SUBVECTOR
4810 /// operand specifies a subvector insert that is suitable for input to
4811 /// insertion of 128 or 256-bit subvectors
4812 static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) {
4813 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4814 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4816 // The index should be aligned on a vecWidth-bit boundary.
4818 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4820 MVT VT = N->getSimpleValueType(0);
4821 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4822 bool Result = (Index * ElSize) % vecWidth == 0;
4827 bool X86::isVINSERT128Index(SDNode *N) {
4828 return isVINSERTIndex(N, 128);
4831 bool X86::isVINSERT256Index(SDNode *N) {
4832 return isVINSERTIndex(N, 256);
4835 bool X86::isVEXTRACT128Index(SDNode *N) {
4836 return isVEXTRACTIndex(N, 128);
4839 bool X86::isVEXTRACT256Index(SDNode *N) {
4840 return isVEXTRACTIndex(N, 256);
4843 /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
4844 /// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
4845 /// Handles 128-bit and 256-bit.
4846 static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
4847 MVT VT = N->getSimpleValueType(0);
4849 assert((VT.getSizeInBits() >= 128) &&
4850 "Unsupported vector type for PSHUF/SHUFP");
4852 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate
4853 // independently on 128-bit lanes.
4854 unsigned NumElts = VT.getVectorNumElements();
4855 unsigned NumLanes = VT.getSizeInBits()/128;
4856 unsigned NumLaneElts = NumElts/NumLanes;
4858 assert((NumLaneElts == 2 || NumLaneElts == 4 || NumLaneElts == 8) &&
4859 "Only supports 2, 4 or 8 elements per lane");
4861 unsigned Shift = (NumLaneElts >= 4) ? 1 : 0;
4863 for (unsigned i = 0; i != NumElts; ++i) {
4864 int Elt = N->getMaskElt(i);
4865 if (Elt < 0) continue;
4866 Elt &= NumLaneElts - 1;
4867 unsigned ShAmt = (i << Shift) % 8;
4868 Mask |= Elt << ShAmt;
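// Worked example (illustrative): for a v4f32 mask <3, 1, 0, 2>, each mask
// element occupies 2 bits (Shift == 1), so the immediate is
//   3 | (1 << 2) | (0 << 4) | (2 << 6) == 0x87,
// the imm8 a SHUFPS/PSHUFD of that permutation would encode.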
4874 /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
4875 /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
4876 static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) {
4877 MVT VT = N->getSimpleValueType(0);
4879 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4880 "Unsupported vector type for PSHUFHW");
4882 unsigned NumElts = VT.getVectorNumElements();
4885 for (unsigned l = 0; l != NumElts; l += 8) {
4886 // 8 elements per lane, but we only care about the last 4.
4887 for (unsigned i = 0; i < 4; ++i) {
4888 int Elt = N->getMaskElt(l+i+4);
4889 if (Elt < 0) continue;
4890 Elt &= 0x3; // only 2-bits.
4891 Mask |= Elt << (i * 2);
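// Worked example (illustrative): for the v8i16 mask <0, 1, 2, 3, 7, 6, 5, 4>,
// only elements 4-7 matter; masked to 2 bits they are <3, 2, 1, 0>, giving
//   3 | (2 << 2) | (1 << 4) | (0 << 6) == 0x1B.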
4898 /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
4899 /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
4900 static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) {
4901 MVT VT = N->getSimpleValueType(0);
4903 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4904 "Unsupported vector type for PSHUFHW");
4906 unsigned NumElts = VT.getVectorNumElements();
4909 for (unsigned l = 0; l != NumElts; l += 8) {
4910 // 8 elements per lane, but we only care about the first 4.
4911 for (unsigned i = 0; i < 4; ++i) {
4912 int Elt = N->getMaskElt(l+i);
4913 if (Elt < 0) continue;
4914 Elt &= 0x3; // only 2-bits
4915 Mask |= Elt << (i * 2);
4922 /// \brief Return the appropriate immediate to shuffle the specified
4923 /// VECTOR_SHUFFLE mask with the PALIGNR (if InterLane is false) or with
4924 /// VALIGN (if InterLane is true) instructions.
4925 static unsigned getShuffleAlignrImmediate(ShuffleVectorSDNode *SVOp,
4926                                           bool InterLane) {
4927 MVT VT = SVOp->getSimpleValueType(0);
4928 unsigned EltSize = InterLane ? 1 :
4929 VT.getVectorElementType().getSizeInBits() >> 3;
4931 unsigned NumElts = VT.getVectorNumElements();
4932 unsigned NumLanes = VT.is512BitVector() ? 1 : VT.getSizeInBits()/128;
4933 unsigned NumLaneElts = NumElts/NumLanes;
4937 for (i = 0; i != NumElts; ++i) {
4938 Val = SVOp->getMaskElt(i);
4942 if (Val >= (int)NumElts)
4943 Val -= NumElts - NumLaneElts;
4945 assert(Val - i > 0 && "PALIGNR imm should be positive");
4946 return (Val - i) * EltSize;
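// Worked example (illustrative): for a v16i8 mask where element i selects
// byte i+5 (first defined element at i == 0 with Val == 5), the byte-granular
// immediate is (Val - i) * EltSize == (5 - 0) * 1 == 5.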
4949 /// \brief Return the appropriate immediate to shuffle the specified
4950 /// VECTOR_SHUFFLE mask with the PALIGNR instruction.
4951 static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) {
4952 return getShuffleAlignrImmediate(SVOp, false);
4955 /// \brief Return the appropriate immediate to shuffle the specified
4956 /// VECTOR_SHUFFLE mask with the VALIGN instruction.
4957 static unsigned getShuffleVALIGNImmediate(ShuffleVectorSDNode *SVOp) {
4958 return getShuffleAlignrImmediate(SVOp, true);
4962 static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) {
4963 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4964 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4965 llvm_unreachable("Illegal extract subvector for VEXTRACT");
4968 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4970 MVT VecVT = N->getOperand(0).getSimpleValueType();
4971 MVT ElVT = VecVT.getVectorElementType();
4973 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4974 return Index / NumElemsPerChunk;
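// Worked example (illustrative): extracting the high half of a v8i32 uses
// EXTRACT_SUBVECTOR index 4; with 128-bit chunks, NumElemsPerChunk ==
// 128 / 32 == 4, so the returned VEXTRACT immediate is 4 / 4 == 1.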
4977 static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) {
4978 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4979 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4980 llvm_unreachable("Illegal insert subvector for VINSERT");
4983 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4985 MVT VecVT = N->getSimpleValueType(0);
4986 MVT ElVT = VecVT.getVectorElementType();
4988 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4989 return Index / NumElemsPerChunk;
4992 /// getExtractVEXTRACT128Immediate - Return the appropriate immediate
4993 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128
4994 /// and VEXTRACTI128 instructions.
4995 unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) {
4996 return getExtractVEXTRACTImmediate(N, 128);
4999 /// getExtractVEXTRACT256Immediate - Return the appropriate immediate
5000 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF64x4
5001 /// and VEXTRACTI64x4 instructions.
5002 unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) {
5003 return getExtractVEXTRACTImmediate(N, 256);
5006 /// getInsertVINSERT128Immediate - Return the appropriate immediate
5007 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
5008 /// and VINSERTI128 instructions.
5009 unsigned X86::getInsertVINSERT128Immediate(SDNode *N) {
5010 return getInsertVINSERTImmediate(N, 128);
5013 /// getInsertVINSERT256Immediate - Return the appropriate immediate
5014 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF64x4
5015 /// and VINSERTI64x4 instructions.
5016 unsigned X86::getInsertVINSERT256Immediate(SDNode *N) {
5017 return getInsertVINSERTImmediate(N, 256);
5020 /// isZero - Returns true if Elt is a constant integer zero
5021 static bool isZero(SDValue V) {
5022 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
5023 return C && C->isNullValue();
5026 /// isZeroNode - Returns true if Elt is a constant integer zero or a
5027 /// floating-point constant +0.0.
5028 bool X86::isZeroNode(SDValue Elt) {
5031 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Elt))
5032 return CFP->getValueAPF().isPosZero();
5036 /// ShouldXformToMOVHLPS - Return true if the node should be transformed to
5037 /// match movhlps. The lower half elements should come from the upper half of
5038 /// V1 (and in order), and the upper half elements should come from the upper
5039 /// half of V2 (and in order).
5040 static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, MVT VT) {
5041 if (!VT.is128BitVector())
5043 if (VT.getVectorNumElements() != 4)
5045 for (unsigned i = 0, e = 2; i != e; ++i)
5046 if (!isUndefOrEqual(Mask[i], i+2))
5048 for (unsigned i = 2; i != 4; ++i)
5049 if (!isUndefOrEqual(Mask[i], i+4))
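// Illustrative example (not part of the original source): the v4f32 mask
// <2, 3, 6, 7> qualifies -- elements 2,3 of V1 form the low half and
// elements 2,3 of V2 (shuffle indices 6,7) form the high half.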
5054 /// isScalarLoadToVector - Returns true if the node is a scalar load that
5055 /// is promoted to a vector. It also returns the LoadSDNode by reference if
5056 /// required.
5057 static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = nullptr) {
5058 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
5060 N = N->getOperand(0).getNode();
5061 if (!ISD::isNON_EXTLoad(N))
5064 *LD = cast<LoadSDNode>(N);
5068 // Test whether the given value is a vector value which will be legalized
5069 // into a load.
5070 static bool WillBeConstantPoolLoad(SDNode *N) {
5071 if (N->getOpcode() != ISD::BUILD_VECTOR)
5074 // Check for any non-constant elements.
5075 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
5076 switch (N->getOperand(i).getNode()->getOpcode()) {
5078 case ISD::ConstantFP:
5085 // Vectors of all-zeros and all-ones are materialized with special
5086 // instructions rather than being loaded.
5087 return !ISD::isBuildVectorAllZeros(N) &&
5088 !ISD::isBuildVectorAllOnes(N);
5091 /// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
5092 /// match movlp{s|d}. The lower half elements should come from the lower half
5093 /// of V1 (and in order), and the upper half elements should come from the
5094 /// upper half of V2 (and in order). Since V1 will become the source of the
5095 /// MOVLP, it must be either a vector load or a scalar load to vector.
5096 static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
5097 ArrayRef<int> Mask, MVT VT) {
5098 if (!VT.is128BitVector())
5101 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
5103 // If V2 is a vector load, don't do this transformation. We will instead try
5104 // to fold the load into a SHUFPS op.
5105 if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2))
5108 unsigned NumElems = VT.getVectorNumElements();
5110 if (NumElems != 2 && NumElems != 4)
5112 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
5113 if (!isUndefOrEqual(Mask[i], i))
5115 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
5116 if (!isUndefOrEqual(Mask[i], i+NumElems))
5121 /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
5122 /// to a zero vector.
5123 /// FIXME: move to dag combiner / method on ShuffleVectorSDNode
5124 static bool isZeroShuffle(ShuffleVectorSDNode *N) {
5125 SDValue V1 = N->getOperand(0);
5126 SDValue V2 = N->getOperand(1);
5127 unsigned NumElems = N->getValueType(0).getVectorNumElements();
5128 for (unsigned i = 0; i != NumElems; ++i) {
5129 int Idx = N->getMaskElt(i);
5130 if (Idx >= (int)NumElems) {
5131 unsigned Opc = V2.getOpcode();
5132 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
5134 if (Opc != ISD::BUILD_VECTOR ||
5135 !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
5137 } else if (Idx >= 0) {
5138 unsigned Opc = V1.getOpcode();
5139 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
5141 if (Opc != ISD::BUILD_VECTOR ||
5142 !X86::isZeroNode(V1.getOperand(Idx)))
5149 /// getZeroVector - Returns a vector of specified type with all zero elements.
5151 static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
5152 SelectionDAG &DAG, SDLoc dl) {
5153 assert(VT.isVector() && "Expected a vector type");
5155 // Always build SSE zero vectors as <4 x i32> bitcasted
5156 // to their dest type. This ensures they get CSE'd.
5158 if (VT.is128BitVector()) { // SSE
5159 if (Subtarget->hasSSE2()) { // SSE2
5160 SDValue Cst = DAG.getConstant(0, MVT::i32);
5161 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5163 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5164 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
5166 } else if (VT.is256BitVector()) { // AVX
5167 if (Subtarget->hasInt256()) { // AVX2
5168 SDValue Cst = DAG.getConstant(0, MVT::i32);
5169 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5170 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5172 // 256-bit logic and arithmetic instructions in AVX are all
5173 // floating-point; there is no support for integer ops. Emit fp zeroed vectors.
5174 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5175 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5176 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops);
5178 } else if (VT.is512BitVector()) { // AVX-512
5179 SDValue Cst = DAG.getConstant(0, MVT::i32);
5180 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
5181 Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5182 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
5183 } else if (VT.getScalarType() == MVT::i1) {
5184 assert(VT.getVectorNumElements() <= 16 && "Unexpected vector type");
5185 SDValue Cst = DAG.getConstant(0, MVT::i1);
5186 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
5187 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
5189 llvm_unreachable("Unexpected vector type");
5191 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
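// Illustrative example (assumption, not part of the original source): a
// request for a zero v2f64 on SSE2 builds the canonical v4i32 all-zero
// BUILD_VECTOR and bitcasts it, so zero vectors of every 128-bit type
// share one underlying BUILD_VECTOR node through CSE.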
5194 /// getOnesVector - Returns a vector of specified type with all bits set.
5195 /// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
5196 /// no AVX2 support, use two <4 x i32> vectors inserted into an <8 x i32> appropriately.
5197 /// Then bitcast to their original type, ensuring they get CSE'd.
5198 static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG,
5200 assert(VT.isVector() && "Expected a vector type");
5202 SDValue Cst = DAG.getConstant(~0U, MVT::i32);
5204 if (VT.is256BitVector()) {
5205 if (HasInt256) { // AVX2
5206 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5207 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5209 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5210 Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
5212 } else if (VT.is128BitVector()) {
5213 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5215 llvm_unreachable("Unexpected vector type");
5217 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5220 /// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
5221 /// that point to V2 point to its first element.
5222 static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) {
5223 for (unsigned i = 0; i != NumElems; ++i) {
5224 if (Mask[i] > (int)NumElems) {
5230 /// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd
5231 /// operation of specified width.
5232 static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
5234 unsigned NumElems = VT.getVectorNumElements();
5235 SmallVector<int, 8> Mask;
5236 Mask.push_back(NumElems);
5237 for (unsigned i = 1; i != NumElems; ++i)
5239 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5242 /// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
5243 static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5245 unsigned NumElems = VT.getVectorNumElements();
5246 SmallVector<int, 8> Mask;
5247 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
5249 Mask.push_back(i + NumElems);
5251 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5254 /// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
5255 static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5257 unsigned NumElems = VT.getVectorNumElements();
5258 SmallVector<int, 8> Mask;
5259 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
5260 Mask.push_back(i + Half);
5261 Mask.push_back(i + NumElems + Half);
5263 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
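// Illustrative example (not part of the original source): for v4i32,
//   getUnpackl yields mask <0, 4, 1, 5>   (interleaved low halves)
//   getUnpackh yields mask <2, 6, 3, 7>   (interleaved high halves)
// mirroring what PUNPCKLDQ / PUNPCKHDQ produce.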
5266 // PromoteSplati8i16 - i16 and i8 vector types can't be used directly by
5267 // a generic shuffle instruction because the target has no such instructions.
5268 // Generate shuffles which repeat i16 and i8 several times until they can be
5269 // represented by v4f32, and can then be manipulated by target supported shuffles.
5270 static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
5271 MVT VT = V.getSimpleValueType();
5272 int NumElems = VT.getVectorNumElements();
5275 while (NumElems > 4) {
5276 if (EltNo < NumElems/2) {
5277 V = getUnpackl(DAG, dl, VT, V, V);
5279 V = getUnpackh(DAG, dl, VT, V, V);
5280 EltNo -= NumElems/2;
5287 /// getLegalSplat - Generate a legal splat with supported x86 shuffles
5288 static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
5289 MVT VT = V.getSimpleValueType();
5292 if (VT.is128BitVector()) {
5293 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
5294 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
5295 V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32),
5297 } else if (VT.is256BitVector()) {
5298 // To use VPERMILPS to splat scalars, the second half of the indices must
5299 // refer to the higher part, which is a duplication of the lower one,
5300 // because VPERMILPS can only handle in-lane permutations.
5301 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo,
5302 EltNo+4, EltNo+4, EltNo+4, EltNo+4 };
5304 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V);
5305 V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32),
5308 llvm_unreachable("Vector size not supported");
5310 return DAG.getNode(ISD::BITCAST, dl, VT, V);
5313 /// PromoteSplat - Splat is promoted to target supported vector shuffles.
5314 static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
5315 MVT SrcVT = SV->getSimpleValueType(0);
5316 SDValue V1 = SV->getOperand(0);
5319 int EltNo = SV->getSplatIndex();
5320 int NumElems = SrcVT.getVectorNumElements();
5321 bool Is256BitVec = SrcVT.is256BitVector();
5323 assert(((SrcVT.is128BitVector() && NumElems > 4) || Is256BitVec) &&
5324 "Unknown how to promote splat for type");
5326 // Extract the 128-bit part containing the splat element and update
5327 // the splat element index when it refers to the higher register.
5329 V1 = Extract128BitVector(V1, EltNo, DAG, dl);
5330 if (EltNo >= NumElems/2)
5331 EltNo -= NumElems/2;
5334 // i16 and i8 vector types can't be used directly by a generic shuffle
5335 // instruction because the target has no such instruction. Generate shuffles
5336 // which repeat i16 and i8 several times until they fit in i32, and then can
5337 // be manipulated by target supported shuffles.
5338 MVT EltVT = SrcVT.getVectorElementType();
5339 if (EltVT == MVT::i8 || EltVT == MVT::i16)
5340 V1 = PromoteSplati8i16(V1, DAG, EltNo);
5342 // Recreate the 256-bit vector and place the same 128-bit vector
5343 // into the low and high part. This is necessary because we want
5344 // to use VPERM* to shuffle the vectors
5346 V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1);
5349 return getLegalSplat(DAG, V1, EltNo);
5352 /// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
5353 /// vector and a zero or undef vector. This produces a shuffle where the low
5354 /// element of V2 is swizzled into the zero/undef vector, landing at element
5355 /// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
5356 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
5358 const X86Subtarget *Subtarget,
5359 SelectionDAG &DAG) {
5360 MVT VT = V2.getSimpleValueType();
5362 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
5363 unsigned NumElems = VT.getVectorNumElements();
5364 SmallVector<int, 16> MaskVec;
5365 for (unsigned i = 0; i != NumElems; ++i)
5366 // If this is the insertion idx, put the low elt of V2 here.
5367 MaskVec.push_back(i == Idx ? NumElems : i);
5368 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]);
5371 /// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
5372 /// target specific opcode. Returns true if the Mask could be calculated. Sets
5373 /// IsUnary to true if the node uses only one source. Note that this will set IsUnary for
5374 /// shuffles which use a single input multiple times, and in those cases it will
5375 /// adjust the mask to only have indices within that single input.
5376 static bool getTargetShuffleMask(SDNode *N, MVT VT,
5377 SmallVectorImpl<int> &Mask, bool &IsUnary) {
5378 unsigned NumElems = VT.getVectorNumElements();
5382 bool IsFakeUnary = false;
5383 switch(N->getOpcode()) {
5384 case X86ISD::BLENDI:
5385 ImmN = N->getOperand(N->getNumOperands()-1);
5386 DecodeBLENDMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5389 ImmN = N->getOperand(N->getNumOperands()-1);
5390 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5391 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5393 case X86ISD::UNPCKH:
5394 DecodeUNPCKHMask(VT, Mask);
5395 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5397 case X86ISD::UNPCKL:
5398 DecodeUNPCKLMask(VT, Mask);
5399 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5401 case X86ISD::MOVHLPS:
5402 DecodeMOVHLPSMask(NumElems, Mask);
5403 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5405 case X86ISD::MOVLHPS:
5406 DecodeMOVLHPSMask(NumElems, Mask);
5407 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5409 case X86ISD::PALIGNR:
5410 ImmN = N->getOperand(N->getNumOperands()-1);
5411 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5413 case X86ISD::PSHUFD:
5414 case X86ISD::VPERMILPI:
5415 ImmN = N->getOperand(N->getNumOperands()-1);
5416 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5419 case X86ISD::PSHUFHW:
5420 ImmN = N->getOperand(N->getNumOperands()-1);
5421 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5424 case X86ISD::PSHUFLW:
5425 ImmN = N->getOperand(N->getNumOperands()-1);
5426 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5429 case X86ISD::PSHUFB: {
5431 SDValue MaskNode = N->getOperand(1);
5432 while (MaskNode->getOpcode() == ISD::BITCAST)
5433 MaskNode = MaskNode->getOperand(0);
5435 if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
5436 // If we have a build-vector, then things are easy.
5437 EVT VT = MaskNode.getValueType();
5438 assert(VT.isVector() &&
5439 "Can't produce a non-vector with a build_vector!");
5440 if (!VT.isInteger())
5443 int NumBytesPerElement = VT.getVectorElementType().getSizeInBits() / 8;
5445 SmallVector<uint64_t, 32> RawMask;
5446 for (int i = 0, e = MaskNode->getNumOperands(); i < e; ++i) {
5447 SDValue Op = MaskNode->getOperand(i);
5448 if (Op->getOpcode() == ISD::UNDEF) {
5449 RawMask.push_back((uint64_t)SM_SentinelUndef);
5452 auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
5455 APInt MaskElement = CN->getAPIntValue();
5457 // We now have to decode the element which could be any integer size and
5458 // extract each byte of it.
5459 for (int j = 0; j < NumBytesPerElement; ++j) {
5460 // Note that this is x86 and so always little endian: the low byte is
5461 // the first byte of the mask.
5462 RawMask.push_back(MaskElement.getLoBits(8).getZExtValue());
5463 MaskElement = MaskElement.lshr(8);
5466 DecodePSHUFBMask(RawMask, Mask);
5470 auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
5474 SDValue Ptr = MaskLoad->getBasePtr();
5475 if (Ptr->getOpcode() == X86ISD::Wrapper)
5476 Ptr = Ptr->getOperand(0);
5478 auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
5479 if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
5482 if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
5483 DecodePSHUFBMask(C, Mask);
5491 case X86ISD::VPERMI:
5492 ImmN = N->getOperand(N->getNumOperands()-1);
5493 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5498 DecodeScalarMoveMask(VT, /* IsLoad */ false, Mask);
5500 case X86ISD::VPERM2X128:
5501 ImmN = N->getOperand(N->getNumOperands()-1);
5502 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5503 if (Mask.empty()) return false;
5505 case X86ISD::MOVSLDUP:
5506 DecodeMOVSLDUPMask(VT, Mask);
5509 case X86ISD::MOVSHDUP:
5510 DecodeMOVSHDUPMask(VT, Mask);
5513 case X86ISD::MOVDDUP:
5514 DecodeMOVDDUPMask(VT, Mask);
5517 case X86ISD::MOVLHPD:
5518 case X86ISD::MOVLPD:
5519 case X86ISD::MOVLPS:
5520 // Not yet implemented
5522 default: llvm_unreachable("unknown target shuffle node");
5525 // If we have a fake unary shuffle, the shuffle mask is spread across two
5526 // inputs that are actually the same node. Re-map the mask to always point
5527 // into the first input.
5530 if (M >= (int)Mask.size())
5536 /// getShuffleScalarElt - Returns the scalar element that will make up the ith
5537 /// element of the result of the vector shuffle.
5538 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
5541 return SDValue(); // Limit search depth.
5543 SDValue V = SDValue(N, 0);
5544 EVT VT = V.getValueType();
5545 unsigned Opcode = V.getOpcode();
5547 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
5548 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
5549 int Elt = SV->getMaskElt(Index);
5552 return DAG.getUNDEF(VT.getVectorElementType());
5554 unsigned NumElems = VT.getVectorNumElements();
5555 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
5556 : SV->getOperand(1);
5557 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
5560 // Recurse into target specific vector shuffles to find scalars.
5561 if (isTargetShuffle(Opcode)) {
5562 MVT ShufVT = V.getSimpleValueType();
5563 unsigned NumElems = ShufVT.getVectorNumElements();
5564 SmallVector<int, 16> ShuffleMask;
5567 if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
5570 int Elt = ShuffleMask[Index];
5572 return DAG.getUNDEF(ShufVT.getVectorElementType());
5574 SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
5576 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
5580 // Actual nodes that may contain scalar elements
5581 if (Opcode == ISD::BITCAST) {
5582 V = V.getOperand(0);
5583 EVT SrcVT = V.getValueType();
5584 unsigned NumElems = VT.getVectorNumElements();
5586 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
5590 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
5591 return (Index == 0) ? V.getOperand(0)
5592 : DAG.getUNDEF(VT.getVectorElementType());
5594 if (V.getOpcode() == ISD::BUILD_VECTOR)
5595 return V.getOperand(Index);
5600 /// getNumOfConsecutiveZeros - Return the number of elements of a vector
5601 /// shuffle operation which consecutively resolve to zero. The
5602 /// search can start in two different directions, from left or right.
5603 /// We count undefs as zeros until PreferredNum is reached.
5604 static unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp,
5605 unsigned NumElems, bool ZerosFromLeft,
5607 unsigned PreferredNum = -1U) {
5608 unsigned NumZeros = 0;
5609 for (unsigned i = 0; i != NumElems; ++i) {
5610 unsigned Index = ZerosFromLeft ? i : NumElems - i - 1;
5611 SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
5615 if (X86::isZeroNode(Elt))
5617 else if (Elt.getOpcode() == ISD::UNDEF) // Undef as zero up to PreferredNum.
5618 NumZeros = std::min(NumZeros + 1, PreferredNum);
5626 /// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
5627 /// correspond consecutively to elements from one of the vector operands,
5628 /// starting from its index OpIdx. Also sets OpNum to the source vector operand used.
5629 static
5630 bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
5631 unsigned MaskI, unsigned MaskE, unsigned OpIdx,
5632 unsigned NumElems, unsigned &OpNum) {
5633 bool SeenV1 = false;
5634 bool SeenV2 = false;
5636 for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
5637 int Idx = SVOp->getMaskElt(i);
5638 // Ignore undef indices
5642 if (Idx < (int)NumElems)
5647 // Only accept consecutive elements from the same vector
5648 if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
5652 OpNum = SeenV1 ? 0 : 1;
5656 /// isVectorShiftRight - Returns true if the shuffle can be implemented as a
5657 /// logical right shift of a vector.
5658 static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5659 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5661 SVOp->getSimpleValueType(0).getVectorNumElements();
5662 unsigned NumZeros = getNumOfConsecutiveZeros(
5663 SVOp, NumElems, false /* check zeros from right */, DAG,
5664 SVOp->getMaskElt(0));
5670 // Considering the elements in the mask that are not consecutive zeros,
5671 // check if they consecutively come from only one of the source vectors.
5673 // V1 = {X, A, B, C} 0
5675 // vector_shuffle V1, V2 <1, 2, 3, X>
5677 if (!isShuffleMaskConsecutive(SVOp,
5678 0, // Mask Start Index
5679 NumElems-NumZeros, // Mask End Index(exclusive)
5680 NumZeros, // Where to start looking in the src vector
5681 NumElems, // Number of elements in vector
5682 OpSrc)) // Which source operand ?
5687 ShVal = SVOp->getOperand(OpSrc);
5691 /// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
5692 /// logical left shift of a vector.
5693 static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5694 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5696 SVOp->getSimpleValueType(0).getVectorNumElements();
5697 unsigned NumZeros = getNumOfConsecutiveZeros(
5698 SVOp, NumElems, true /* check zeros from left */, DAG,
5699 NumElems - SVOp->getMaskElt(NumElems - 1) - 1);
5705 // Considering the elements in the mask that are not consecutive zeros,
5706 // check if they consecutively come from only one of the source vectors.
5708 // 0 { A, B, X, X } = V2
5710 // vector_shuffle V1, V2 <X, X, 4, 5>
5712 if (!isShuffleMaskConsecutive(SVOp,
5713 NumZeros, // Mask Start Index
5714 NumElems, // Mask End Index(exclusive)
5715 0, // Where to start looking in the src vector
5716 NumElems, // Number of elements in vector
5717 OpSrc)) // Which source operand ?
5722 ShVal = SVOp->getOperand(OpSrc);
5726 /// isVectorShift - Returns true if the shuffle can be implemented as a
5727 /// logical left or right shift of a vector.
5728 static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5729 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5730 // Although the logic below supports any bitwidth, there are no
5731 // shift instructions which handle more than 128-bit vectors.
5732 if (!SVOp->getSimpleValueType(0).is128BitVector())
5735 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
5736 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt))
5742 /// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
5744 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
5745 unsigned NumNonZero, unsigned NumZero,
5747 const X86Subtarget* Subtarget,
5748 const TargetLowering &TLI) {
5755 for (unsigned i = 0; i < 16; ++i) {
5756 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
5757 if (ThisIsNonZero && First) {
5759 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5761 V = DAG.getUNDEF(MVT::v8i16);
5766 SDValue ThisElt, LastElt;
5767 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
5768 if (LastIsNonZero) {
5769 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
5770 MVT::i16, Op.getOperand(i-1));
5772 if (ThisIsNonZero) {
5773 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
5774 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
5775 ThisElt, DAG.getConstant(8, MVT::i8));
5777 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
5781 if (ThisElt.getNode())
5782 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
5783 DAG.getIntPtrConstant(i/2));
5787 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
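// Illustrative example (assumption, not part of the original source): two
// adjacent non-zero bytes such as 0x12 at index 0 and 0x34 at index 1 are
// combined into the single i16 value (0x34 << 8) | 0x12 == 0x3412, so each
// byte pair costs one INSERT_VECTOR_ELT into the v8i16.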
5790 /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
5792 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
5793 unsigned NumNonZero, unsigned NumZero,
5795 const X86Subtarget* Subtarget,
5796 const TargetLowering &TLI) {
5803 for (unsigned i = 0; i < 8; ++i) {
5804 bool isNonZero = (NonZeros & (1 << i)) != 0;
5808 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5810 V = DAG.getUNDEF(MVT::v8i16);
5813 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
5814 MVT::v8i16, V, Op.getOperand(i),
5815 DAG.getIntPtrConstant(i));
5822 /// LowerBuildVectorv4x32 - Custom lower build_vector of v4i32 or v4f32.
5823 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
5824 const X86Subtarget *Subtarget,
5825 const TargetLowering &TLI) {
5826 // Find all zeroable elements.
5827 std::bitset<4> Zeroable;
5828 for (int i=0; i < 4; ++i) {
5829 SDValue Elt = Op->getOperand(i);
5830 Zeroable[i] = (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt));
5832 assert(Zeroable.size() - Zeroable.count() > 1 &&
5833 "We expect at least two non-zero elements!");
5835 // We only know how to deal with build_vector nodes where elements are either
5836 // zeroable or extract_vector_elt with constant index.
5837 SDValue FirstNonZero;
5838 unsigned FirstNonZeroIdx;
5839 for (unsigned i=0; i < 4; ++i) {
5842 SDValue Elt = Op->getOperand(i);
5843 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5844 !isa<ConstantSDNode>(Elt.getOperand(1)))
5846 // Make sure that this node is extracting from a 128-bit vector.
5847 MVT VT = Elt.getOperand(0).getSimpleValueType();
5848 if (!VT.is128BitVector())
5850 if (!FirstNonZero.getNode()) {
5852 FirstNonZeroIdx = i;
5856 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
5857 SDValue V1 = FirstNonZero.getOperand(0);
5858 MVT VT = V1.getSimpleValueType();
5860 // See if this build_vector can be lowered as a blend with zero.
5862 unsigned EltMaskIdx, EltIdx;
5864 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
5865 if (Zeroable[EltIdx]) {
5866 // The zero vector will be on the right hand side.
5867 Mask[EltIdx] = EltIdx+4;
5871 Elt = Op->getOperand(EltIdx);
5872 // By construction, Elt is an EXTRACT_VECTOR_ELT with constant index.
5873 EltMaskIdx = cast<ConstantSDNode>(Elt.getOperand(1))->getZExtValue();
5874 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
5876 Mask[EltIdx] = EltIdx;
5880 // Let the shuffle legalizer deal with blend operations.
5881 SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
5882 if (V1.getSimpleValueType() != VT)
5883 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), VT, V1);
5884 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, &Mask[0]);
5887 // See if we can lower this build_vector to a INSERTPS.
5888 if (!Subtarget->hasSSE41())
5891 SDValue V2 = Elt.getOperand(0);
5892 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
5895 bool CanFold = true;
5896 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
5900 SDValue Current = Op->getOperand(i);
5901 SDValue SrcVector = Current->getOperand(0);
5904 CanFold = SrcVector == V1 &&
5905 cast<ConstantSDNode>(Current.getOperand(1))->getZExtValue() == i;
5911 assert(V1.getNode() && "Expected at least two non-zero elements!");
5912 if (V1.getSimpleValueType() != MVT::v4f32)
5913 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), MVT::v4f32, V1);
5914 if (V2.getSimpleValueType() != MVT::v4f32)
5915 V2 = DAG.getNode(ISD::BITCAST, SDLoc(V2), MVT::v4f32, V2);
5917 // Ok, we can emit an INSERTPS instruction.
5918 unsigned ZMask = Zeroable.to_ulong();
5920 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
5921 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
5922 SDValue Result = DAG.getNode(X86ISD::INSERTPS, SDLoc(Op), MVT::v4f32, V1, V2,
5923 DAG.getIntPtrConstant(InsertPSMask));
5924 return DAG.getNode(ISD::BITCAST, SDLoc(Op), VT, Result);
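// Worked example (illustrative): with EltMaskIdx == 2, EltIdx == 1 and only
// element 3 zeroable (ZMask == 0x8), the INSERTPS immediate is
//   (2 << 6) | (1 << 4) | 0x8 == 0x98,
// i.e. take source element 2, place it in destination slot 1, zero lane 3.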
5927 /// Return a vector logical shift node.
5928 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
5929 unsigned NumBits, SelectionDAG &DAG,
5930 const TargetLowering &TLI, SDLoc dl) {
5931 assert(VT.is128BitVector() && "Unknown type for VShift");
5932 MVT ShVT = MVT::v2i64;
5933 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
5934 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
5935 MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(SrcOp.getValueType());
5936 assert(NumBits % 8 == 0 && "Only support byte sized shifts");
5937 SDValue ShiftVal = DAG.getConstant(NumBits/8, ScalarShiftTy);
5938 return DAG.getNode(ISD::BITCAST, dl, VT,
5939 DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
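// Illustrative usage (assumption, not part of the original source):
//   getVShift(/*isLeft=*/true, MVT::v4i32, Src, 32, DAG, TLI, dl)
// bitcasts Src to v2i64 and emits X86ISD::VSHLDQ with a count of 4 bytes,
// the PSLLDQ-by-one-dword idiom, before bitcasting back to v4i32.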
5942 static SDValue
5943 LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
5945 // Check if the scalar load can be widened into a vector load, and if
5946 // the address is "base + cst", see if the cst can be "absorbed" into
5947 // the shuffle mask.
5948 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
5949 SDValue Ptr = LD->getBasePtr();
5950 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
5952 EVT PVT = LD->getValueType(0);
5953 if (PVT != MVT::i32 && PVT != MVT::f32)
5958 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
5959 FI = FINode->getIndex();
5961 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
5962 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
5963 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5964 Offset = Ptr.getConstantOperandVal(1);
5965 Ptr = Ptr.getOperand(0);
5970 // FIXME: 256-bit vector instructions don't require a strict alignment,
5971 // improve this code to support it better.
5972 unsigned RequiredAlign = VT.getSizeInBits()/8;
5973 SDValue Chain = LD->getChain();
5974 // Make sure the stack object alignment is at least 16 or 32.
5975 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
5976 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
5977 if (MFI->isFixedObjectIndex(FI)) {
5978 // Can't change the alignment. FIXME: It's possible to compute
5979 // the exact stack offset and reference FI + adjust offset instead.
5980 // If someone *really* cares about this. That's the way to implement it.
5983 MFI->setObjectAlignment(FI, RequiredAlign);
5987 // (Offset % 16 or 32) must be a multiple of 4. The address is then
5988 // Ptr + (Offset & ~15).
5991 if ((Offset % RequiredAlign) & 3)
5993 int64_t StartOffset = Offset & ~(RequiredAlign-1);
5995 Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(),
5996 Ptr, DAG.getConstant(StartOffset, Ptr.getValueType()));
5998 int EltNo = (Offset - StartOffset) >> 2;
5999 unsigned NumElems = VT.getVectorNumElements();
6001 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
6002 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
6003 LD->getPointerInfo().getWithOffset(StartOffset),
6004 false, false, false, 0);
6006 SmallVector<int, 8> Mask(NumElems, EltNo);
6008 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
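// Worked example (illustrative): a 4-byte scalar load at frame offset 20
// feeding a v4i32 splat has RequiredAlign == 16, so StartOffset == 20 & ~15
// == 16; the load is widened to a full v4i32 load at offset 16 and the
// splat index becomes EltNo == (20 - 16) >> 2 == 1.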
6014 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
6015 /// elements can be replaced by a single large load which has the same value as
6016 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
6018 /// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
6020 /// FIXME: we'd also like to handle the case where the last elements are zero
6021 /// rather than undef via VZEXT_LOAD, but we do not detect that case today.
6022 /// There's even a handy isZeroNode for that purpose.
6023 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
6024 SDLoc &DL, SelectionDAG &DAG,
6025 bool isAfterLegalize) {
6026 unsigned NumElems = Elts.size();
6028 LoadSDNode *LDBase = nullptr;
6029 unsigned LastLoadedElt = -1U;
6031 // For each element in the initializer, see if we've found a load or an undef.
6032 // If we don't find an initial load element, or later load elements are
6033 // non-consecutive, bail out.
6034 for (unsigned i = 0; i < NumElems; ++i) {
6035 SDValue Elt = Elts[i];
6036 // Look through a bitcast.
6037 if (Elt.getNode() && Elt.getOpcode() == ISD::BITCAST)
6038 Elt = Elt.getOperand(0);
6039 if (!Elt.getNode() ||
6040 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
6043 if (Elt.getNode()->getOpcode() == ISD::UNDEF)
6045 LDBase = cast<LoadSDNode>(Elt.getNode());
6049 if (Elt.getOpcode() == ISD::UNDEF)
6052 LoadSDNode *LD = cast<LoadSDNode>(Elt);
6053 EVT LdVT = Elt.getValueType();
6054 // Each loaded element must be the correct fractional portion of the
6055 // requested vector load.
6056 if (LdVT.getSizeInBits() != VT.getSizeInBits() / NumElems)
6058 if (!DAG.isConsecutiveLoad(LD, LDBase, LdVT.getSizeInBits() / 8, i))
6063 // If we have found an entire vector of loads and undefs, then return a large
6064 // load of the entire vector width starting at the base pointer. If we found
6065 // consecutive loads for the low half, generate a vzext_load node.
6066 if (LastLoadedElt == NumElems - 1) {
6067 assert(LDBase && "Did not find base load for merging consecutive loads");
6068 EVT EltVT = LDBase->getValueType(0);
6069 // Ensure that the input vector size for the merged loads matches the
6070 // cumulative size of the input elements.
6071 if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems)
6074 if (isAfterLegalize &&
6075 !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
6078 SDValue NewLd = SDValue();
6080 NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
6081 LDBase->getPointerInfo(), LDBase->isVolatile(),
6082 LDBase->isNonTemporal(), LDBase->isInvariant(),
6083 LDBase->getAlignment());
6085 if (LDBase->hasAnyUseOfValue(1)) {
6086 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6088 SDValue(NewLd.getNode(), 1));
6089 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6090 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6091 SDValue(NewLd.getNode(), 1));
6097 //TODO: The code below fires only for loading the low v2i32 / v2f32
6098 //of a v4i32 / v4f32. It's probably worth generalizing.
6099 EVT EltVT = VT.getVectorElementType();
6100 if (NumElems == 4 && LastLoadedElt == 1 && (EltVT.getSizeInBits() == 32) &&
6101 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
6102 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
6103 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
6105 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::i64,
6106 LDBase->getPointerInfo(),
6107 LDBase->getAlignment(),
6108 false/*isVolatile*/, true/*ReadMem*/,
6111 // Make sure the newly-created LOAD is in the same position as LDBase in
6112 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and
6113 // update uses of LDBase's output chain to use the TokenFactor.
6114 if (LDBase->hasAnyUseOfValue(1)) {
6115 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6116 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1));
6117 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6118 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6119 SDValue(ResNode.getNode(), 1));
6122 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
6127 /// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
6128 /// to generate a splat value for the following cases:
6129 /// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
6130 /// 2. A splat shuffle which uses a scalar_to_vector node which comes from
6131 /// a scalar load, or a constant.
6132 /// The VBROADCAST node is returned when a pattern is found,
6133 /// or SDValue() otherwise.
6134 static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
6135 SelectionDAG &DAG) {
6136 // VBROADCAST requires AVX.
6137 // TODO: Splats could be generated for non-AVX CPUs using SSE
6138 // instructions, but there's less potential gain for only 128-bit vectors.
6139 if (!Subtarget->hasAVX())
6142 MVT VT = Op.getSimpleValueType();
6145 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
6146 "Unsupported vector type for broadcast.");
6151 switch (Op.getOpcode()) {
6153 // Unknown pattern found.
6156 case ISD::BUILD_VECTOR: {
6157 auto *BVOp = cast<BuildVectorSDNode>(Op.getNode());
6158 BitVector UndefElements;
6159 SDValue Splat = BVOp->getSplatValue(&UndefElements);
6161 // We need a splat of a single value to use broadcast, and it doesn't
6162 // make any sense if the value is only in one element of the vector.
6163 if (!Splat || (VT.getVectorNumElements() - UndefElements.count()) <= 1)
6167 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6168 Ld.getOpcode() == ISD::ConstantFP);
6170 // Make sure that all of the users of a non-constant load are from the
6171 // BUILD_VECTOR node.
6172 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
6177 case ISD::VECTOR_SHUFFLE: {
6178 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
6180 // Shuffles must have a splat mask where the first element is
6181 // broadcast.
6182 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0)
6185 SDValue Sc = Op.getOperand(0);
6186 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
6187 Sc.getOpcode() != ISD::BUILD_VECTOR) {
6189 if (!Subtarget->hasInt256())
6192 // Use the register form of the broadcast instruction available on AVX2.
6193 if (VT.getSizeInBits() >= 256)
6194 Sc = Extract128BitVector(Sc, 0, DAG, dl);
6195 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
6198 Ld = Sc.getOperand(0);
6199 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6200 Ld.getOpcode() == ISD::ConstantFP);
6202 // The scalar_to_vector node and the suspected
6203 // load node must have exactly one user.
6204 // Constants may have multiple users.
6206 // AVX-512 has a register version of the broadcast
6207 bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() &&
6208 Ld.getValueType().getSizeInBits() >= 32;
6209 if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
6216 unsigned ScalarSize = Ld.getValueType().getSizeInBits();
6217 bool IsGE256 = (VT.getSizeInBits() >= 256);
6219 // When optimizing for size, generate up to 5 extra bytes for a broadcast
6220 // instruction to save 8 or more bytes of constant pool data.
6221 // TODO: If multiple splats are generated to load the same constant,
6222 // it may be detrimental to overall size. There needs to be a way to detect
6223 // that condition to know if this is truly a size win.
6224 const Function *F = DAG.getMachineFunction().getFunction();
6225 bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize);
6227 // Handle broadcasting a single constant scalar from the constant pool
6229 // On Sandybridge (no AVX2), it is still better to load a constant vector
6230 // from the constant pool and not to broadcast it from a scalar.
6231 // But override that restriction when optimizing for size.
6232 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
6233 if (ConstSplatVal && (Subtarget->hasAVX2() || OptForSize)) {
6234 EVT CVT = Ld.getValueType();
6235 assert(!CVT.isVector() && "Must not broadcast a vector type");
6237 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
6238 // For size optimization, also splat v2f64 and v2i64, and for size opt
6239 // with AVX2, also splat i8 and i16.
6240 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
6241 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6242 (OptForSize && (ScalarSize == 64 || Subtarget->hasAVX2()))) {
6243 const Constant *C = nullptr;
6244 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
6245 C = CI->getConstantIntValue();
6246 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
6247 C = CF->getConstantFPValue();
6249 assert(C && "Invalid constant type");
6251 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6252 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
6253 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
6254 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP,
6255 MachinePointerInfo::getConstantPool(),
6256 false, false, false, Alignment);
6258 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6262 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
6264 // Handle AVX2 in-register broadcasts.
6265 if (!IsLoad && Subtarget->hasInt256() &&
6266 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
6267 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6269 // The scalar source must be a normal load.
6273 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6274 (Subtarget->hasVLX() && ScalarSize == 64))
6275 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6277 // The integer check is needed for the 64-bit into 128-bit case so it doesn't
6278 // match double, since there is no vbroadcastsd with a 128-bit destination.
6279 if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) {
6280 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
6281 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6284 // Unsupported broadcast.
6288 /// \brief For an EXTRACT_VECTOR_ELT with a constant index return the real
6289 /// underlying vector and index.
6291 /// Modifies \p ExtractedFromVec to the real vector and returns the real
6292 /// index.
6293 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
6295 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
6296 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
6299 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
6301 // (extract_vector_elt (v8f32 %vreg1), Constant<6>)
6303 // (extract_vector_elt (vector_shuffle<2,u,u,u>
6304 // (extract_subvector (v8f32 %vreg0), Constant<4>),
6307 // In this case the vector is the extract_subvector expression and the index
6308 // is 2, as specified by the shuffle.
6309 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
6310 SDValue ShuffleVec = SVOp->getOperand(0);
6311 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
6312 assert(ShuffleVecVT.getVectorElementType() ==
6313 ExtractedFromVec.getSimpleValueType().getVectorElementType());
6315 int ShuffleIdx = SVOp->getMaskElt(Idx);
6316 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
6317 ExtractedFromVec = ShuffleVec;
6323 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
6324 MVT VT = Op.getSimpleValueType();
6326 // Skip if insert_vec_elt is not supported.
6327 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6328 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
6332 unsigned NumElems = Op.getNumOperands();
6336 SmallVector<unsigned, 4> InsertIndices;
6337 SmallVector<int, 8> Mask(NumElems, -1);
6339 for (unsigned i = 0; i != NumElems; ++i) {
6340 unsigned Opc = Op.getOperand(i).getOpcode();
6342 if (Opc == ISD::UNDEF)
6345 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
6346 // Quit if more than 1 element needs inserting.
6347 if (InsertIndices.size() > 1)
6350 InsertIndices.push_back(i);
6354 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
6355 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
6356 // Quit if non-constant index.
6357 if (!isa<ConstantSDNode>(ExtIdx))
6359 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
6361 // Quit if extracted from vector of different type.
6362 if (ExtractedFromVec.getValueType() != VT)
6365 if (!VecIn1.getNode())
6366 VecIn1 = ExtractedFromVec;
6367 else if (VecIn1 != ExtractedFromVec) {
6368 if (!VecIn2.getNode())
6369 VecIn2 = ExtractedFromVec;
6370 else if (VecIn2 != ExtractedFromVec)
6371 // Quit if more than 2 vectors to shuffle
6375 if (ExtractedFromVec == VecIn1)
6377 else if (ExtractedFromVec == VecIn2)
6378 Mask[i] = Idx + NumElems;
6381 if (!VecIn1.getNode())
6384 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
6385 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]);
6386 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
6387 unsigned Idx = InsertIndices[i];
6388 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
6389 DAG.getIntPtrConstant(Idx));
// Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
SDValue
X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  assert((VT.getVectorElementType() == MVT::i1) && (VT.getSizeInBits() <= 16) &&
         "Unexpected type in LowerBUILD_VECTORvXi1!");

  SDLoc dl(Op);
  if (ISD::isBuildVectorAllZeros(Op.getNode())) {
    SDValue Cst = DAG.getTargetConstant(0, MVT::i1);
    SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
    return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
  }

  if (ISD::isBuildVectorAllOnes(Op.getNode())) {
    SDValue Cst = DAG.getTargetConstant(1, MVT::i1);
    SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
    return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
  }

  bool AllConstants = true;
  uint64_t Immediate = 0;
  int NonConstIdx = -1;
  bool IsSplat = true;
  unsigned NumNonConsts = 0;
  unsigned NumConsts = 0;
  for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
    SDValue In = Op.getOperand(idx);
    if (In.getOpcode() == ISD::UNDEF)
      continue;
    if (!isa<ConstantSDNode>(In)) {
      AllConstants = false;
      NonConstIdx = idx;
      NumNonConsts++;
    } else {
      NumConsts++;
      if (cast<ConstantSDNode>(In)->getZExtValue())
        Immediate |= (1ULL << idx);
    }
    if (In != Op.getOperand(0))
      IsSplat = false;
  }

  if (AllConstants) {
    SDValue FullMask = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1,
                                   DAG.getConstant(Immediate, MVT::i16));
    return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, FullMask,
                       DAG.getIntPtrConstant(0));
  }

  if (NumNonConsts == 1 && NonConstIdx != 0) {
    SDValue DstVec;
    if (NumConsts) {
      SDValue VecAsImm = DAG.getConstant(Immediate,
                                         MVT::getIntegerVT(VT.getSizeInBits()));
      DstVec = DAG.getNode(ISD::BITCAST, dl, VT, VecAsImm);
    } else
      DstVec = DAG.getUNDEF(VT);
    return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
                       Op.getOperand(NonConstIdx),
                       DAG.getIntPtrConstant(NonConstIdx));
  }
  if (!IsSplat && (NonConstIdx != 0))
    llvm_unreachable("Unsupported BUILD_VECTOR operation");
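  // Reaching here, either the build_vector is a splat of operand 0, or
  // element 0 is its only non-constant operand. In both cases the result can
  // be materialized as a scalar SELECT on operand 0: for a splat we select
  // between all-ones and all-zeros, otherwise between the constant mask with
  // bit 0 set and with bit 0 cleared.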
  MVT SelectVT = (VT == MVT::v16i1) ? MVT::i16 : MVT::i8;
  SDValue Select;
  if (IsSplat)
    Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
                         DAG.getConstant(-1, SelectVT),
                         DAG.getConstant(0, SelectVT));
  else
    Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
                         DAG.getConstant((Immediate | 1), SelectVT),
                         DAG.getConstant(Immediate, SelectVT));
  return DAG.getNode(ISD::BITCAST, dl, VT, Select);
}

/// \brief Return true if \p N implements a horizontal binop and return the
/// operands for the horizontal binop into V0 and V1.
///
/// This is a helper function of PerformBUILD_VECTORCombine.
/// This function checks that the build_vector \p N in input implements a
/// horizontal operation. Parameter \p Opcode defines the kind of horizontal
/// operation to match.
/// For example, if \p Opcode is equal to ISD::ADD, then this function
/// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
/// is equal to ISD::SUB, then this function checks if this is a horizontal
/// arithmetic sub.
///
/// This function only analyzes elements of \p N whose indices are
/// in range [BaseIdx, LastIdx).
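///
/// For example, for v4f32 the build_vector
///   (fadd (extract_elt A, 0), (extract_elt A, 1)),
///   (fadd (extract_elt A, 2), (extract_elt A, 3)),
///   (fadd (extract_elt B, 0), (extract_elt B, 1)),
///   (fadd (extract_elt B, 2), (extract_elt B, 3))
/// matches a horizontal FADD with V0 = A and V1 = B.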
static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
                              SelectionDAG &DAG,
                              unsigned BaseIdx, unsigned LastIdx,
                              SDValue &V0, SDValue &V1) {
  EVT VT = N->getValueType(0);

  assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
  assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
         "Invalid Vector in input!");

  bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
  bool CanFold = true;
  unsigned ExpectedVExtractIdx = BaseIdx;
  unsigned NumElts = LastIdx - BaseIdx;
  V0 = DAG.getUNDEF(VT);
  V1 = DAG.getUNDEF(VT);

  // Check if N implements a horizontal binop.
  for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
    SDValue Op = N->getOperand(i + BaseIdx);

    // Skip UNDEFs.
    if (Op->getOpcode() == ISD::UNDEF) {
      // Update the expected vector extract index.
      if (i * 2 == NumElts)
        ExpectedVExtractIdx = BaseIdx;
      ExpectedVExtractIdx += 2;
      continue;
    }

    CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();

    if (!CanFold)
      break;

    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    // Try to match the following pattern:
    // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
    CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
               Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
               Op0.getOperand(0) == Op1.getOperand(0) &&
               isa<ConstantSDNode>(Op0.getOperand(1)) &&
               isa<ConstantSDNode>(Op1.getOperand(1)));
    if (!CanFold)
      break;

    unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
    unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();

    if (i * 2 < NumElts) {
      if (V0.getOpcode() == ISD::UNDEF)
        V0 = Op0.getOperand(0);
    } else {
      if (V1.getOpcode() == ISD::UNDEF)
        V1 = Op0.getOperand(0);
      if (i * 2 == NumElts)
        ExpectedVExtractIdx = BaseIdx;
    }

    SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
    if (I0 == ExpectedVExtractIdx)
      CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
    else if (IsCommutable && I1 == ExpectedVExtractIdx) {
      // Try to match the following dag sequence:
      // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
      CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
    } else
      CanFold = false;

    ExpectedVExtractIdx += 2;
  }

  return CanFold;
}

/// \brief Emit a sequence of two 128-bit horizontal add/sub followed by
/// a concat_vector.
///
/// This is a helper function of PerformBUILD_VECTORCombine.
/// This function expects two 256-bit vectors called V0 and V1.
/// At first, each vector is split into two separate 128-bit vectors.
/// Then, the resulting 128-bit vectors are used to implement two
/// horizontal binary operations.
///
/// The kind of horizontal binary operation is defined by \p X86Opcode.
///
/// \p Mode specifies how the 128-bit parts of V0 and V1 are passed in input to
/// the two new horizontal binops.
/// When Mode is set, the first horizontal binop dag node takes as input the
/// lower 128 bits of V0 and the upper 128 bits of V0. The second horizontal
/// binop dag node takes as input the lower 128 bits of V1 and the upper 128
/// bits of V1.
///   Example:
///     HADD V0_LO, V0_HI
///     HADD V1_LO, V1_HI
///
/// Otherwise, the first horizontal binop dag node takes as input the lower
/// 128 bits of V0 and the lower 128 bits of V1, and the second horizontal
/// binop dag node takes the upper 128 bits of V0 and the upper 128 bits of V1.
///   Example:
///     HADD V0_LO, V1_LO
///     HADD V0_HI, V1_HI
///
/// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
/// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
/// the upper 128-bits of the result.
static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
                                     SDLoc DL, SelectionDAG &DAG,
                                     unsigned X86Opcode, bool Mode,
                                     bool isUndefLO, bool isUndefHI) {
  EVT VT = V0.getValueType();
  assert(VT.is256BitVector() && VT == V1.getValueType() &&
         "Invalid nodes in input!");

  unsigned NumElts = VT.getVectorNumElements();
  SDValue V0_LO = Extract128BitVector(V0, 0, DAG, DL);
  SDValue V0_HI = Extract128BitVector(V0, NumElts/2, DAG, DL);
  SDValue V1_LO = Extract128BitVector(V1, 0, DAG, DL);
  SDValue V1_HI = Extract128BitVector(V1, NumElts/2, DAG, DL);
  EVT NewVT = V0_LO.getValueType();

  SDValue LO = DAG.getUNDEF(NewVT);
  SDValue HI = DAG.getUNDEF(NewVT);

  if (Mode) {
    // Don't emit a horizontal binop if the result is expected to be UNDEF.
    if (!isUndefLO && V0->getOpcode() != ISD::UNDEF)
      LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
    if (!isUndefHI && V1->getOpcode() != ISD::UNDEF)
      HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
  } else {
    // Don't emit a horizontal binop if the result is expected to be UNDEF.
    if (!isUndefLO && (V0_LO->getOpcode() != ISD::UNDEF ||
                       V1_LO->getOpcode() != ISD::UNDEF))
      LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);

    if (!isUndefHI && (V0_HI->getOpcode() != ISD::UNDEF ||
                       V1_HI->getOpcode() != ISD::UNDEF))
      HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
  }

  return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
}

/// \brief Try to fold a build_vector that performs an 'addsub' into the
/// sequence of 'vadd + vsub + blendi'.
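///
/// For example, for v4f32 the build_vector
///   (fsub A[0], B[0]), (fadd A[1], B[1]), (fsub A[2], B[2]), (fadd A[3], B[3])
/// is folded into (X86ISD::ADDSUB A, B).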
static SDValue matchAddSub(const BuildVectorSDNode *BV, SelectionDAG &DAG,
                           const X86Subtarget *Subtarget) {
  SDLoc DL(BV);
  EVT VT = BV->getValueType(0);
  unsigned NumElts = VT.getVectorNumElements();
  SDValue InVec0 = DAG.getUNDEF(VT);
  SDValue InVec1 = DAG.getUNDEF(VT);

  assert((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
          VT == MVT::v2f64) && "build_vector with an invalid type found!");

  // Odd-numbered elements in the input build vector are obtained from
  // adding two integer/float elements.
  // Even-numbered elements in the input build vector are obtained from
  // subtracting two integer/float elements.
  unsigned ExpectedOpcode = ISD::FSUB;
  unsigned NextExpectedOpcode = ISD::FADD;
  bool AddFound = false;
  bool SubFound = false;

  for (unsigned i = 0, e = NumElts; i != e; ++i) {
    SDValue Op = BV->getOperand(i);

    // Skip 'undef' values.
    unsigned Opcode = Op.getOpcode();
    if (Opcode == ISD::UNDEF) {
      std::swap(ExpectedOpcode, NextExpectedOpcode);
      continue;
    }

    // Early exit if we found an unexpected opcode.
    if (Opcode != ExpectedOpcode)
      return SDValue();

    SDValue Op0 = Op.getOperand(0);
    SDValue Op1 = Op.getOperand(1);

    // Try to match the following pattern:
    // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
    // Early exit if we cannot match that sequence.
    if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
        !isa<ConstantSDNode>(Op0.getOperand(1)) ||
        !isa<ConstantSDNode>(Op1.getOperand(1)) ||
        Op0.getOperand(1) != Op1.getOperand(1))
      return SDValue();

    unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
    if (I0 != i)
      return SDValue();

    // We found a valid add/sub node. Update the information accordingly.
    if (i & 1)
      AddFound = true;
    else
      SubFound = true;

    // Update InVec0 and InVec1.
    if (InVec0.getOpcode() == ISD::UNDEF)
      InVec0 = Op0.getOperand(0);
    if (InVec1.getOpcode() == ISD::UNDEF)
      InVec1 = Op1.getOperand(0);

    // Make sure that operands in input to each add/sub node always
    // come from the same pair of vectors.
    if (InVec0 != Op0.getOperand(0)) {
      if (ExpectedOpcode == ISD::FSUB)
        return SDValue();

      // FADD is commutable. Try to commute the operands
      // and then test again.
      std::swap(Op0, Op1);
      if (InVec0 != Op0.getOperand(0))
        return SDValue();
    }

    if (InVec1 != Op1.getOperand(0))
      return SDValue();

    // Update the pair of expected opcodes.
    std::swap(ExpectedOpcode, NextExpectedOpcode);
  }

  // Don't try to fold this build_vector into an ADDSUB if the inputs are undef.
  if (AddFound && SubFound && InVec0.getOpcode() != ISD::UNDEF &&
      InVec1.getOpcode() != ISD::UNDEF)
    return DAG.getNode(X86ISD::ADDSUB, DL, VT, InVec0, InVec1);

  return SDValue();
}

static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
                                          const X86Subtarget *Subtarget) {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);
  unsigned NumElts = VT.getVectorNumElements();
  BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);
  SDValue InVec0, InVec1;

  // Try to match an ADDSUB.
  if ((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
      (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
    SDValue Value = matchAddSub(BV, DAG, Subtarget);
    if (Value.getNode())
      return Value;
  }

  // Try to match horizontal ADD/SUB.
  unsigned NumUndefsLO = 0;
  unsigned NumUndefsHI = 0;
  unsigned Half = NumElts/2;

  // Count the number of UNDEF operands in the input build_vector.
  for (unsigned i = 0, e = Half; i != e; ++i)
    if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
      NumUndefsLO++;

  for (unsigned i = Half, e = NumElts; i != e; ++i)
    if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
      NumUndefsHI++;

  // Early exit if this is either a build_vector of all UNDEFs or all the
  // operands but one are UNDEF.
  if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
    return SDValue();

  if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget->hasSSE3()) {
    // Try to match an SSE3 float HADD/HSUB.
    if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
      return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);

    if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
      return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
  } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) {
    // Try to match an SSSE3 integer HADD/HSUB.
    if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
      return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);

    if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
      return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
  }

  if (!Subtarget->hasAVX())
    return SDValue();

  if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
    // Try to match an AVX horizontal add/sub of packed single/double
    // precision floating point values from 256-bit vectors.
    SDValue InVec2, InVec3;
    if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
        isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
        ((InVec0.getOpcode() == ISD::UNDEF ||
          InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
        ((InVec1.getOpcode() == ISD::UNDEF ||
          InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
      return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);

    if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
        isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
        ((InVec0.getOpcode() == ISD::UNDEF ||
          InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
        ((InVec1.getOpcode() == ISD::UNDEF ||
          InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
      return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
  } else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
    // Try to match an AVX2 horizontal add/sub of signed integers.
    SDValue InVec2, InVec3;
    unsigned X86Opcode;
    bool CanFold = true;

    if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
        isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
        ((InVec0.getOpcode() == ISD::UNDEF ||
          InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
        ((InVec1.getOpcode() == ISD::UNDEF ||
          InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
      X86Opcode = X86ISD::HADD;
    else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
        isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
        ((InVec0.getOpcode() == ISD::UNDEF ||
          InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
        ((InVec1.getOpcode() == ISD::UNDEF ||
          InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
      X86Opcode = X86ISD::HSUB;
    else
      CanFold = false;

    if (CanFold) {
      // Fold this build_vector into a single horizontal add/sub.
      // Do this only if the target has AVX2.
      if (Subtarget->hasAVX2())
        return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);

      // Do not try to expand this build_vector into a pair of horizontal
      // add/sub if we can emit a pair of scalar add/sub.
      if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
        return SDValue();

      // Convert this build_vector into a pair of horizontal binops followed by
      // a concat vector.
      bool isUndefLO = NumUndefsLO == Half;
      bool isUndefHI = NumUndefsHI == Half;
      return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
                                   isUndefLO, isUndefHI);
    }
  }

  if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
       VT == MVT::v16i16) && Subtarget->hasAVX()) {
    unsigned X86Opcode;
    if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
      X86Opcode = X86ISD::HADD;
    else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
      X86Opcode = X86ISD::HSUB;
    else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
      X86Opcode = X86ISD::FHADD;
    else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
      X86Opcode = X86ISD::FHSUB;
    else
      return SDValue();

    // Don't try to expand this build_vector into a pair of horizontal add/sub
    // if we can simply emit a pair of scalar add/sub.
    if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
      return SDValue();

    // Convert this build_vector into two horizontal add/sub followed by
    // a concat vector.
    bool isUndefLO = NumUndefsLO == Half;
    bool isUndefHI = NumUndefsHI == Half;
    return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
                                 isUndefLO, isUndefHI);
  }

  return SDValue();
}

SDValue
X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);

  MVT VT = Op.getSimpleValueType();
  MVT ExtVT = VT.getVectorElementType();
  unsigned NumElems = Op.getNumOperands();

  // Generate vectors for predicate vectors.
  if (VT.getScalarType() == MVT::i1 && Subtarget->hasAVX512())
    return LowerBUILD_VECTORvXi1(Op, DAG);

  // Vectors containing all zeros can be matched by pxor and xorps later.
  if (ISD::isBuildVectorAllZeros(Op.getNode())) {
    // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
    // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
    if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
      return Op;

    return getZeroVector(VT, Subtarget, DAG, dl);
  }

  // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
  // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
  // vpcmpeqd on 256-bit vectors.
  if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
    if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256()))
      return Op;

    if (!VT.is512BitVector())
      return getOnesVector(VT, Subtarget->hasInt256(), DAG, dl);
  }

  SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
  if (Broadcast.getNode())
    return Broadcast;

  unsigned EVTBits = ExtVT.getSizeInBits();

  unsigned NumZero = 0;
  unsigned NumNonZero = 0;
  unsigned NonZeros = 0;
  bool IsAllConstants = true;
  SmallSet<SDValue, 8> Values;
  for (unsigned i = 0; i < NumElems; ++i) {
    SDValue Elt = Op.getOperand(i);
    if (Elt.getOpcode() == ISD::UNDEF)
      continue;
    Values.insert(Elt);
    if (Elt.getOpcode() != ISD::Constant &&
        Elt.getOpcode() != ISD::ConstantFP)
      IsAllConstants = false;
    if (X86::isZeroNode(Elt))
      NumZero++;
    else {
      NonZeros |= (1 << i);
      NumNonZero++;
    }
  }

  // All undef vector. Return an UNDEF. All zero vectors were handled above.
  if (NumNonZero == 0)
    return DAG.getUNDEF(VT);

  // Special case for single non-zero, non-undef, element.
  if (NumNonZero == 1) {
    unsigned Idx = countTrailingZeros(NonZeros);
    SDValue Item = Op.getOperand(Idx);

    // If this is an insertion of an i64 value on x86-32, and if the top bits of
    // the value are obviously zero, truncate the value to i32 and do the
    // insertion that way. Only do this if the value is non-constant or if the
    // value is a constant being inserted into element 0. It is cheaper to do
    // a constant pool load than it is to do a movd + shuffle.
    if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
        (!IsAllConstants || Idx == 0)) {
      if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
        // Handle SSE only.
        assert(VT == MVT::v2i64 && "Expected an SSE value type!");
        EVT VecVT = MVT::v4i32;
        unsigned VecElts = 4;

        // Truncate the value (which may itself be a constant) to i32, and
        // convert it to a vector with movd (S2V+shuffle to zero extend).
        Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
        Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);

        // If using the new shuffle lowering, just directly insert this.
        if (ExperimentalVectorShuffleLowering)
          return DAG.getNode(
              ISD::BITCAST, dl, VT,
              getShuffleVectorZeroOrUndef(Item, Idx * 2, true, Subtarget, DAG));

        Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);

        // Now we have our 32-bit value zero extended in the low element of
        // a vector. If Idx != 0, swizzle it into place.
        if (Idx != 0) {
          SmallVector<int, 4> Mask;
          Mask.push_back(Idx);
          for (unsigned i = 1; i != VecElts; ++i)
            Mask.push_back(i);
          Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT),
                                      &Mask[0]);
        }
        return DAG.getNode(ISD::BITCAST, dl, VT, Item);
      }
    }

    // If we have a constant or non-constant insertion into the low element of
    // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
    // the rest of the elements. This will be matched as movd/movq/movss/movsd
    // depending on what the source datatype is.
    if (Idx == 0) {
      if (NumZero == 0)
        return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);

      if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
          (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
        if (VT.is256BitVector() || VT.is512BitVector()) {
          SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
          return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
                             Item, DAG.getIntPtrConstant(0));
        }
        assert(VT.is128BitVector() && "Expected an SSE value type!");
        Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
        // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
        return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
      }

      if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
        Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
        Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
        if (VT.is256BitVector()) {
          SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
          Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
        } else {
          assert(VT.is128BitVector() && "Expected an SSE value type!");
          Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
        }
        return DAG.getNode(ISD::BITCAST, dl, VT, Item);
      }
    }

    // Is it a vector logical left shift?
    if (NumElems == 2 && Idx == 1 &&
        X86::isZeroNode(Op.getOperand(0)) &&
        !X86::isZeroNode(Op.getOperand(1))) {
      unsigned NumBits = VT.getSizeInBits();
      return getVShift(true, VT,
                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                   VT, Op.getOperand(1)),
                       NumBits/2, DAG, *this, dl);
    }

    if (IsAllConstants) // Otherwise, it's better to do a constpool load.
      return SDValue();

    // Otherwise, if this is a vector with i32 or f32 elements, and the element
    // is a non-constant being inserted into an element other than the low one,
    // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
    // movd/movss) to move this into the low element, then shuffle it into
    // place.
    if (EVTBits == 32) {
      Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);

      // If using the new shuffle lowering, just directly insert this.
      if (ExperimentalVectorShuffleLowering)
        return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget,
                                           DAG);

      // Turn it into a shuffle of zero and zero-extended scalar to vector.
      Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG);
      SmallVector<int, 8> MaskVec;
      for (unsigned i = 0; i != NumElems; ++i)
        MaskVec.push_back(i == Idx ? 0 : 1);
      return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
    }
  }

  // Splat is obviously ok. Let legalizer expand it to a shuffle.
  if (Values.size() == 1) {
    if (EVTBits == 32) {
      // Instead of a shuffle like this:
      //   shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
      // Check if it's possible to issue this instead.
      //   shuffle (vload ptr)), undef, <1, 1, 1, 1>
      unsigned Idx = countTrailingZeros(NonZeros);
      SDValue Item = Op.getOperand(Idx);
      if (Op.getNode()->isOnlyUserOf(Item.getNode()))
        return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
    }
    return SDValue();
  }

  // A vector full of immediates; various special cases are already
  // handled, so this is best done with a single constant-pool load.
  if (IsAllConstants)
    return SDValue();

  // For AVX-length vectors, see if we can use a vector load to get all of the
  // elements, otherwise build the individual 128-bit pieces and use
  // shuffles to put them in place.
  if (VT.is256BitVector() || VT.is512BitVector()) {
    SmallVector<SDValue, 64> V(Op->op_begin(), Op->op_begin() + NumElems);

    // Check for a build vector of consecutive loads.
    if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
      return LD;

    EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);

    // Build both the lower and upper subvector.
    SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
                                makeArrayRef(&V[0], NumElems/2));
    SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
                                makeArrayRef(&V[NumElems / 2], NumElems/2));

    // Recreate the wider vector with the lower and upper part.
    if (VT.is256BitVector())
      return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
    return Concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
  }

  // Let legalizer expand 2-wide build_vectors.
  if (EVTBits == 64) {
    if (NumNonZero == 1) {
      // One half is zero or undef.
      unsigned Idx = countTrailingZeros(NonZeros);
      SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
                               Op.getOperand(Idx));
      return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
    }
    return SDValue();
  }

  // If element VT is < 32 bits, convert it to inserts into a zero vector.
  if (EVTBits == 8 && NumElems == 16) {
    SDValue V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero, DAG,
                                      Subtarget, *this);
    if (V.getNode()) return V;
  }

  if (EVTBits == 16 && NumElems == 8) {
    SDValue V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero, DAG,
                                      Subtarget, *this);
    if (V.getNode()) return V;
  }

  // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS.
  if (EVTBits == 32 && NumElems == 4) {
    SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget, *this);
    if (V.getNode())
      return V;
  }

  // If element VT is == 32 bits, turn it into a number of shuffles.
  SmallVector<SDValue, 8> V(NumElems);
  if (NumElems == 4 && NumZero > 0) {
    for (unsigned i = 0; i < 4; ++i) {
      bool isZero = !(NonZeros & (1 << i));
      if (isZero)
        V[i] = getZeroVector(VT, Subtarget, DAG, dl);
      else
        V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
    }

    for (unsigned i = 0; i < 2; ++i) {
      switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
        default: break;
        case 0:
          V[i] = V[i*2];  // Must be a zero vector.
          break;
        case 1:
          V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
          break;
        case 2:
          V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
          break;
        case 3:
          V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
          break;
      }
    }

    bool Reverse1 = (NonZeros & 0x3) == 2;
    bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
    int MaskVec[] = {
      Reverse1 ? 1 : 0,
      Reverse1 ? 0 : 1,
      static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
      static_cast<int>(Reverse2 ? NumElems   : NumElems+1)
    };
    return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
  }

  if (Values.size() > 1 && VT.is128BitVector()) {
    // Check for a build vector of consecutive loads.
    for (unsigned i = 0; i < NumElems; ++i)
      V[i] = Op.getOperand(i);

    // Check for elements which are consecutive loads.
    SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false);
    if (LD.getNode())
      return LD;

    // Check for a build vector from mostly shuffle plus few inserting.
    SDValue Sh = buildFromShuffleMostly(Op, DAG);
    if (Sh.getNode())
      return Sh;

    // For SSE 4.1, use insertps to put the high elements into the low element.
    if (Subtarget->hasSSE41()) {
      SDValue Result;
      if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
        Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
      else
        Result = DAG.getUNDEF(VT);

      for (unsigned i = 1; i < NumElems; ++i) {
        if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
        Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
                             Op.getOperand(i), DAG.getIntPtrConstant(i));
      }
      return Result;
    }

    // Otherwise, expand into a number of unpckl*, start by extending each of
    // our (non-undef) elements to the full vector width with the element in the
    // bottom slot of the vector (which generates no code for SSE).
    for (unsigned i = 0; i < NumElems; ++i) {
      if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
        V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
      else
        V[i] = DAG.getUNDEF(VT);
    }

    // Next, we iteratively mix elements, e.g. for v4f32:
    //   Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
    //         : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
    //   Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
    unsigned EltStride = NumElems >> 1;
    while (EltStride != 0) {
      for (unsigned i = 0; i < EltStride; ++i) {
        // If V[i+EltStride] is undef and this is the first round of mixing,
        // then it is safe to just drop this shuffle: V[i] is already in the
        // right place, the one element (since it's the first round) being
        // inserted as undef can be dropped. This isn't safe for successive
        // rounds because they will permute elements within both vectors.
        if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
            EltStride == NumElems/2)
          continue;

        V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
      }
      EltStride >>= 1;
    }

    return V[0];
  }
  return SDValue();
}

// LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction
// to create 256-bit vectors from two other 128-bit ones.
static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT ResVT = Op.getSimpleValueType();

  assert((ResVT.is256BitVector() ||
          ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");

  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  unsigned NumElems = ResVT.getVectorNumElements();
  if (ResVT.is256BitVector())
    return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);

  if (Op.getNumOperands() == 4) {
    MVT HalfVT = MVT::getVectorVT(ResVT.getScalarType(),
                                  ResVT.getVectorNumElements()/2);
    SDValue V3 = Op.getOperand(2);
    SDValue V4 = Op.getOperand(3);
    return Concat256BitVectors(
        Concat128BitVectors(V1, V2, HalfVT, NumElems/2, DAG, dl),
        Concat128BitVectors(V3, V4, HalfVT, NumElems/2, DAG, dl),
        ResVT, NumElems, DAG, dl);
  }
  return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
}

static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
  MVT LLVM_ATTRIBUTE_UNUSED VT = Op.getSimpleValueType();
  assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
         (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
          Op.getNumOperands() == 4)));

  // AVX can use the vinsertf128 instruction to create 256-bit vectors
  // from two other 128-bit ones.

  // A 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors.
  return LowerAVXCONCAT_VECTORS(Op, DAG);
}

//===----------------------------------------------------------------------===//
// Vector shuffle lowering
//
// This is an experimental code path for lowering vector shuffles on x86. It is
// designed to handle arbitrary vector shuffles and blends, gracefully
// degrading performance as necessary. It works hard to recognize idiomatic
// shuffles and lower them to optimal instruction patterns without leaving
// a framework that allows reasonably efficient handling of all vector shuffle
// operations.
//===----------------------------------------------------------------------===//

/// \brief Tiny helper function to identify a no-op mask.
///
/// This is a somewhat boring predicate function. It checks whether the mask
/// array input, which is assumed to be a single-input shuffle mask of the kind
/// used by the X86 shuffle instructions (not a fully general
/// ShuffleVectorSDNode mask), requires any shuffles to occur. Both undef and an
/// in-place shuffle are 'no-op's.
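///
/// For example, <0, 1, 2, 3> and <-1, 1, -1, 3> are both no-op masks for a
/// 4-element shuffle, while <0, 1, 2, 0> is not.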
static bool isNoopShuffleMask(ArrayRef<int> Mask) {
  for (int i = 0, Size = Mask.size(); i < Size; ++i)
    if (Mask[i] != -1 && Mask[i] != i)
      return false;
  return true;
}

/// \brief Helper function to classify a mask as a single-input mask.
///
/// This isn't a generic single-input test because in the vector shuffle
/// lowering we canonicalize single inputs to be the first input operand. This
/// means we can more quickly test for a single input by only checking whether
/// an input from the second operand exists. We also assume that the size of
/// the mask corresponds to the size of the input vectors, which isn't true in
/// the fully general case.
static bool isSingleInputShuffleMask(ArrayRef<int> Mask) {
  for (int M : Mask)
    if (M >= (int)Mask.size())
      return false;
  return true;
}

/// \brief Test whether there are elements crossing 128-bit lanes in this
/// shuffle mask.
///
/// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
/// and we routinely test for these.
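///
/// For example, the v8f32 mask <0, 1, 2, 3, 7, 6, 5, 4> stays within its
/// 128-bit lanes, while <4, 5, 6, 7, 0, 1, 2, 3> crosses them.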
static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
  int LaneSize = 128 / VT.getScalarSizeInBits();
  int Size = Mask.size();
  for (int i = 0; i < Size; ++i)
    if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
      return true;
  return false;
}

/// \brief Test whether a shuffle mask is equivalent within each 128-bit lane.
///
/// This checks a shuffle mask to see if it is performing the same
/// 128-bit lane-relative shuffle in each 128-bit lane. This trivially implies
/// that it is also not lane-crossing. It may however involve a blend from the
/// same lane of a second vector.
///
/// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
/// non-trivial to compute in the face of undef lanes. The representation is
/// *not* suitable for use with existing 128-bit shuffles as it will contain
/// entries from both V1 and V2 inputs to the wider mask.
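///
/// For example, the v8f32 mask <0, 8, 1, 9, 4, 12, 5, 13> repeats the
/// lane-relative mask <0, 8, 1, 9> in both 128-bit lanes.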
static bool
is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
                                SmallVectorImpl<int> &RepeatedMask) {
  int LaneSize = 128 / VT.getScalarSizeInBits();
  RepeatedMask.resize(LaneSize, -1);
  int Size = Mask.size();
  for (int i = 0; i < Size; ++i) {
    if (Mask[i] < 0)
      continue;
    if ((Mask[i] % Size) / LaneSize != i / LaneSize)
      // This entry crosses lanes, so there is no way to model this shuffle.
      return false;

    // Ok, handle the in-lane shuffles by detecting if and when they repeat.
    if (RepeatedMask[i % LaneSize] == -1)
      // This is the first non-undef entry in this slot of a 128-bit lane.
      RepeatedMask[i % LaneSize] =
          Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + Size;
    else if (RepeatedMask[i % LaneSize] + (i / LaneSize) * LaneSize != Mask[i])
      // Found a mismatch with the repeated mask.
      return false;
  }
  return true;
}

/// \brief Base case helper for testing a single mask element.
static bool isShuffleEquivalentImpl(SDValue V1, SDValue V2,
                                    BuildVectorSDNode *BV1,
                                    BuildVectorSDNode *BV2, ArrayRef<int> Mask,
                                    int i, int Arg) {
  int Size = Mask.size();
  if (Mask[i] != -1 && Mask[i] != Arg) {
    auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
    auto *ArgsBV = Arg < Size ? BV1 : BV2;
    if (!MaskBV || !ArgsBV ||
        MaskBV->getOperand(Mask[i] % Size) != ArgsBV->getOperand(Arg % Size))
      return false;
  }
  return true;
}

/// \brief Recursive helper to peel off and test each mask element.
template <typename... Ts>
static bool isShuffleEquivalentImpl(SDValue V1, SDValue V2,
                                    BuildVectorSDNode *BV1,
                                    BuildVectorSDNode *BV2, ArrayRef<int> Mask,
                                    int i, int Arg, Ts... Args) {
  if (!isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, i, Arg))
    return false;

  return isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, i + 1, Args...);
}

/// \brief Checks whether a shuffle mask is equivalent to an explicit list of
/// arguments.
///
/// This is a fast way to test a shuffle mask against a fixed pattern:
///
///   if (isShuffleEquivalent(V1, V2, Mask, 3, 2, 1, 0)) { ... }
///
/// It returns true if the mask is exactly as wide as the argument list, and
/// each element of the mask is either -1 (signifying undef) or the value given
/// in the argument.
template <typename... Ts>
static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
                                Ts... Args) {
  if (Mask.size() != sizeof...(Args))
    return false;

  // If the values are build vectors, we can look through them to find
  // equivalent inputs that make the shuffles equivalent.
  auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
  auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);

  // Recursively peel off arguments and test them against the mask.
  return isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, 0, Args...);
}

/// \brief Get a 4-lane 8-bit shuffle immediate for a mask.
///
/// This helper function produces an 8-bit shuffle immediate corresponding to
/// the ubiquitous shuffle encoding scheme used in x86 instructions for
/// shuffling 4 lanes. It can be used with most of the PSHUF instructions, for
/// example.
///
/// NB: We rely heavily on "undef" masks preserving the input lane.
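///
/// For example, the mask <3, 2, 1, 0> produces the immediate 0x1B
/// (0b00011011).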
static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask,
                                          SelectionDAG &DAG) {
  assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
  assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
  assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
  assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
  assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");

  unsigned Imm = 0;
  Imm |= (Mask[0] == -1 ? 0 : Mask[0]) << 0;
  Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2;
  Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4;
  Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6;
  return DAG.getConstant(Imm, MVT::i8);
}

/// \brief Try to emit a blend instruction for a shuffle using bit math.
///
/// This is used as a fallback approach when first class blend instructions are
/// unavailable. Currently it is only suitable for integer vectors, but could
/// be generalized for floating point vectors if desirable.
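///
/// For example, the v4i32 mask <0, 5, 2, 7> selects elements 0 and 2 from V1
/// and elements 1 and 3 from V2, so it is emitted as
/// (V1 & <-1, 0, -1, 0>) | (V2 & ~<-1, 0, -1, 0>).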
static SDValue lowerVectorShuffleAsBitBlend(SDLoc DL, MVT VT, SDValue V1,
                                            SDValue V2, ArrayRef<int> Mask,
                                            SelectionDAG &DAG) {
  assert(VT.isInteger() && "Only supports integer vector types!");
  MVT EltVT = VT.getScalarType();
  int NumEltBits = EltVT.getSizeInBits();
  SDValue Zero = DAG.getConstant(0, EltVT);
  SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), EltVT);
  SmallVector<SDValue, 16> MaskOps;
  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Mask[i] != -1 && Mask[i] != i && Mask[i] != i + Size)
      return SDValue(); // Shuffled input!
    MaskOps.push_back(Mask[i] < Size ? AllOnes : Zero);
  }

  SDValue V1Mask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, MaskOps);
  V1 = DAG.getNode(ISD::AND, DL, VT, V1, V1Mask);
  // We have to cast V2 around.
  MVT MaskVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
  V2 = DAG.getNode(ISD::BITCAST, DL, VT,
                   DAG.getNode(X86ISD::ANDNP, DL, MaskVT,
                               DAG.getNode(ISD::BITCAST, DL, MaskVT, V1Mask),
                               DAG.getNode(ISD::BITCAST, DL, MaskVT, V2)));
  return DAG.getNode(ISD::OR, DL, VT, V1, V2);
}

/// \brief Try to emit a blend instruction for a shuffle.
///
/// This doesn't do any checks for the availability of instructions for blending
/// these values. It relies on the availability of the X86ISD::BLENDI pattern to
/// be matched in the backend with the type given. What it does check for is
/// that the shuffle mask is in fact a blend.
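///
/// For example, the v4f32 mask <0, 5, 2, 7> is a blend taking elements 1 and
/// 3 from V2, so the emitted blend immediate is 0b1010.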
static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
                                         SDValue V2, ArrayRef<int> Mask,
                                         const X86Subtarget *Subtarget,
                                         SelectionDAG &DAG) {
  unsigned BlendMask = 0;
  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Mask[i] >= Size) {
      if (Mask[i] != i + Size)
        return SDValue(); // Shuffled V2 input!
      BlendMask |= 1u << i;
      continue;
    }
    if (Mask[i] >= 0 && Mask[i] != i)
      return SDValue(); // Shuffled V1 input!
  }
  switch (VT.SimpleTy) {
  case MVT::v2f64:
  case MVT::v4f32:
  case MVT::v4f64:
  case MVT::v8f32:
    return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
                       DAG.getConstant(BlendMask, MVT::i8));

  case MVT::v4i64:
  case MVT::v8i32:
    assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
    // FALLTHROUGH
  case MVT::v2i64:
  case MVT::v4i32:
    // If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
    // that instruction.
    if (Subtarget->hasAVX2()) {
      // Scale the blend by the number of 32-bit dwords per element.
      int Scale = VT.getScalarSizeInBits() / 32;
      BlendMask = 0;
      for (int i = 0, Size = Mask.size(); i < Size; ++i)
        if (Mask[i] >= Size)
          for (int j = 0; j < Scale; ++j)
            BlendMask |= 1u << (i * Scale + j);

      MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
      V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
      V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
      return DAG.getNode(ISD::BITCAST, DL, VT,
                         DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
                                     DAG.getConstant(BlendMask, MVT::i8)));
    }
    // FALLTHROUGH
  case MVT::v8i16: {
    // For integer shuffles we need to expand the mask and cast the inputs to
    // v8i16s prior to blending.
    int Scale = 8 / VT.getVectorNumElements();
    BlendMask = 0;
    for (int i = 0, Size = Mask.size(); i < Size; ++i)
      if (Mask[i] >= Size)
        for (int j = 0; j < Scale; ++j)
          BlendMask |= 1u << (i * Scale + j);

    V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
    V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
    return DAG.getNode(ISD::BITCAST, DL, VT,
                       DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
                                   DAG.getConstant(BlendMask, MVT::i8)));
  }

  case MVT::v16i16: {
    assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
    SmallVector<int, 8> RepeatedMask;
    if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
      // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
      assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
      BlendMask = 0;
      for (int i = 0; i < 8; ++i)
        if (RepeatedMask[i] >= 16)
          BlendMask |= 1u << i;
      return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
                         DAG.getConstant(BlendMask, MVT::i8));
    }
  }
    // FALLTHROUGH
  case MVT::v16i8:
  case MVT::v32i8: {
    // Scale the blend by the number of bytes per element.
    int Scale = VT.getScalarSizeInBits() / 8;

    // This form of blend is always done on bytes. Compute the byte vector
    // type.
    MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);

    // Compute the VSELECT mask. Note that VSELECT is really confusing in the
    // mix of LLVM's code generator and the x86 backend. We tell the code
    // generator that boolean values in the elements of an x86 vector register
    // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
    // mapping a select to operand #1, and 'false' mapping to operand #2. The
    // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
    // of the element (the remaining are ignored) and 0 in that high bit would
    // mean operand #1 while 1 in the high bit would mean operand #2. So while
    // the LLVM model for boolean values in vector elements gets the relevant
    // bit set, it is set backwards and over constrained relative to x86's
    // actual model.
    SmallVector<SDValue, 32> VSELECTMask;
    for (int i = 0, Size = Mask.size(); i < Size; ++i)
      for (int j = 0; j < Scale; ++j)
        VSELECTMask.push_back(
            Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
                        : DAG.getConstant(Mask[i] < Size ? -1 : 0, MVT::i8));

    V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
    V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
    return DAG.getNode(
        ISD::BITCAST, DL, VT,
        DAG.getNode(ISD::VSELECT, DL, BlendVT,
                    DAG.getNode(ISD::BUILD_VECTOR, DL, BlendVT, VSELECTMask),
                    V1, V2));
  }

  default:
    llvm_unreachable("Not a supported integer vector type!");
  }
}

/// \brief Try to lower as a blend of elements from two inputs followed by
/// a single-input permutation.
///
/// This matches the pattern where we can blend elements from two inputs and
/// then reduce the shuffle to a single-input permutation.
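///
/// For example, the 4-element mask <5, 0, 7, 2> is lowered as the blend
/// <0, 5, 2, 7> followed by the single-input permutation <1, 0, 3, 2>.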
static SDValue lowerVectorShuffleAsBlendAndPermute(SDLoc DL, MVT VT, SDValue V1,
                                                   SDValue V2,
                                                   ArrayRef<int> Mask,
                                                   SelectionDAG &DAG) {
  // We build up the blend mask while checking whether a blend is a viable way
  // to reduce the shuffle.
  SmallVector<int, 32> BlendMask(Mask.size(), -1);
  SmallVector<int, 32> PermuteMask(Mask.size(), -1);

  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Mask[i] < 0)
      continue;

    assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");

    if (BlendMask[Mask[i] % Size] == -1)
      BlendMask[Mask[i] % Size] = Mask[i];
    else if (BlendMask[Mask[i] % Size] != Mask[i])
      return SDValue(); // Can't blend in the needed input!

    PermuteMask[i] = Mask[i] % Size;
  }

  SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
  return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
}

/// \brief Generic routine to decompose a shuffle and blend into independent
/// blends and permutes.
///
/// This matches the extremely common pattern for handling combined
/// shuffle+blend operations on newer X86 ISAs where we have very fast blend
/// operations. It will try to pick the best arrangement of shuffles and
/// blends.
static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
                                                          SDValue V1,
                                                          SDValue V2,
                                                          ArrayRef<int> Mask,
                                                          SelectionDAG &DAG) {
  // Shuffle the input elements into the desired positions in V1 and V2 and
  // blend them together.
  SmallVector<int, 32> V1Mask(Mask.size(), -1);
  SmallVector<int, 32> V2Mask(Mask.size(), -1);
  SmallVector<int, 32> BlendMask(Mask.size(), -1);
  for (int i = 0, Size = Mask.size(); i < Size; ++i)
    if (Mask[i] >= 0 && Mask[i] < Size) {
      V1Mask[i] = Mask[i];
      BlendMask[i] = i;
    } else if (Mask[i] >= Size) {
      V2Mask[i] = Mask[i] - Size;
      BlendMask[i] = i + Size;
    }

  // Try to lower with the simpler initial blend strategy unless one of the
  // input shuffles would be a no-op. We prefer to shuffle inputs as the
  // shuffle may be able to fold with a load or other benefit. However, when
  // we'll have to do 2x as many shuffles in order to achieve this, blending
  // first is a better strategy.
  if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask))
    if (SDValue BlendPerm =
            lowerVectorShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask, DAG))
      return BlendPerm;

  V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
  V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
  return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
}

/// \brief Try to lower a vector shuffle as a byte rotation.
///
/// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
/// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
/// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
/// try to generically lower a vector shuffle through such a pattern. It
/// does not check for the profitability of lowering either as PALIGNR or
/// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
/// This matches shuffle vectors that look like:
///
///   v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
///
/// Essentially it concatenates V1 and V2, shifts right by some number of
/// elements, and takes the low elements as the result. Note that while this is
/// specified as a *right shift* because x86 is little-endian, it is a *left
/// rotate* of the vector lanes.
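///
/// For the v8i16 example mask above the rotation amount is 3 elements, so the
/// byte rotation applied to the concatenated inputs is 3 * 2 == 6 bytes.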
static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
                                              SDValue V2,
                                              ArrayRef<int> Mask,
                                              const X86Subtarget *Subtarget,
                                              SelectionDAG &DAG) {
  assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");

  int NumElts = Mask.size();
  int NumLanes = VT.getSizeInBits() / 128;
  int NumLaneElts = NumElts / NumLanes;

  // We need to detect various ways of spelling a rotation:
  //   [11, 12, 13, 14, 15,  0,  1,  2]
  //   [-1, 12, 13, 14, -1, -1,  1, -1]
  //   [-1, -1, -1, -1, -1, -1,  1,  2]
  //   [ 3,  4,  5,  6,  7,  8,  9, 10]
  //   [-1,  4,  5,  6, -1, -1,  9, -1]
  //   [-1,  4,  5,  6, -1, -1, -1, -1]
  int Rotation = 0;
  SDValue Lo, Hi;
  for (int l = 0; l < NumElts; l += NumLaneElts) {
    for (int i = 0; i < NumLaneElts; ++i) {
      if (Mask[l + i] == -1)
        continue;
      assert(Mask[l + i] >= 0 && "Only -1 is a valid negative mask element!");

      // Get the mod-Size index and lane correct it.
      int LaneIdx = (Mask[l + i] % NumElts) - l;
      // Make sure it was in this lane.
      if (LaneIdx < 0 || LaneIdx >= NumLaneElts)
        return SDValue();

      // Determine where a rotated vector would have started.
      int StartIdx = i - LaneIdx;
      if (StartIdx == 0)
        // The identity rotation isn't interesting, stop.
        return SDValue();

      // If we found the tail of a vector the rotation must be the missing
      // front. If we found the head of a vector, it must be how much of the
      // head.
      int CandidateRotation = StartIdx < 0 ? -StartIdx : NumLaneElts - StartIdx;

      if (Rotation == 0)
        Rotation = CandidateRotation;
      else if (Rotation != CandidateRotation)
        // The rotations don't match, so we can't match this mask.
        return SDValue();

      // Compute which value this mask is pointing at.
      SDValue MaskV = Mask[l + i] < NumElts ? V1 : V2;

      // Compute which of the two target values this index should be assigned
      // to. This reflects whether the high elements are remaining or the low
      // elements are remaining.
      SDValue &TargetV = StartIdx < 0 ? Hi : Lo;

      // Either set up this value if we've not encountered it before, or check
      // that it remains consistent.
      if (!TargetV)
        TargetV = MaskV;
      else if (TargetV != MaskV)
        // This may be a rotation, but it pulls from the inputs in some
        // unsupported interleaving.
        return SDValue();
    }
  }

  // Check that we successfully analyzed the mask, and normalize the results.
  assert(Rotation != 0 && "Failed to locate a viable rotation!");
  assert((Lo || Hi) && "Failed to find a rotated input vector!");
  if (!Lo)
    Lo = Hi;
  else if (!Hi)
    Hi = Lo;

  // The actual rotate instruction rotates bytes, so we need to scale the
  // rotation based on how many bytes are in the vector lane.
  int Scale = 16 / NumLaneElts;

  // SSSE3 targets can use the palignr instruction.
  if (Subtarget->hasSSSE3()) {
    // Cast the inputs to i8 vector of correct length to match PALIGNR.
    MVT AlignVT = MVT::getVectorVT(MVT::i8, 16 * NumLanes);
    Lo = DAG.getNode(ISD::BITCAST, DL, AlignVT, Lo);
    Hi = DAG.getNode(ISD::BITCAST, DL, AlignVT, Hi);

    return DAG.getNode(ISD::BITCAST, DL, VT,
                       DAG.getNode(X86ISD::PALIGNR, DL, AlignVT, Hi, Lo,
                                   DAG.getConstant(Rotation * Scale, MVT::i8)));
  }

  assert(VT.getSizeInBits() == 128 &&
         "Rotate-based lowering only supports 128-bit lowering!");
  assert(Mask.size() <= 16 &&
         "Can shuffle at most 16 bytes in a 128-bit vector!");

  // Default SSE2 implementation.
  int LoByteShift = 16 - Rotation * Scale;
  int HiByteShift = Rotation * Scale;

  // Cast the inputs to v2i64 to match PSLLDQ/PSRLDQ.
  Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Lo);
  Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Hi);

  SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo,
                                DAG.getConstant(LoByteShift, MVT::i8));
  SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi,
                                DAG.getConstant(HiByteShift, MVT::i8));
  return DAG.getNode(ISD::BITCAST, DL, VT,
                     DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift));
}

/// \brief Compute whether each element of a shuffle is zeroable.
///
/// A "zeroable" vector shuffle element is one which can be lowered to zero.
/// Either it is an undef element in the shuffle mask, the element of the input
/// referenced is undef, or the element of the input referenced is known to be
/// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
/// as many lanes with this technique as possible to simplify the remaining
/// shuffle.
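///
/// For example, if V2 is a build_vector of all zeros, every mask element that
/// selects from V2 is zeroable.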
static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
                                                     SDValue V1, SDValue V2) {
  SmallBitVector Zeroable(Mask.size(), false);

  while (V1.getOpcode() == ISD::BITCAST)
    V1 = V1->getOperand(0);
  while (V2.getOpcode() == ISD::BITCAST)
    V2 = V2->getOperand(0);

  bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
  bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());

  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    int M = Mask[i];
    // Handle the easy cases.
    if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
      Zeroable[i] = true;
      continue;
    }

    // If this is an index into a build_vector node (which has the same number
    // of elements), dig out the input value and use it.
    SDValue V = M < Size ? V1 : V2;
    if (V.getOpcode() != ISD::BUILD_VECTOR || Size != (int)V.getNumOperands())
      continue;

    SDValue Input = V.getOperand(M % Size);
    // The UNDEF opcode check really should be dead code here, but not quite
    // worth asserting on (it isn't invalid, just unexpected).
    if (Input.getOpcode() == ISD::UNDEF || X86::isZeroNode(Input))
      Zeroable[i] = true;
  }

  return Zeroable;
}

/// \brief Try to emit a bitmask instruction for a shuffle.
///
/// This handles cases where we can model a blend exactly as a bitmask due to
/// one of the inputs being zeroable.
static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1,
                                           SDValue V2, ArrayRef<int> Mask,
                                           SelectionDAG &DAG) {
  MVT EltVT = VT.getScalarType();
  int NumEltBits = EltVT.getSizeInBits();
  MVT IntEltVT = MVT::getIntegerVT(NumEltBits);
  SDValue Zero = DAG.getConstant(0, IntEltVT);
  SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits),
                                    IntEltVT);
  if (EltVT.isFloatingPoint()) {
    Zero = DAG.getNode(ISD::BITCAST, DL, EltVT, Zero);
    AllOnes = DAG.getNode(ISD::BITCAST, DL, EltVT, AllOnes);
  }
  SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
  SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
  SDValue V;
  for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Zeroable[i])
      continue;
    if (Mask[i] % Size != i)
      return SDValue(); // Not a blend.
    if (!V)
      V = Mask[i] < Size ? V1 : V2;
    else if (V != (Mask[i] < Size ? V1 : V2))
      return SDValue(); // Can only let one input through the mask.

    VMaskOps[i] = AllOnes;
  }
  if (!V)
    return SDValue(); // No non-zeroable elements!

  SDValue VMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, VMaskOps);
  V = DAG.getNode(VT.isFloatingPoint()
                      ? (unsigned) X86ISD::FAND : (unsigned) ISD::AND,
                  DL, VT, V, VMask);
  return V;
}


/// \brief Try to lower a vector shuffle as a bit shift (shifts in zeros).
///
/// Attempts to match a shuffle mask against the PSLL(W/D/Q/DQ) and
/// PSRL(W/D/Q/DQ) SSE2 and AVX2 logical bit-shift instructions. The function
/// matches elements from one of the input vectors shuffled to the left or
/// right with zeroable elements 'shifted in'. It handles both the strictly
/// bit-wise element shifts and the byte shift across an entire 128-bit double
/// quad word. For example:
///
/// PSLL : (little-endian) left bit shift.
/// [ zz,  0, zz,  2 ]
/// [ -1,  4, zz, -1 ]
/// PSRL : (little-endian) right bit shift.
/// [  1, zz,  3, zz]
/// [ -1, -1,  7, zz]
/// PSLLDQ : (little-endian) left byte shift
/// [ zz,  0,  1,  2,  3,  4,  5,  6]
/// [ zz, zz, -1, -1,  2,  3,  4, -1]
/// [ zz, zz, zz, zz, zz, zz, -1,  1]
/// PSRLDQ : (little-endian) right byte shift
/// [  5, 6,  7, zz, zz, zz, zz, zz]
/// [ -1, 5,  6,  7, zz, zz, zz, zz]
/// [  1, 2, -1, -1, -1, -1, zz, zz]
static SDValue lowerVectorShuffleAsShift(SDLoc DL, MVT VT, SDValue V1,
                                         SDValue V2, ArrayRef<int> Mask,
                                         SelectionDAG &DAG) {
  SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);

  int Size = Mask.size();
  assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");

  auto CheckZeros = [&](int Shift, int Scale, bool Left) {
    for (int i = 0; i < Size; i += Scale)
      for (int j = 0; j < Shift; ++j)
        if (!Zeroable[i + j + (Left ? 0 : (Scale - Shift))])
          return false;

    return true;
  };

  auto MatchShift = [&](int Shift, int Scale, bool Left, SDValue V) {
    for (int i = 0; i != Size; i += Scale) {
      unsigned Pos = Left ? i + Shift : i;
      unsigned Low = Left ? i : i + Shift;
      unsigned Len = Scale - Shift;
      if (!isSequentialOrUndefInRange(Mask, Pos, Len,
                                      Low + (V == V1 ? 0 : Size)))
        return SDValue();
    }

    int ShiftEltBits = VT.getScalarSizeInBits() * Scale;
    bool ByteShift = ShiftEltBits > 64;
    unsigned OpCode = Left ? (ByteShift ? X86ISD::VSHLDQ : X86ISD::VSHLI)
                           : (ByteShift ? X86ISD::VSRLDQ : X86ISD::VSRLI);
    int ShiftAmt = Shift * VT.getScalarSizeInBits() / (ByteShift ? 8 : 1);

    // Normalize the scale for byte shifts to still produce an i64 element
    // type.
    Scale = ByteShift ? Scale / 2 : Scale;

    // We need to round trip through the appropriate type for the shift.
    MVT ShiftSVT = MVT::getIntegerVT(VT.getScalarSizeInBits() * Scale);
    MVT ShiftVT = MVT::getVectorVT(ShiftSVT, Size / Scale);
    assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
           "Illegal integer vector type");
    V = DAG.getNode(ISD::BITCAST, DL, ShiftVT, V);

    V = DAG.getNode(OpCode, DL, ShiftVT, V,
                    DAG.getConstant(ShiftAmt, MVT::i8));
    return DAG.getNode(ISD::BITCAST, DL, VT, V);
  };

  // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
  // keep doubling the size of the integer elements up to that. We can then
  // shift the elements of the integer vector by whole multiples of their
  // width within the elements of the larger integer vector. Test each
  // multiple to see if we can find a match with the moved element indices
  // and that the shifted in elements are all zeroable.
  for (int Scale = 2; Scale * VT.getScalarSizeInBits() <= 128; Scale *= 2)
    for (int Shift = 1; Shift != Scale; ++Shift)
      for (bool Left : {true, false})
        if (CheckZeros(Shift, Scale, Left))
          for (SDValue V : {V1, V2})
            if (SDValue Match = MatchShift(Shift, Scale, Left, V))
              return Match;

  // No viable shift lowering found.
  return SDValue();
}
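
// Example: the v4i32 mask [1, zz, 3, zz] matches with Scale == 2, Shift == 1,
// and Left == false: the vector is bitcast to v2i64 and lowered as VSRLI by
// 32 bits (PSRLQ $32), moving element 1 into element 0 and element 3 into
// element 2 while shifting zeroes into the vacated lanes.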

/// \brief Lower a vector shuffle as a zero or any extension.
///
/// Given a specific number of elements, element bit width, and extension
/// stride, produce either a zero or any extension based on the available
/// features of the subtarget.
static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
    SDLoc DL, MVT VT, int Scale, bool AnyExt, SDValue InputV,
    const X86Subtarget *Subtarget, SelectionDAG &DAG) {
  assert(Scale > 1 && "Need a scale to extend.");
  int NumElements = VT.getVectorNumElements();
  int EltBits = VT.getScalarSizeInBits();
  assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
         "Only 8, 16, and 32 bit elements can be extended.");
  assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");

  // Found a valid zext mask! Try various lowering strategies based on the
  // input type and available ISA extensions.
  if (Subtarget->hasSSE41()) {
    MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
                                 NumElements / Scale);
    return DAG.getNode(ISD::BITCAST, DL, VT,
                       DAG.getNode(X86ISD::VZEXT, DL, ExtVT, InputV));
  }

  // For any extends we can cheat for larger element sizes and use shuffle
  // instructions that can fold with a load and/or copy.
  if (AnyExt && EltBits == 32) {
    int PSHUFDMask[4] = {0, -1, 1, -1};
    return DAG.getNode(
        ISD::BITCAST, DL, VT,
        DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
                    DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
                    getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
  }
  if (AnyExt && EltBits == 16 && Scale > 2) {
    int PSHUFDMask[4] = {0, -1, 0, -1};
    InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
                         DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
                         getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG));
    int PSHUFHWMask[4] = {1, -1, -1, -1};
    return DAG.getNode(
        ISD::BITCAST, DL, VT,
        DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16,
                    DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, InputV),
                    getV4X86ShuffleImm8ForMask(PSHUFHWMask, DAG)));
  }

  // If this would require more than 2 unpack instructions to expand, use
  // pshufb when available. We can only use more than 2 unpack instructions
  // when zero extending i8 elements which also makes it easier to use pshufb.
  if (Scale > 4 && EltBits == 8 && Subtarget->hasSSSE3()) {
    assert(NumElements == 16 && "Unexpected byte vector width!");
    SDValue PSHUFBMask[16];
    for (int i = 0; i < 16; ++i)
      PSHUFBMask[i] =
          DAG.getConstant((i % Scale == 0) ? i / Scale : 0x80, MVT::i8);
    InputV = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, InputV);
    return DAG.getNode(ISD::BITCAST, DL, VT,
                       DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
                                   DAG.getNode(ISD::BUILD_VECTOR, DL,
                                               MVT::v16i8, PSHUFBMask)));
  }

  // Otherwise emit a sequence of unpacks.
  do {
    MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
    SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
                         : getZeroVector(InputVT, Subtarget, DAG, DL);
    InputV = DAG.getNode(ISD::BITCAST, DL, InputVT, InputV);
    InputV = DAG.getNode(X86ISD::UNPCKL, DL, InputVT, InputV, Ext);
    Scale /= 2;
    EltBits *= 2;
    NumElements /= 2;
  } while (Scale > 1);
  return DAG.getNode(ISD::BITCAST, DL, VT, InputV);
}
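
// Example: zero extending the low eight i8 elements of a v16i8 vector with
// Scale == 2 and no SSE4.1 PMOVZX takes a single trip through the unpack
// loop: one PUNPCKLBW against an all-zeroes vector interleaves each byte
// with a zero byte.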

/// \brief Try to lower a vector shuffle as a zero extension on any microarch.
///
/// This routine will try to do everything in its power to cleverly lower
/// a shuffle which happens to match the pattern of a zero extend. It doesn't
/// check for the profitability of this lowering, it tries to aggressively
/// match this pattern. It will use all of the micro-architectural details it
/// can to emit an efficient lowering. It handles both blends with all-zero
/// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
/// masking out later).
///
/// The reason we have dedicated lowering for zext-style shuffles is that they
/// are both incredibly common and often quite performance sensitive.
static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
    SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget *Subtarget, SelectionDAG &DAG) {
  SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);

  int Bits = VT.getSizeInBits();
  int NumElements = VT.getVectorNumElements();
  assert(VT.getScalarSizeInBits() <= 32 &&
         "Exceeds 32-bit integer zero extension limit");
  assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");

  // Define a helper function to check a particular ext-scale and lower to it
  // if valid.
  auto Lower = [&](int Scale) -> SDValue {
    SDValue InputV;
    bool AnyExt = true;
    for (int i = 0; i < NumElements; ++i) {
      if (Mask[i] == -1)
        continue; // Valid anywhere but doesn't tell us anything.
      if (i % Scale != 0) {
        // Each of the extended elements needs to be zeroable.
        if (!Zeroable[i])
          return SDValue();

        // We no longer are in the anyext case.
        AnyExt = false;
        continue;
      }

      // Each of the base elements needs to be consecutive indices into the
      // same input vector.
      SDValue V = Mask[i] < NumElements ? V1 : V2;
      if (!InputV)
        InputV = V;
      else if (InputV != V)
        return SDValue(); // Flip-flopping inputs.

      if (Mask[i] % NumElements != i / Scale)
        return SDValue(); // Non-consecutive strided elements.
    }

    // If we fail to find an input, we have a zero-shuffle which should always
    // have already been handled.
    // FIXME: Maybe handle this here in case during blending we end up with one?
    if (!InputV)
      return SDValue();

    return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
        DL, VT, Scale, AnyExt, InputV, Subtarget, DAG);
  };

  // The widest scale possible for extending is to a 64-bit integer.
  assert(Bits % 64 == 0 &&
         "The number of bits in a vector must be divisible by 64 on x86!");
  int NumExtElements = Bits / 64;

  // Each iteration, try extending the elements half as much, but into twice
  // as many elements.
  for (; NumExtElements < NumElements; NumExtElements *= 2) {
    assert(NumElements % NumExtElements == 0 &&
           "The input vector size must be divisible by the extended size.");
    if (SDValue V = Lower(NumElements / NumExtElements))
      return V;
  }

  // General extends failed, but 128-bit vectors may be able to use MOVQ.
  if (Bits != 128)
    return SDValue();

  // Returns one of the source operands if the shuffle can be reduced to a
  // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
  auto CanZExtLowHalf = [&]() {
    for (int i = NumElements / 2; i != NumElements; ++i)
      if (!Zeroable[i])
        return SDValue();
    if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
      return V1;
    if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
      return V2;
    return SDValue();
  };

  if (SDValue V = CanZExtLowHalf()) {
    V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V);
    V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
    return DAG.getNode(ISD::BITCAST, DL, VT, V);
  }

  // No viable ext lowering found.
  return SDValue();
}
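
// Example: the v4i32 mask [0, zz, 1, zz] (zz zeroable) matches at Scale == 2:
// elements 0 and 1 of one input stride out into the even lanes while the odd
// lanes are zeroable, so this lowers as a zero extension of the low two i32
// elements into two 64-bit lanes.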

/// \brief Try to get a scalar value for a specific element of a vector.
///
/// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
                                              SelectionDAG &DAG) {
  MVT VT = V.getSimpleValueType();
  MVT EltVT = VT.getVectorElementType();
  while (V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);
  // If the bitcasts shift the element size, we can't extract an equivalent
  // element from it.
  MVT NewVT = V.getSimpleValueType();
  if (!NewVT.isVector() ||
      NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
    return SDValue();

  if (V.getOpcode() == ISD::BUILD_VECTOR ||
      (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR))
    return DAG.getNode(ISD::BITCAST, SDLoc(V), EltVT, V.getOperand(Idx));

  return SDValue();
}
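
// Example: asking for element 0 of (bitcast (scalar_to_vector x)) returns x
// bitcast to the element type, which callers can then fold directly.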

/// \brief Helper to test for a load that can be folded with x86 shuffles.
///
/// This is particularly important because the set of instructions varies
/// significantly based on whether the operand is a load or not.
static bool isShuffleFoldableLoad(SDValue V) {
  while (V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);

  return ISD::isNON_EXTLoad(V.getNode());
}

/// \brief Try to lower insertion of a single element into a zero vector.
///
/// This is a common pattern that we have especially efficient lowering
/// patterns for across all subtarget feature sets.
static SDValue lowerVectorShuffleAsElementInsertion(
    MVT VT, SDLoc DL, SDValue V1, SDValue V2, ArrayRef<int> Mask,
    const X86Subtarget *Subtarget, SelectionDAG &DAG) {
  SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
  MVT ExtVT = VT;
  MVT EltVT = VT.getVectorElementType();

  int V2Index = std::find_if(Mask.begin(), Mask.end(),
                             [&Mask](int M) { return M >= (int)Mask.size(); }) -
                Mask.begin();
  bool IsV1Zeroable = true;
  for (int i = 0, Size = Mask.size(); i < Size; ++i)
    if (i != V2Index && !Zeroable[i]) {
      IsV1Zeroable = false;
      break;
    }

  // Check for a single input from a SCALAR_TO_VECTOR node.
  // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
  // all the smarts here sunk into that routine. However, the current
  // lowering of BUILD_VECTOR makes that nearly impossible until the old
  // vector shuffle lowering is dead.
  if (SDValue V2S = getScalarValueForVectorElement(
          V2, Mask[V2Index] - Mask.size(), DAG)) {
    // We need to zext the scalar if it is smaller than an i32.
    V2S = DAG.getNode(ISD::BITCAST, DL, EltVT, V2S);
    if (EltVT == MVT::i8 || EltVT == MVT::i16) {
      // Using zext to expand a narrow element won't work for non-zero
      // elements.
      if (!IsV1Zeroable)
        return SDValue();

      // Zero-extend directly to i32.
      ExtVT = MVT::v4i32;
      V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
    }
    V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
  } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
             EltVT == MVT::i16) {
    // Either not inserting from the low element of the input or the input
    // element size is too small to use VZEXT_MOVL to clear the high bits.
    return SDValue();
  }

  if (!IsV1Zeroable) {
    // If V1 can't be treated as a zero vector we have fewer options to lower
    // this. We can't support integer vectors or non-zero targets cheaply, and
    // the V1 elements can't be permuted in any way.
    assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
    if (!VT.isFloatingPoint() || V2Index != 0)
      return SDValue();
    SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
    V1Mask[V2Index] = -1;
    if (!isNoopShuffleMask(V1Mask))
      return SDValue();
    // This is essentially a special case blend operation, but if we have
    // general purpose blend operations, they are always faster. Bail and let
    // the rest of the lowering handle these as blends.
    if (Subtarget->hasSSE41())
      return SDValue();

    // Otherwise, use MOVSD or MOVSS.
    assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
           "Only two types of floating point element types to handle!");
    return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
                       ExtVT, V1, V2);
  }

  // This lowering only works for the low element with floating point vectors.
  if (VT.isFloatingPoint() && V2Index != 0)
    return SDValue();

  V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
  if (ExtVT != VT)
    V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);

  if (V2Index != 0) {
    // If we have 4 or fewer lanes we can cheaply shuffle the element into
    // the desired position. Otherwise it is more efficient to do a vector
    // shift left. We know that we can do a vector shift left because all
    // the inputs are zero.
    if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
      SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
      V2Shuffle[V2Index] = 0;
      V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
    } else {
      V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V2);
      V2 = DAG.getNode(
          X86ISD::VSHLDQ, DL, MVT::v2i64, V2,
          DAG.getConstant(
              V2Index * EltVT.getSizeInBits() / 8,
              DAG.getTargetLoweringInfo().getScalarShiftAmountTy(MVT::v2i64)));
      V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
    }
  }
  return V2;
}
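
// Example: a v4i32 shuffle with mask [4, zz, zz, zz] where V2 wraps a loaded
// scalar becomes a VZEXT_MOVL (a MOVD-style move) that inserts the scalar
// into element 0 and zeroes the remaining elements.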

/// \brief Try to lower broadcast of a single element.
///
/// For convenience, this code also bundles all of the subtarget feature set
/// filtering. While a little annoying to re-dispatch on type here, there isn't
/// a convenient way to factor it out.
static SDValue lowerVectorShuffleAsBroadcast(MVT VT, SDLoc DL, SDValue V,
                                             ArrayRef<int> Mask,
                                             const X86Subtarget *Subtarget,
                                             SelectionDAG &DAG) {
  if (!Subtarget->hasAVX())
    return SDValue();
  if (VT.isInteger() && !Subtarget->hasAVX2())
    return SDValue();

  // Check that the mask is a broadcast.
  int BroadcastIdx = -1;
  for (int M : Mask)
    if (M >= 0 && BroadcastIdx == -1)
      BroadcastIdx = M;
    else if (M >= 0 && M != BroadcastIdx)
      return SDValue();

  assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
                                            "a sorted mask where the broadcast "
                                            "comes from V1.");

  // Go up the chain of (vector) values to try and find a scalar load that
  // we can combine with the broadcast.
  for (;;) {
    switch (V.getOpcode()) {
    case ISD::CONCAT_VECTORS: {
      int OperandSize = Mask.size() / V.getNumOperands();
      V = V.getOperand(BroadcastIdx / OperandSize);
      BroadcastIdx %= OperandSize;
      continue;
    }

    case ISD::INSERT_SUBVECTOR: {
      SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
      auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
      if (!ConstantIdx)
        break;

      int BeginIdx = (int)ConstantIdx->getZExtValue();
      int EndIdx =
          BeginIdx + (int)VInner.getValueType().getVectorNumElements();
      if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
        BroadcastIdx -= BeginIdx;
        V = VInner;
      } else {
        V = VOuter;
      }
      continue;
    }
    }
    break;
  }

  // Check if this is a broadcast of a scalar. We special case lowering
  // for scalars so that we can more effectively fold with loads.
  if (V.getOpcode() == ISD::BUILD_VECTOR ||
      (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
    V = V.getOperand(BroadcastIdx);

    // If the scalar isn't a load we can't broadcast from it in AVX1, only
    // with AVX2.
    if (!Subtarget->hasAVX2() && !isShuffleFoldableLoad(V))
      return SDValue();
  } else if (BroadcastIdx != 0 || !Subtarget->hasAVX2()) {
    // We can't broadcast from a vector register w/o AVX2, and we can only
    // broadcast from the zero-element of a vector register.
    return SDValue();
  }

  return DAG.getNode(X86ISD::VBROADCAST, DL, VT, V);
}
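
// Example: a v4f32 splat mask [0, 0, 0, 0] of a loaded scalar becomes a
// single VBROADCASTSS from memory on AVX; with AVX2 the splat can also be
// emitted directly from a register.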

// Check for whether we can use INSERTPS to perform the shuffle. We only use
// INSERTPS when the V1 elements are already in the correct locations
// because otherwise we can just always use two SHUFPS instructions which
// are much smaller to encode than a SHUFPS and an INSERTPS. We can also
// perform INSERTPS if a single V1 element is out of place and all V2
// elements are zeroable.
static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
                                            ArrayRef<int> Mask,
                                            SelectionDAG &DAG) {
  assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
  assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);

  unsigned ZMask = 0;
  int V1DstIndex = -1;
  int V2DstIndex = -1;
  bool V1UsedInPlace = false;

  for (int i = 0; i < 4; ++i) {
    // Synthesize a zero mask from the zeroable elements (includes undefs).
    if (Zeroable[i]) {
      ZMask |= 1 << i;
      continue;
    }

    // Flag if we use any V1 inputs in place.
    if (i == Mask[i]) {
      V1UsedInPlace = true;
      continue;
    }

    // We can only insert a single non-zeroable element.
    if (V1DstIndex != -1 || V2DstIndex != -1)
      return SDValue();

    if (Mask[i] < 4) {
      // V1 input out of place for insertion.
      V1DstIndex = i;
    } else {
      // V2 input for insertion.
      V2DstIndex = i;
    }
  }

  // Don't bother if we have no (non-zeroable) element for insertion.
  if (V1DstIndex == -1 && V2DstIndex == -1)
    return SDValue();

  // Determine element insertion src/dst indices. The src index is from the
  // start of the inserted vector, not the start of the concatenated vector.
  unsigned V2SrcIndex = 0;
  if (V1DstIndex != -1) {
    // If we have a V1 input out of place, we use V1 as the V2 element
    // insertion and don't use the original V2 at all.
    V2SrcIndex = Mask[V1DstIndex];
    V2DstIndex = V1DstIndex;
    V2 = V1;
  } else {
    V2SrcIndex = Mask[V2DstIndex] - 4;
  }

  // If no V1 inputs are used in place, then the result is created only from
  // the zero mask and the V2 insertion - so remove V1 dependency.
  if (!V1UsedInPlace)
    V1 = DAG.getUNDEF(MVT::v4f32);

  unsigned InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
  assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");

  // Insert the V2 element into the desired position.
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
                     DAG.getConstant(InsertPSMask, MVT::i8));
}
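
// Example: the v4f32 mask [4, 1, 2, zz] keeps V1 elements 1 and 2 in place,
// zeroes element 3, and inserts V2 element 0 into element 0, giving the
// INSERTPS immediate (0 << 6) | (0 << 4) | 0b1000 = 0x08.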

/// \brief Try to lower a shuffle as a permute of the inputs followed by an
/// UNPCK instruction.
///
/// This specifically targets cases where we end up alternating between the
/// two inputs, and so can permute them into something that feeds a single
/// UNPCK instruction. Note that this routine only targets integer vectors
/// because for floating point vectors we have a generalized SHUFPS lowering
/// strategy that handles everything that doesn't *exactly* match an unpack,
/// making this clever lowering unnecessary.
static SDValue lowerVectorShuffleAsUnpack(MVT VT, SDLoc DL, SDValue V1,
                                          SDValue V2, ArrayRef<int> Mask,
                                          SelectionDAG &DAG) {
  assert(!VT.isFloatingPoint() &&
         "This routine only supports integer vectors.");
  assert(!isSingleInputShuffleMask(Mask) &&
         "This routine should only be used when blending two inputs.");
  assert(Mask.size() >= 2 && "Single element masks are invalid.");

  int Size = Mask.size();

  int NumLoInputs = std::count_if(Mask.begin(), Mask.end(), [Size](int M) {
    return M >= 0 && M % Size < Size / 2;
  });
  int NumHiInputs = std::count_if(
      Mask.begin(), Mask.end(), [Size](int M) { return M % Size >= Size / 2; });

  bool UnpackLo = NumLoInputs >= NumHiInputs;

  auto TryUnpack = [&](MVT UnpackVT, int Scale) {
    SmallVector<int, 32> V1Mask(Mask.size(), -1);
    SmallVector<int, 32> V2Mask(Mask.size(), -1);

    for (int i = 0; i < Size; ++i) {
      if (Mask[i] < 0)
        continue;

      // Each element of the unpack contains Scale elements from this mask.
      int UnpackIdx = i / Scale;

      // We only handle the case where V1 feeds the first slots of the unpack.
      // We rely on canonicalization to ensure this is the case.
      if ((UnpackIdx % 2 == 0) != (Mask[i] < Size))
        return SDValue();

      // Setup the mask for this input. The indexing is tricky as we have to
      // handle the unpack stride.
      SmallVectorImpl<int> &VMask = (UnpackIdx % 2 == 0) ? V1Mask : V2Mask;
      VMask[(UnpackIdx / 2) * Scale + i % Scale + (UnpackLo ? 0 : Size / 2)] =
          Mask[i] % Size;
    }

    // If we will have to shuffle both inputs to use the unpack, check whether
    // we can just unpack first and shuffle the result. If so, skip this
    // unpack.
    if ((NumLoInputs == 0 || NumHiInputs == 0) && !isNoopShuffleMask(V1Mask) &&
        !isNoopShuffleMask(V2Mask))
      return SDValue();

    // Shuffle the inputs into place.
    V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
    V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);

    // Cast the inputs to the type we will use to unpack them.
    V1 = DAG.getNode(ISD::BITCAST, DL, UnpackVT, V1);
    V2 = DAG.getNode(ISD::BITCAST, DL, UnpackVT, V2);

    // Unpack the inputs and cast the result back to the desired type.
    return DAG.getNode(ISD::BITCAST, DL, VT,
                       DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH,
                                   DL, UnpackVT, V1, V2));
  };

  // We try each unpack from the largest to the smallest to try and find one
  // that fits this mask.
  int OrigNumElements = VT.getVectorNumElements();
  int OrigScalarSize = VT.getScalarSizeInBits();
  for (int ScalarSize = 64; ScalarSize >= OrigScalarSize; ScalarSize /= 2) {
    int Scale = ScalarSize / OrigScalarSize;
    int NumElements = OrigNumElements / Scale;
    MVT UnpackVT = MVT::getVectorVT(MVT::getIntegerVT(ScalarSize), NumElements);
    if (SDValue Unpack = TryUnpack(UnpackVT, Scale))
      return Unpack;
  }

  // If none of the unpack-rooted lowerings worked (or were profitable) try an
  // initial unpack.
  if (NumLoInputs == 0 || NumHiInputs == 0) {
    assert((NumLoInputs > 0 || NumHiInputs > 0) &&
           "We have to have *some* inputs!");
    int HalfOffset = NumLoInputs == 0 ? Size / 2 : 0;

    // FIXME: We could consider the total complexity of the permute of each
    // possible unpacking. Or at the least we should consider how many
    // half-crossings are created.
    // FIXME: We could consider commuting the unpacks.

    SmallVector<int, 32> PermMask;
    PermMask.assign(Size, -1);
    for (int i = 0; i < Size; ++i) {
      if (Mask[i] < 0)
        continue;

      assert(Mask[i] % Size >= HalfOffset && "Found input from wrong half!");

      PermMask[i] =
          2 * ((Mask[i] % Size) - HalfOffset) + (Mask[i] < Size ? 0 : 1);
    }
    return DAG.getVectorShuffle(
        VT, DL, DAG.getNode(NumLoInputs == 0 ? X86ISD::UNPCKH : X86ISD::UNPCKL,
                            DL, VT, V1, V2),
        DAG.getUNDEF(VT), PermMask);
  }

  return SDValue();
}
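
// Example: the v8i16 mask [0, 8, 2, 10, 4, 12, 6, 14] permutes each input
// with [0, 2, 4, 6, u, u, u, u] and then a single PUNPCKLWD interleaves the
// two permuted inputs into the requested order.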

/// \brief Handle lowering of 2-lane 64-bit floating point shuffles.
///
/// This is the basis function for the 2-lane 64-bit shuffles as we have full
/// support for floating point shuffles but not integer shuffles. These
/// instructions will incur a domain crossing penalty on some chips though so
/// it is better to avoid lowering through this for integer vectors where
/// possible.
static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(Op.getSimpleValueType() == MVT::v2f64 && "Bad shuffle type!");
  assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");

  if (isSingleInputShuffleMask(Mask)) {
    // Use low duplicate instructions for masks that match their pattern.
    if (Subtarget->hasSSE3())
      if (isShuffleEquivalent(V1, V2, Mask, 0, 0))
        return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, V1);

    // Straight shuffle of a single input vector. Simulate this by using the
    // single input as both of the "inputs" to this instruction.
    unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);

    if (Subtarget->hasAVX()) {
      // If we have AVX, we can use VPERMILPD which will allow folding a load
      // into the shuffle.
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
                         DAG.getConstant(SHUFPDMask, MVT::i8));
    }

    return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1,
                       DAG.getConstant(SHUFPDMask, MVT::i8));
  }
  assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
  assert(Mask[1] >= 2 && "Non-canonicalized blend!");

  // If we have a single input, insert that into V1 if we can do so cheaply.
  if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
    if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
            MVT::v2f64, DL, V1, V2, Mask, Subtarget, DAG))
      return Insertion;
    // Try inverting the insertion since for v2 masks it is easy to do and we
    // can't reliably sort the mask one way or the other.
    int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
                          Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
    if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
            MVT::v2f64, DL, V2, V1, InverseMask, Subtarget, DAG))
      return Insertion;
  }

  // Try to use one of the special instruction patterns to handle two common
  // blend patterns if a zero-blend above didn't work.
  if (isShuffleEquivalent(V1, V2, Mask, 0, 3) ||
      isShuffleEquivalent(V1, V2, Mask, 1, 3))
    if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
      // We can either use a special instruction to load over the low double
      // or to move just the low double.
      return DAG.getNode(
          isShuffleFoldableLoad(V1S) ? X86ISD::MOVLPD : X86ISD::MOVSD, DL,
          MVT::v2f64, V2,
          DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));

  if (Subtarget->hasSSE41())
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(V1, V2, Mask, 0, 2))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2f64, V1, V2);
  if (isShuffleEquivalent(V1, V2, Mask, 1, 3))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2f64, V1, V2);

  unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
  return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V2,
                     DAG.getConstant(SHUFPDMask, MVT::i8));
}
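
// Example: the v2f64 mask [1, 2] typically falls through to the final case
// and lowers as SHUFPD with immediate 1, putting V1[1] in the low lane and
// V2[0] in the high lane.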

/// \brief Handle lowering of 2-lane 64-bit integer shuffles.
///
/// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
/// the integer unit to minimize domain crossing penalties. However, for blends
/// it falls back to the floating point shuffle operation with appropriate bit
/// blending.
static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(Op.getSimpleValueType() == MVT::v2i64 && "Bad shuffle type!");
  assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");

  if (isSingleInputShuffleMask(Mask)) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v2i64, DL, V1,
                                                          Mask, Subtarget, DAG))
      return Broadcast;

    // Straight shuffle of a single input vector. For everything from SSE2
    // onward this has a single fast instruction with no scary immediates.
    // We have to map the mask as it is actually a v4i32 shuffle instruction.
    V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V1);
    int WidenedMask[4] = {
        std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
        std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
    return DAG.getNode(
        ISD::BITCAST, DL, MVT::v2i64,
        DAG.getNode(X86ISD::PSHUFD, SDLoc(Op), MVT::v4i32, V1,
                    getV4X86ShuffleImm8ForMask(WidenedMask, DAG)));
  }
  assert(Mask[0] != -1 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[1] != -1 && "No undef lanes in multi-input v2 shuffles!");
  assert(Mask[0] < 2 && "We sort V1 to be the first input.");
  assert(Mask[1] >= 2 && "We sort V2 to be the second input.");

  // If we have a blend of two PACKUS operations and the blend aligns with the
  // low and high halves, we can just merge the PACKUS operations. This is
  // particularly important as it lets us merge shuffles that this routine
  // itself creates.
  auto GetPackNode = [](SDValue V) {
    while (V.getOpcode() == ISD::BITCAST)
      V = V.getOperand(0);

    return V.getOpcode() == X86ISD::PACKUS ? V : SDValue();
  };
  if (SDValue V1Pack = GetPackNode(V1))
    if (SDValue V2Pack = GetPackNode(V2))
      return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
                         DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8,
                                     Mask[0] == 0 ? V1Pack.getOperand(0)
                                                  : V1Pack.getOperand(1),
                                     Mask[1] == 2 ? V2Pack.getOperand(0)
                                                  : V2Pack.getOperand(1)));

  // Try to use shift instructions.
  if (SDValue Shift =
          lowerVectorShuffleAsShift(DL, MVT::v2i64, V1, V2, Mask, DAG))
    return Shift;

  // When loading a scalar and then shuffling it into a vector we can often do
  // the insertion cheaply.
  if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
          MVT::v2i64, DL, V1, V2, Mask, Subtarget, DAG))
    return Insertion;
  // Try inverting the insertion since for v2 masks it is easy to do and we
  // can't reliably sort the mask one way or the other.
  int InverseMask[2] = {Mask[0] ^ 2, Mask[1] ^ 2};
  if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
          MVT::v2i64, DL, V2, V1, InverseMask, Subtarget, DAG))
    return Insertion;

  // We have different paths for blend lowering, but they all must use the
  // *exact* same predicate.
  bool IsBlendSupported = Subtarget->hasSSE41();
  if (IsBlendSupported)
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(V1, V2, Mask, 0, 2))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, V1, V2);
  if (isShuffleEquivalent(V1, V2, Mask, 1, 3))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2i64, V1, V2);

  // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
  if (Subtarget->hasSSSE3())
    if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
            DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
      return Rotate;

  // If we have direct support for blends, we should lower by decomposing into
  // a permute. That will be faster than the domain cross.
  if (IsBlendSupported)
    return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2,
                                                      Mask, DAG);

  // We implement this with SHUFPD which is pretty lame because it will likely
  // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
  // However, all the alternatives are still more cycles and newer chips don't
  // have this problem. It would be really nice if x86 had better shuffles
  // here.
  V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V1);
  V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V2);
  return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
                     DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
}
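
// Example: the single-input v2i64 mask [1, 0] widens to the v4i32 mask
// [2, 3, 0, 1] and lowers as PSHUFD with immediate 0x4E, swapping the two
// 64-bit halves without leaving the integer domain.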

/// \brief Test whether this can be lowered with a single SHUFPS instruction.
///
/// This is used to disable more specialized lowerings when the shufps lowering
/// will happen to be efficient.
static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
  // This routine only handles 128-bit shufps.
  assert(Mask.size() == 4 && "Unsupported mask size!");

  // To lower with a single SHUFPS we need to have the low half and high half
  // each requiring a single input.
  if (Mask[0] != -1 && Mask[1] != -1 && (Mask[0] < 4) != (Mask[1] < 4))
    return false;
  if (Mask[2] != -1 && Mask[3] != -1 && (Mask[2] < 4) != (Mask[3] < 4))
    return false;

  return true;
}
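
// Example: [0, 1, 6, 7] is a single SHUFPS (low half from V1, high half from
// V2), but [0, 4, 2, 6] is not, because each half mixes both inputs.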

/// \brief Lower a vector shuffle using the SHUFPS instruction.
///
/// This is a helper routine dedicated to lowering vector shuffles using
/// SHUFPS. It makes no assumptions about whether this is the *best* lowering,
/// it simply uses it.
static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
                                            ArrayRef<int> Mask, SDValue V1,
                                            SDValue V2, SelectionDAG &DAG) {
  SDValue LowV = V1, HighV = V2;
  int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};

  int NumV2Elements =
      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });

  if (NumV2Elements == 1) {
    int V2Index =
        std::find_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; }) -
        Mask.begin();

    // Compute the index adjacent to V2Index and in the same half by toggling
    // the low bit.
    int V2AdjIndex = V2Index ^ 1;

    if (Mask[V2AdjIndex] == -1) {
      // Handles all the cases where we have a single V2 element and an undef.
      // This will only ever happen in the high lanes because we commute the
      // vector otherwise.
      if (V2Index < 2)
        std::swap(LowV, HighV);
      NewMask[V2Index] -= 4;
    } else {
      // Handle the case where the V2 element ends up adjacent to a V1
      // element. To make this work, blend them together as the first step.
      int V1Index = V2AdjIndex;
      int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
      V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
                       getV4X86ShuffleImm8ForMask(BlendMask, DAG));

      // Now proceed to reconstruct the final blend as we have the necessary
      // high or low half formed.
      if (V2Index < 2) {
        LowV = V2;
        HighV = V1;
      } else {
        HighV = V2;
      }
      NewMask[V1Index] = 2; // We put the V1 element in V2[2].
      NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
    }
  } else if (NumV2Elements == 2) {
    if (Mask[0] < 4 && Mask[1] < 4) {
      // Handle the easy case where we have V1 in the low lanes and V2 in the
      // high lanes.
      NewMask[2] -= 4;
      NewMask[3] -= 4;
    } else if (Mask[2] < 4 && Mask[3] < 4) {
      // We also handle the reversed case because this utility may get called
      // when we detect a SHUFPS pattern but can't easily commute the shuffle
      // to arrange things in the right direction.
      NewMask[0] -= 4;
      NewMask[1] -= 4;
      HighV = V1;
      LowV = V2;
    } else {
      // We have a mixture of V1 and V2 in both low and high lanes. Rather
      // than trying to place elements directly, just blend them and set up
      // the final shuffle to place them.

      // The first two blend mask elements are for V1, the second two are for
      // V2.
      int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
                          Mask[2] < 4 ? Mask[2] : Mask[3],
                          (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
                          (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
      V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
                       getV4X86ShuffleImm8ForMask(BlendMask, DAG));

      // Now we do a normal shuffle of V1 by giving V1 as both operands to
      // a blend.
      LowV = HighV = V1;
      NewMask[0] = Mask[0] < 4 ? 0 : 2;
      NewMask[1] = Mask[0] < 4 ? 2 : 0;
      NewMask[2] = Mask[2] < 4 ? 1 : 3;
      NewMask[3] = Mask[2] < 4 ? 3 : 1;
    }
  }
  return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
                     getV4X86ShuffleImm8ForMask(NewMask, DAG));
}
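
// Example: the mixed v4f32 mask [0, 5, 2, 7] first blends with
// BlendMask = [0, 2, 1, 3] to form [V1[0], V1[2], V2[1], V2[3]], and a second
// SHUFPS of that result with itself using NewMask = [0, 2, 1, 3] produces the
// requested [V1[0], V2[1], V1[2], V2[3]].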

/// \brief Lower 4-lane 32-bit floating point shuffles.
///
/// Uses instructions exclusively from the floating point unit to minimize
/// domain crossing penalties, as these are sufficient to implement all v4f32
/// shuffles.
static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
  assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  int NumV2Elements =
      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });

  if (NumV2Elements == 0) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f32, DL, V1,
                                                          Mask, Subtarget, DAG))
      return Broadcast;

    // Use even/odd duplicate instructions for masks that match their pattern.
    if (Subtarget->hasSSE3()) {
      if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2))
        return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
      if (isShuffleEquivalent(V1, V2, Mask, 1, 1, 3, 3))
        return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
    }

    if (Subtarget->hasAVX()) {
      // If we have AVX, we can use VPERMILPS which will allow folding a load
      // into the shuffle.
      return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
                         getV4X86ShuffleImm8ForMask(Mask, DAG));
    }

    // Otherwise, use a straight shuffle of a single input vector. We pass the
    // input vector to both operands to simulate this with a SHUFPS.
    return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DAG));
  }

  // There are special ways we can lower some single-element blends. However,
  // we have custom ways we can lower more complex single-element blends below
  // that we defer to if both this and BLENDPS fail to match, so restrict this
  // to when the V2 input is targeting element 0 of the mask -- that is the
  // fast case here.
  if (NumV2Elements == 1 && Mask[0] >= 4)
    if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4f32, DL, V1, V2,
                                                         Mask, Subtarget, DAG))
      return V;

  if (Subtarget->hasSSE41()) {
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;

    // Use INSERTPS if we can complete the shuffle efficiently.
    if (SDValue V = lowerVectorShuffleAsInsertPS(Op, V1, V2, Mask, DAG))
      return V;

    if (!isSingleSHUFPSMask(Mask))
      if (SDValue BlendPerm = lowerVectorShuffleAsBlendAndPermute(
              DL, MVT::v4f32, V1, V2, Mask, DAG))
        return BlendPerm;
  }

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 1, 5))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V1, V2);
  if (isShuffleEquivalent(V1, V2, Mask, 2, 6, 3, 7))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V1, V2);
  if (isShuffleEquivalent(V1, V2, Mask, 4, 0, 5, 1))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V2, V1);
  if (isShuffleEquivalent(V1, V2, Mask, 6, 2, 7, 3))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V2, V1);

  // Otherwise fall back to a SHUFPS lowering strategy.
  return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
}
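
// Example: the single-input v4f32 mask [0, 0, 2, 2] is matched above as a
// single MOVSLDUP on SSE3 and newer, duplicating the even elements in place.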

/// \brief Lower 4-lane i32 vector shuffles.
///
/// We try to handle these with integer-domain shuffles where we can, but for
/// blends we use the floating point domain blend instructions.
static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(Op.getSimpleValueType() == MVT::v4i32 && "Bad shuffle type!");
  assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2,
                                                         Mask, Subtarget, DAG))
    return ZExt;

  int NumV2Elements =
      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });

  if (NumV2Elements == 0) {
    // Check for being able to broadcast a single element.
    if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i32, DL, V1,
                                                          Mask, Subtarget, DAG))
      return Broadcast;

    // Straight shuffle of a single input vector. For everything from SSE2
    // onward this has a single fast instruction with no scary immediates.
    // We coerce the shuffle pattern to be compatible with UNPCK instructions
    // but we aren't actually going to use the UNPCK instruction because doing
    // so prevents folding a load into this instruction or making a copy.
    const int UnpackLoMask[] = {0, 0, 1, 1};
    const int UnpackHiMask[] = {2, 2, 3, 3};
    if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 1, 1))
      Mask = UnpackLoMask;
    else if (isShuffleEquivalent(V1, V2, Mask, 2, 2, 3, 3))
      Mask = UnpackHiMask;

    return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DAG));
  }

  // Try to use shift instructions.
  if (SDValue Shift =
          lowerVectorShuffleAsShift(DL, MVT::v4i32, V1, V2, Mask, DAG))
    return Shift;

  // There are special ways we can lower some single-element blends.
  if (NumV2Elements == 1)
    if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4i32, DL, V1, V2,
                                                         Mask, Subtarget, DAG))
      return V;

  // We have different paths for blend lowering, but they all must use the
  // *exact* same predicate.
  bool IsBlendSupported = Subtarget->hasSSE41();
  if (IsBlendSupported)
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;

  if (SDValue Masked =
          lowerVectorShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask, DAG))
    return Masked;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 1, 5))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V1, V2);
  if (isShuffleEquivalent(V1, V2, Mask, 2, 6, 3, 7))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V1, V2);
  if (isShuffleEquivalent(V1, V2, Mask, 4, 0, 5, 1))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V2, V1);
  if (isShuffleEquivalent(V1, V2, Mask, 6, 2, 7, 3))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V2, V1);

  // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
  if (Subtarget->hasSSSE3())
    if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
            DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
      return Rotate;

  // If we have direct support for blends, we should lower by decomposing into
  // a permute. That will be faster than the domain cross.
  if (IsBlendSupported)
    return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2,
                                                      Mask, DAG);

  // Try to lower by permuting the inputs into an unpack instruction.
  if (SDValue Unpack =
          lowerVectorShuffleAsUnpack(MVT::v4i32, DL, V1, V2, Mask, DAG))
    return Unpack;

  // We implement this with SHUFPS because it can blend from two vectors.
  // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
  // up the inputs, bypassing domain shift penalties that we would incur if we
  // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
  // relevant.
  return DAG.getNode(ISD::BITCAST, DL, MVT::v4i32,
                     DAG.getVectorShuffle(
                         MVT::v4f32, DL,
                         DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V1),
                         DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V2), Mask));
}
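
// Example: the two-input v4i32 mask [0, 4, 1, 5] never reaches the SHUFPS
// fallback; it is caught earlier by the unpack checks and lowered as a single
// PUNPCKLDQ, staying entirely in the integer domain.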

/// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
/// shuffle lowering, and the most complex part.
///
/// The lowering strategy is to try to form pairs of input lanes which are
/// targeted at the same half of the final vector, and then use a dword shuffle
/// to place them onto the right half, and finally unpack the paired lanes into
/// their final position.
///
/// The exact breakdown of how to form these dword pairs and align them on the
/// correct sides is really tricky. See the comments within the function for
/// more of the details.
static SDValue lowerV8I16SingleInputVectorShuffle(
    SDLoc DL, SDValue V, MutableArrayRef<int> Mask,
    const X86Subtarget *Subtarget, SelectionDAG &DAG) {
  assert(V.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
  MutableArrayRef<int> LoMask = Mask.slice(0, 4);
  MutableArrayRef<int> HiMask = Mask.slice(4, 4);

  SmallVector<int, 4> LoInputs;
  std::copy_if(LoMask.begin(), LoMask.end(), std::back_inserter(LoInputs),
               [](int M) { return M >= 0; });
  std::sort(LoInputs.begin(), LoInputs.end());
  LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
                 LoInputs.end());
  SmallVector<int, 4> HiInputs;
  std::copy_if(HiMask.begin(), HiMask.end(), std::back_inserter(HiInputs),
               [](int M) { return M >= 0; });
  std::sort(HiInputs.begin(), HiInputs.end());
  HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
                 HiInputs.end());
  int NumLToL =
      std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
  int NumHToL = LoInputs.size() - NumLToL;
  int NumLToH =
      std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
  int NumHToH = HiInputs.size() - NumLToH;
  MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
  MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
  MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
  MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
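
  // Example: the mask [0, 4, 1, 5, 6, 2, 7, 3] splits into LToL = {0, 1},
  // HToL = {4, 5}, LToH = {2, 3}, and HToH = {6, 7}: two inputs to each half
  // from each half, so no balancing pass is needed below.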

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i16, DL, V,
                                                        Mask, Subtarget, DAG))
    return Broadcast;

  // Try to use shift instructions.
  if (SDValue Shift =
          lowerVectorShuffleAsShift(DL, MVT::v8i16, V, V, Mask, DAG))
    return Shift;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(V, V, Mask, 0, 0, 1, 1, 2, 2, 3, 3))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V, V);
  if (isShuffleEquivalent(V, V, Mask, 4, 4, 5, 5, 6, 6, 7, 7))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V, V);

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
          DL, MVT::v8i16, V, V, Mask, Subtarget, DAG))
    return Rotate;

  // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
  // such inputs we can swap two of the dwords across the half mark and end up
  // with <=2 inputs to each half in each half. Once there, we can fall through
  // to the generic code below. For example:
  //
  // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
  // Mask:  [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
  //
  // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
  // and an existing 2-into-2 on the other half. In this case we may have to
  // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
  // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
  // Fortunately, we don't have to handle anything but a 2-into-2 pattern
  // because any other situation (including a 3-into-1 or 1-into-3 in the other
  // half than the one we target for fixing) will be fixed when we re-enter
  // this path. We will also combine away any sequence of PSHUFD instructions
  // that result into a single instruction. Here is an example of the tricky
  // case:
  //
  // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
  // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
  //
  // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
  //
  // Input: [a, b, c, d, e, f, g, h] -PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
  // Mask:  [3, 7, 1, 0, 2, 7, 3, 5] ------------------> [3, 7, 1, 0, 2, 7, 3, 6]
  //
  // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
  // Mask:  [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
  //
  // The result is fine to be handled by the generic logic.
  auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
                          ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
                          int AOffset, int BOffset) {
    assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
           "Must call this with A having 3 or 1 inputs from the A half.");
    assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
           "Must call this with B having 1 or 3 inputs from the B half.");
    assert(AToAInputs.size() + BToAInputs.size() == 4 &&
           "Must call this with either 3:1 or 1:3 inputs (summing to 4).");

    // Compute the index of dword with only one word among the three inputs in
    // a half by taking the sum of the half with three inputs and subtracting
    // the sum of the actual three inputs. The difference is the remaining
    // slot.
    int ADWord, BDWord;
    int &TripleDWord = AToAInputs.size() == 3 ? ADWord : BDWord;
    int &OneInputDWord = AToAInputs.size() == 3 ? BDWord : ADWord;
    int TripleInputOffset = AToAInputs.size() == 3 ? AOffset : BOffset;
    ArrayRef<int> TripleInputs =
        AToAInputs.size() == 3 ? AToAInputs : BToAInputs;
    int OneInput = AToAInputs.size() == 3 ? BToAInputs[0] : AToAInputs[0];
    int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
    int TripleNonInputIdx =
        TripleInputSum -
        std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
    TripleDWord = TripleNonInputIdx / 2;

    // We use xor with one to compute the adjacent DWord to whichever one the
    // OneInput is in.
    OneInputDWord = (OneInput / 2) ^ 1;

    // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for
    // AToA and BToA inputs. If there is also such a problem with the BToB and
    // AToB inputs, we don't try to fix it necessarily -- we'll recurse and
    // see it in the next pass. However, if we have a 2<-2 in the BToB and
    // AToB inputs, it is essential that we don't *create* a 3<-1 as then we
    // might oscillate.
    if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
      // Compute how many inputs will be flipped by swapping these DWords. We
      // need to balance this to ensure we don't form a 3-1 shuffle in the
      // other half.
      int NumFlippedAToBInputs =
          std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
          std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
      int NumFlippedBToBInputs =
          std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
          std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
      if ((NumFlippedAToBInputs == 1 &&
           (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
          (NumFlippedBToBInputs == 1 &&
           (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
        // We choose whether to fix the A half or B half based on whether that
        // half has zero flipped inputs. At zero, we may not be able to fix it
        // with that half. We also bias towards fixing the B half because that
        // will more commonly be the high half, and we have to bias one way.
        auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
                                                       ArrayRef<int> Inputs) {
          int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
          bool IsFixIdxInput = std::find(Inputs.begin(), Inputs.end(),
                                         PinnedIdx ^ 1) != Inputs.end();
          // Determine whether the free index is in the flipped dword or the
          // unflipped dword based on where the pinned index is. We use this
          // bit in an xor to conditionally select the adjacent dword.
          int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
          bool IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
                                             FixFreeIdx) != Inputs.end();
          if (IsFixIdxInput == IsFixFreeIdxInput)
            ++FixFreeIdx;
          IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
                                        FixFreeIdx) != Inputs.end();
          assert(IsFixIdxInput != IsFixFreeIdxInput &&
                 "We need to be changing the number of flipped inputs!");
          int PSHUFHalfMask[] = {0, 1, 2, 3};
          std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
          V = DAG.getNode(FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
                          MVT::v8i16, V,
                          getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DAG));

          for (int &M : Mask)
            if (M != -1 && M == FixIdx)
              M = FixFreeIdx;
            else if (M != -1 && M == FixFreeIdx)
              M = FixIdx;
        };
        if (NumFlippedBToBInputs != 0) {
          int BPinnedIdx =
              BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
          FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
        } else {
          assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
          int APinnedIdx =
              AToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
          FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
        }
      }
    }

    int PSHUFDMask[] = {0, 1, 2, 3};
    PSHUFDMask[ADWord] = BDWord;
    PSHUFDMask[BDWord] = ADWord;
    V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
                    DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
                                DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
                                getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));

    // Adjust the mask to match the new locations of A and B.
    for (int &M : Mask)
      if (M != -1 && M / 2 == ADWord)
        M = 2 * BDWord + M % 2;
      else if (M != -1 && M / 2 == BDWord)
        M = 2 * ADWord + M % 2;

    // Recurse back into this routine to re-compute state now that this isn't
    // a 3 and 1 problem.
    return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
                                Mask);
  };
  if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
    return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
  else if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
    return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);

  // At this point there are at most two inputs to the low and high halves from
  // each half. That means the inputs can always be grouped into dwords and
  // those dwords can then be moved to the correct half with a dword shuffle.
  // We use at most one low and one high word shuffle to collect these paired
  // inputs into dwords, and finally a dword shuffle to place them.
  int PSHUFLMask[4] = {-1, -1, -1, -1};
  int PSHUFHMask[4] = {-1, -1, -1, -1};
  int PSHUFDMask[4] = {-1, -1, -1, -1};

  // First fix the masks for all the inputs that are staying in their
  // original halves. This will then dictate the targets of the cross-half
  // shuffles.
  auto fixInPlaceInputs =
      [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
                    MutableArrayRef<int> SourceHalfMask,
                    MutableArrayRef<int> HalfMask, int HalfOffset) {
    if (InPlaceInputs.empty())
      return;
    if (InPlaceInputs.size() == 1) {
      SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
          InPlaceInputs[0] - HalfOffset;
      PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
      return;
    }
    if (IncomingInputs.empty()) {
      // Just fix all of the in place inputs.
      for (int Input : InPlaceInputs) {
        SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
        PSHUFDMask[Input / 2] = Input / 2;
      }
      return;
    }

    assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
    SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
        InPlaceInputs[0] - HalfOffset;
    // Put the second input next to the first so that they are packed into
    // a dword. We find the adjacent index by toggling the low bit.
    int AdjIndex = InPlaceInputs[0] ^ 1;
    SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
    std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
    PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
  };
  fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
  fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
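
  // Example: if the in-place low inputs are {1, 2}, the PSHUFLW mask packs
  // them into a single dword by moving word 2 next to word 1 (slots [2, 1]),
  // and PSHUFDMask[0] is pinned to dword 0.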
9315 // Now gather the cross-half inputs and place them into a free dword of
9316 // their target half.
9317 // FIXME: This operation could almost certainly be simplified dramatically to
9318 // look more like the 3-1 fixing operation.
9319 auto moveInputsToRightHalf = [&PSHUFDMask](
9320 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
9321 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
9322 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
9324 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
9325 return SourceHalfMask[Word] != -1 && SourceHalfMask[Word] != Word;
9327 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
9329 int LowWord = Word & ~1;
9330 int HighWord = Word | 1;
9331 return isWordClobbered(SourceHalfMask, LowWord) ||
9332 isWordClobbered(SourceHalfMask, HighWord);
9335 if (IncomingInputs.empty())
9338 if (ExistingInputs.empty()) {
9339 // Map any dwords with inputs from them into the right half.
9340 for (int Input : IncomingInputs) {
9341 // If the source half mask maps over the inputs, turn those into
9342 // swaps and use the swapped lane.
9343 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
9344 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] == -1) {
9345 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
9346 Input - SourceOffset;
9347 // We have to swap the uses in our half mask in one sweep.
9348 for (int &M : HalfMask)
9349 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
9351 else if (M == Input)
9352 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9354 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
9355 Input - SourceOffset &&
9356 "Previous placement doesn't match!");
9358 // Note that this correctly re-maps both when we do a swap and when
9359 // we observe the other side of the swap above. We rely on that to
9360 // avoid swapping the members of the input list directly.
9361 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9364 // Map the input's dword into the correct half.
9365 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] == -1)
9366 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
9368 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
9370 "Previous placement doesn't match!");
9373 // And just directly shift any other-half mask elements to be same-half
9374 // as we will have mirrored the dword containing the element into the
9375 // same position within that half.
9376 for (int &M : HalfMask)
9377 if (M >= SourceOffset && M < SourceOffset + 4) {
9378 M = M - SourceOffset + DestOffset;
9379 assert(M >= 0 && "This should never wrap below zero!");
9380 }
9381 return;
9382 }
9384 // Ensure we have the input in a viable dword of its current half. This
9385 // is particularly tricky because the original position may be clobbered
9386 // by inputs being moved and *staying* in that half.
9387 if (IncomingInputs.size() == 1) {
9388 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9389 int InputFixed = std::find(std::begin(SourceHalfMask),
9390 std::end(SourceHalfMask), -1) -
9391 std::begin(SourceHalfMask) + SourceOffset;
9392 SourceHalfMask[InputFixed - SourceOffset] =
9393 IncomingInputs[0] - SourceOffset;
9394 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
9395 InputFixed);
9396 IncomingInputs[0] = InputFixed;
9397 }
9398 } else if (IncomingInputs.size() == 2) {
9399 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
9400 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9401 // We have two non-adjacent or clobbered inputs we need to extract from
9402 // the source half. To do this, we need to map them into some adjacent
9403 // dword slot in the source mask.
9404 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
9405 IncomingInputs[1] - SourceOffset};
9407 // If there is a free slot in the source half mask adjacent to one of
9408 // the inputs, place the other input in it. We use (Index XOR 1) to
9409 // compute an adjacent index.
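// (For example, word 5 is adjacent to 5 ^ 1 == 4; both live in dword 2.)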
9410 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
9411 SourceHalfMask[InputsFixed[0] ^ 1] == -1) {
9412 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
9413 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9414 InputsFixed[1] = InputsFixed[0] ^ 1;
9415 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
9416 SourceHalfMask[InputsFixed[1] ^ 1] == -1) {
9417 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
9418 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
9419 InputsFixed[0] = InputsFixed[1] ^ 1;
9420 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] == -1 &&
9421 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] == -1) {
9422 // The two inputs are in the same DWord but it is clobbered and the
9423 // adjacent DWord isn't used at all. Move both inputs to the free
9424 // slot.
9425 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
9426 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
9427 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
9428 InputsFixed[1] = InputsFixed[0] + 1;
9429 } else {
9430 // The only way we hit this point is if there is no clobbering
9431 // (because there are no off-half inputs to this half) and there is no
9432 // free slot adjacent to one of the inputs. In this case, we have to
9433 // swap an input with a non-input.
9434 for (int i = 0; i < 4; ++i)
9435 assert((SourceHalfMask[i] == -1 || SourceHalfMask[i] == i) &&
9436 "We can't handle any clobbers here!");
9437 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
9438 "Cannot have adjacent inputs here!");
9440 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9441 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
9443 // We also have to update the final source mask in this case because
9444 // it may need to undo the above swap.
9445 for (int &M : FinalSourceHalfMask)
9446 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
9447 M = InputsFixed[1] + SourceOffset;
9448 else if (M == InputsFixed[1] + SourceOffset)
9449 M = (InputsFixed[0] ^ 1) + SourceOffset;
9451 InputsFixed[1] = InputsFixed[0] ^ 1;
9452 }
9454 // Point everything at the fixed inputs.
9455 for (int &M : HalfMask)
9456 if (M == IncomingInputs[0])
9457 M = InputsFixed[0] + SourceOffset;
9458 else if (M == IncomingInputs[1])
9459 M = InputsFixed[1] + SourceOffset;
9461 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
9462 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
9463 }
9464 } else {
9465 llvm_unreachable("Unhandled input size!");
9466 }
9468 // Now hoist the DWord down to the right half.
9469 int FreeDWord = (PSHUFDMask[DestOffset / 2] == -1 ? 0 : 1) + DestOffset / 2;
9470 assert(PSHUFDMask[FreeDWord] == -1 && "DWord not free");
9471 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
9472 for (int &M : HalfMask)
9473 for (int Input : IncomingInputs)
9474 if (M == Input)
9475 M = FreeDWord * 2 + Input % 2;
9476 };
9477 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
9478 /*SourceOffset*/ 4, /*DestOffset*/ 0);
9479 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
9480 /*SourceOffset*/ 0, /*DestOffset*/ 4);
9482 // Now enact all the shuffles we've computed to move the inputs into their
9483 // target half.
9484 if (!isNoopShuffleMask(PSHUFLMask))
9485 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9486 getV4X86ShuffleImm8ForMask(PSHUFLMask, DAG));
9487 if (!isNoopShuffleMask(PSHUFHMask))
9488 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9489 getV4X86ShuffleImm8ForMask(PSHUFHMask, DAG));
9490 if (!isNoopShuffleMask(PSHUFDMask))
9491 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9492 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9493 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9494 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9496 // At this point, each half should contain all its inputs, and we can then
9497 // just shuffle them into their final position.
9498 assert(std::count_if(LoMask.begin(), LoMask.end(),
9499 [](int M) { return M >= 4; }) == 0 &&
9500 "Failed to lift all the high half inputs to the low mask!");
9501 assert(std::count_if(HiMask.begin(), HiMask.end(),
9502 [](int M) { return M >= 0 && M < 4; }) == 0 &&
9503 "Failed to lift all the low half inputs to the high mask!");
9505 // Do a half shuffle for the low mask.
9506 if (!isNoopShuffleMask(LoMask))
9507 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9508 getV4X86ShuffleImm8ForMask(LoMask, DAG));
9510 // Do a half shuffle with the high mask after shifting its values down.
9511 for (int &M : HiMask)
9512 if (M >= 0)
9513 M -= 4;
9514 if (!isNoopShuffleMask(HiMask))
9515 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9516 getV4X86ShuffleImm8ForMask(HiMask, DAG));
9518 return V;
9519 }
9521 /// \brief Helper to form a PSHUFB-based shuffle+blend.
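/// As a reminder of the underlying instruction semantics: PSHUFB consumes one
/// control byte per result byte, where the low four bits index into the
/// 16-byte source and a set high bit (0x80) forces the result byte to zero.
/// For example, a control vector of <1, 0, 3, 2, ...> swaps each adjacent
/// pair of bytes, and an all-0x80 control vector yields all zeros.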
9522 static SDValue lowerVectorShuffleAsPSHUFB(SDLoc DL, MVT VT, SDValue V1,
9523 SDValue V2, ArrayRef<int> Mask,
9524 SelectionDAG &DAG, bool &V1InUse,
9525 bool &V2InUse) {
9526 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
9527 SDValue V1Mask[16];
9528 SDValue V2Mask[16];
9529 V1InUse = false;
9530 V2InUse = false;
9532 int Size = Mask.size();
9533 int Scale = 16 / Size;
9534 for (int i = 0; i < 16; ++i) {
9535 if (Mask[i / Scale] == -1) {
9536 V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
9537 } else {
9538 const int ZeroMask = 0x80;
9539 int V1Idx = Mask[i / Scale] < Size ? Mask[i / Scale] * Scale + i % Scale
9540 : ZeroMask;
9541 int V2Idx = Mask[i / Scale] < Size
9542 ? ZeroMask
9543 : (Mask[i / Scale] - Size) * Scale + i % Scale;
9544 if (Zeroable[i / Scale])
9545 V1Idx = V2Idx = ZeroMask;
9546 V1Mask[i] = DAG.getConstant(V1Idx, MVT::i8);
9547 V2Mask[i] = DAG.getConstant(V2Idx, MVT::i8);
9548 V1InUse |= (ZeroMask != V1Idx);
9549 V2InUse |= (ZeroMask != V2Idx);
9550 }
9551 }
9553 if (V1InUse)
9554 V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
9555 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, V1),
9556 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
9557 if (V2InUse)
9558 V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8,
9559 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, V2),
9560 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));
9562 // If we need shuffled inputs from both, blend the two.
9563 SDValue V;
9564 if (V1InUse && V2InUse)
9565 V = DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
9566 else
9567 V = V1InUse ? V1 : V2;
9569 // Cast the result back to the correct type.
9570 return DAG.getNode(ISD::BITCAST, DL, VT, V);
9571 }
9573 /// \brief Generic lowering of 8-lane i16 shuffles.
9575 /// This handles both single-input shuffles and combined shuffle/blends with
9576 /// two inputs. The single input shuffles are immediately delegated to
9577 /// a dedicated lowering routine.
9579 /// The blends are lowered in one of three fundamental ways. If there are few
9580 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
9581 /// of the input is significantly cheaper when lowered as an interleaving of
9582 /// the two inputs, try to interleave them. Otherwise, blend the low and high
9583 /// halves of the inputs separately (making them have relatively few inputs)
9584 /// and then concatenate them.
9585 static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9586 const X86Subtarget *Subtarget,
9587 SelectionDAG &DAG) {
9588 SDLoc DL(Op);
9589 assert(Op.getSimpleValueType() == MVT::v8i16 && "Bad shuffle type!");
9590 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9591 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9592 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9593 ArrayRef<int> OrigMask = SVOp->getMask();
9594 int MaskStorage[8] = {OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9595 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7]};
9596 MutableArrayRef<int> Mask(MaskStorage);
9598 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
9600 // Whenever we can lower this as a zext, that instruction is strictly faster
9601 // than any alternative.
9602 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9603 DL, MVT::v8i16, V1, V2, OrigMask, Subtarget, DAG))
9604 return ZExt;
9606 auto isV1 = [](int M) { return M >= 0 && M < 8; };
9607 auto isV2 = [](int M) { return M >= 8; };
9609 int NumV1Inputs = std::count_if(Mask.begin(), Mask.end(), isV1);
9610 int NumV2Inputs = std::count_if(Mask.begin(), Mask.end(), isV2);
9612 if (NumV2Inputs == 0)
9613 return lowerV8I16SingleInputVectorShuffle(DL, V1, Mask, Subtarget, DAG);
9615 assert(NumV1Inputs > 0 && "All single-input shuffles should be canonicalized "
9616 "to be V1-input shuffles.");
9618 // Try to use shift instructions.
9619 if (SDValue Shift =
9620 lowerVectorShuffleAsShift(DL, MVT::v8i16, V1, V2, Mask, DAG))
9621 return Shift;
9623 // There are special ways we can lower some single-element blends.
9624 if (NumV2Inputs == 1)
9625 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v8i16, DL, V1, V2,
9626 Mask, Subtarget, DAG))
9627 return V;
9629 // We have different paths for blend lowering, but they all must use the
9630 // *exact* same predicate.
9631 bool IsBlendSupported = Subtarget->hasSSE41();
9632 if (IsBlendSupported)
9633 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
9634 Subtarget, DAG))
9635 return Blend;
9637 if (SDValue Masked =
9638 lowerVectorShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask, DAG))
9639 return Masked;
9641 // Use dedicated unpack instructions for masks that match their pattern.
9642 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 2, 10, 3, 11))
9643 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V1, V2);
9644 if (isShuffleEquivalent(V1, V2, Mask, 4, 12, 5, 13, 6, 14, 7, 15))
9645 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V1, V2);
9647 // Try to use byte rotation instructions.
9648 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9649 DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
9650 return Rotate;
9652 if (SDValue BitBlend =
9653 lowerVectorShuffleAsBitBlend(DL, MVT::v8i16, V1, V2, Mask, DAG))
9654 return BitBlend;
9656 if (SDValue Unpack =
9657 lowerVectorShuffleAsUnpack(MVT::v8i16, DL, V1, V2, Mask, DAG))
9658 return Unpack;
9660 // If we can't directly blend but can use PSHUFB, that will be better as it
9661 // can both shuffle and set up the inefficient blend.
9662 if (!IsBlendSupported && Subtarget->hasSSSE3()) {
9663 bool V1InUse, V2InUse;
9664 return lowerVectorShuffleAsPSHUFB(DL, MVT::v8i16, V1, V2, Mask, DAG,
9665 V1InUse, V2InUse);
9666 }
9668 // We can always bit-blend if we have to so the fallback strategy is to
9669 // decompose into single-input permutes and blends.
9670 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
9671 Mask, DAG);
9672 }
9674 /// \brief Check whether a compaction lowering can be done by dropping even
9675 /// elements and compute how many times even elements must be dropped.
9677 /// This handles shuffles which take every Nth element where N is a power of
9678 /// two. Example shuffle masks:
9680 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
9681 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
9682 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
9683 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
9684 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
9685 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
9687 /// Any of these lanes can of course be undef.
9689 /// This routine only supports N <= 3.
9690 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
9691 /// for larger N.
9693 /// \returns N above, or the number of times even elements must be dropped if
9694 /// there is such a number. Otherwise returns zero.
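/// As a worked instance of the check below: with a 16-element single-input
/// mask the modulus M is 16, so N = 1 requires every defined lane to satisfy
/// Mask[i] == (i * 2) % 16, e.g. Mask[1] == 2 and Mask[9] == 2.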
9695 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
9696 // Figure out whether we're looping over two inputs or just one.
9697 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9699 // The modulus for the shuffle vector entries is based on whether this is
9700 // a single input or not.
9701 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
9702 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
9703 "We should only be called with masks with a power-of-2 size!");
9705 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
9707 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
9708 // and 2^3 simultaneously. This is because we may have ambiguity with
9709 // partially undef inputs.
9710 bool ViableForN[3] = {true, true, true};
9712 for (int i = 0, e = Mask.size(); i < e; ++i) {
9713 // Ignore undef lanes, we'll optimistically collapse them to the pattern we
9714 // want.
9715 if (Mask[i] == -1)
9716 continue;
9718 bool IsAnyViable = false;
9719 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9720 if (ViableForN[j]) {
9721 uint64_t N = j + 1;
9723 // The shuffle mask must be equal to (i * 2^N) % M.
9724 if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
9725 IsAnyViable = true;
9726 else
9727 ViableForN[j] = false;
9728 }
9729 // Early exit if we exhaust the possible powers of two.
9730 if (!IsAnyViable)
9731 break;
9732 }
9734 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9735 if (ViableForN[j])
9736 return j + 1;
9738 // Return 0 as there is no viable power of two.
9739 return 0;
9740 }
9742 /// \brief Generic lowering of v16i8 shuffles.
9744 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
9745 /// detect any complexity reducing interleaving. If that doesn't help, it uses
9746 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
9747 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them
9748 /// back together.
9749 static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9750 const X86Subtarget *Subtarget,
9751 SelectionDAG &DAG) {
9752 SDLoc DL(Op);
9753 assert(Op.getSimpleValueType() == MVT::v16i8 && "Bad shuffle type!");
9754 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9755 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9756 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9757 ArrayRef<int> Mask = SVOp->getMask();
9758 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
9760 // Try to use shift instructions.
9761 if (SDValue Shift =
9762 lowerVectorShuffleAsShift(DL, MVT::v16i8, V1, V2, Mask, DAG))
9763 return Shift;
9765 // Try to use byte rotation instructions.
9766 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9767 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
9768 return Rotate;
9770 // Try to use a zext lowering.
9771 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9772 DL, MVT::v16i8, V1, V2, Mask, Subtarget, DAG))
9773 return ZExt;
9775 int NumV2Elements =
9776 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 16; });
9778 // For single-input shuffles, there are some nicer lowering tricks we can use.
9779 if (NumV2Elements == 0) {
9780 // Check for being able to broadcast a single element.
9781 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i8, DL, V1,
9782 Mask, Subtarget, DAG))
9783 return Broadcast;
9785 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
9786 // Notably, this handles splat and partial-splat shuffles more efficiently.
9787 // However, it only makes sense if the pre-duplication shuffle simplifies
9788 // things significantly. Currently, this means we need to be able to
9789 // express the pre-duplication shuffle as an i16 shuffle.
9791 // FIXME: We should check for other patterns which can be widened into an
9792 // i16 shuffle as well.
9793 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
9794 for (int i = 0; i < 16; i += 2)
9795 if (Mask[i] != -1 && Mask[i + 1] != -1 && Mask[i] != Mask[i + 1])
9796 return false;
9798 return true;
9799 };
9800 auto tryToWidenViaDuplication = [&]() -> SDValue {
9801 if (!canWidenViaDuplication(Mask))
9802 return SDValue();
9803 SmallVector<int, 4> LoInputs;
9804 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(LoInputs),
9805 [](int M) { return M >= 0 && M < 8; });
9806 std::sort(LoInputs.begin(), LoInputs.end());
9807 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
9808 LoInputs.end());
9809 SmallVector<int, 4> HiInputs;
9810 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(HiInputs),
9811 [](int M) { return M >= 8; });
9812 std::sort(HiInputs.begin(), HiInputs.end());
9813 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
9814 HiInputs.end());
9816 bool TargetLo = LoInputs.size() >= HiInputs.size();
9817 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
9818 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
9820 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9821 SmallDenseMap<int, int, 8> LaneMap;
9822 for (int I : InPlaceInputs) {
9823 PreDupI16Shuffle[I / 2] = I / 2;
9824 LaneMap[I] = I;
9825 }
9826 int j = TargetLo ? 0 : 4, je = j + 4;
9827 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
9828 // Check if j is already a shuffle of this input. This happens when
9829 // there are two adjacent bytes after we move the low one.
9830 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
9831 // If we haven't yet mapped the input, search for a slot into which
9832 // we can map it.
9833 while (j < je && PreDupI16Shuffle[j] != -1)
9834 ++j;
9836 if (j == je)
9837 // We can't place the inputs into a single half with a simple i16
9838 // shuffle, so bail.
9839 return SDValue();
9840 // Map this input with the i16 shuffle.
9841 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
9842 }
9844 // Update the lane map based on the mapping we ended up with.
9845 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
9846 }
9847 V1 = DAG.getNode(
9848 ISD::BITCAST, DL, MVT::v16i8,
9849 DAG.getVectorShuffle(MVT::v8i16, DL,
9850 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9851 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
9853 // Unpack the bytes to form the i16s that will be shuffled into place.
9854 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9855 MVT::v16i8, V1, V1);
9857 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9858 for (int i = 0; i < 16; ++i)
9859 if (Mask[i] != -1) {
9860 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
9861 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
9862 if (PostDupI16Shuffle[i / 2] == -1)
9863 PostDupI16Shuffle[i / 2] = MappedMask;
9864 else
9865 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
9866 "Conflicting entries in the original shuffle!");
9867 }
9868 return DAG.getNode(
9869 ISD::BITCAST, DL, MVT::v16i8,
9870 DAG.getVectorShuffle(MVT::v8i16, DL,
9871 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9872 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
9873 };
9874 if (SDValue V = tryToWidenViaDuplication())
9875 return V;
9876 }
9878 // Use dedicated unpack instructions for masks that match their pattern.
9879 if (isShuffleEquivalent(V1, V2, Mask,
9880 0, 16, 1, 17, 2, 18, 3, 19,
9881 4, 20, 5, 21, 6, 22, 7, 23))
9882 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V1, V2);
9883 if (isShuffleEquivalent(V1, V2, Mask,
9884 8, 24, 9, 25, 10, 26, 11, 27,
9885 12, 28, 13, 29, 14, 30, 15, 31))
9886 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V1, V2);
9888 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
9889 // with PSHUFB. It is important to do this before we attempt to generate any
9890 // blends but after all of the single-input lowerings. If the single input
9891 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
9892 // want to preserve that and we can DAG combine any longer sequences into
9893 // a PSHUFB in the end. But once we start blending from multiple inputs,
9894 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
9895 // and there are *very* few patterns that would actually be faster than the
9896 // PSHUFB approach because of its ability to zero lanes.
9898 // FIXME: The only exceptions to the above are blends which are exact
9899 // interleavings with direct instructions supporting them. We currently don't
9900 // handle those well here.
9901 if (Subtarget->hasSSSE3()) {
9902 bool V1InUse = false;
9903 bool V2InUse = false;
9905 SDValue PSHUFB = lowerVectorShuffleAsPSHUFB(DL, MVT::v16i8, V1, V2, Mask,
9906 DAG, V1InUse, V2InUse);
9908 // If both V1 and V2 are in use and we can use a direct blend or an unpack,
9909 // do so. This avoids using them to handle blends-with-zero which is
9910 // important as a single pshufb is significantly faster for that.
9911 if (V1InUse && V2InUse) {
9912 if (Subtarget->hasSSE41())
9913 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i8, V1, V2,
9914 Mask, Subtarget, DAG))
9915 return Blend;
9917 // We can use an unpack to do the blending rather than an or in some
9918 // cases. Even though the or may be (very minorly) more efficient, we
9919 // prefer this lowering because there are common cases where part of
9920 // the complexity of the shuffles goes away when we do the final blend as
9921 // an unpack.
9922 // FIXME: It might be worth trying to detect if the unpack-feeding
9923 // shuffles will both be pshufb, in which case we shouldn't bother with
9924 // this.
9925 if (SDValue Unpack =
9926 lowerVectorShuffleAsUnpack(MVT::v16i8, DL, V1, V2, Mask, DAG))
9927 return Unpack;
9928 }
9930 return PSHUFB;
9931 }
9933 // There are special ways we can lower some single-element blends.
9934 if (NumV2Elements == 1)
9935 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v16i8, DL, V1, V2,
9936 Mask, Subtarget, DAG))
9937 return V;
9939 if (SDValue BitBlend =
9940 lowerVectorShuffleAsBitBlend(DL, MVT::v16i8, V1, V2, Mask, DAG))
9941 return BitBlend;
9943 // Check whether a compaction lowering can be done. This handles shuffles
9944 // which take every Nth element for some even N. See the helper function for
9945 // details.
9947 // We special case these as they can be particularly efficiently handled with
9948 // the PACKUSWB instruction on x86 and they show up in common patterns of
9949 // rearranging bytes to truncate wide elements.
9950 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask)) {
9951 // NumEvenDrops is the power of two stride of the elements. Another way of
9952 // thinking about it is that we need to drop the even elements this many
9953 // times to get the original input.
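// For example, NumEvenDrops == 1 keeps bytes 0, 2, 4, ... with one round of
// PACKUSWB, while NumEvenDrops == 2 keeps bytes 0, 4, 8, ... with two rounds.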
9954 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9956 // First we need to zero all the dropped bytes.
9957 assert(NumEvenDrops <= 3 &&
9958 "No support for dropping even elements more than 3 times.");
9959 // We use the mask type to pick which bytes are preserved based on how many
9960 // elements are dropped.
9961 MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
9962 SDValue ByteClearMask =
9963 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8,
9964 DAG.getConstant(0xFF, MaskVTs[NumEvenDrops - 1]));
9965 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
9966 if (!IsSingleInput)
9967 V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
9969 // Now pack things back together.
9970 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
9971 V2 = IsSingleInput ? V1 : DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
9972 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
9973 for (int i = 1; i < NumEvenDrops; ++i) {
9974 Result = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, Result);
9975 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
9976 }
9978 return Result;
9979 }
9981 // Handle multi-input cases by blending single-input shuffles.
9982 if (NumV2Elements > 0)
9983 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v16i8, V1, V2,
9984 Mask, DAG);
9986 // The fallback path for single-input shuffles widens this into two v8i16
9987 // vectors with unpacks, shuffles those, and then pulls them back together
9988 // with a pack.
9989 SDValue V = V1;
9991 int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9992 int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9993 for (int i = 0; i < 16; ++i)
9994 if (Mask[i] >= 0)
9995 (i < 8 ? LoBlendMask[i] : HiBlendMask[i % 8]) = Mask[i];
9997 SDValue Zero = getZeroVector(MVT::v8i16, Subtarget, DAG, DL);
9999 SDValue VLoHalf, VHiHalf;
10000 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
10001 // them out and avoid using UNPCK{L,H} to extract the elements of V as
10002 // i16s.
10003 if (std::none_of(std::begin(LoBlendMask), std::end(LoBlendMask),
10004 [](int M) { return M >= 0 && M % 2 == 1; }) &&
10005 std::none_of(std::begin(HiBlendMask), std::end(HiBlendMask),
10006 [](int M) { return M >= 0 && M % 2 == 1; })) {
10007 // Use a mask to drop the high bytes.
10008 VLoHalf = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
10009 VLoHalf = DAG.getNode(ISD::AND, DL, MVT::v8i16, VLoHalf,
10010 DAG.getConstant(0x00FF, MVT::v8i16));
10012 // This will be a single vector shuffle instead of a blend so nuke VHiHalf.
10013 VHiHalf = DAG.getUNDEF(MVT::v8i16);
10015 // Squash the masks to point directly into VLoHalf.
10016 for (int &M : LoBlendMask)
10017 if (M >= 0)
10018 M /= 2;
10019 for (int &M : HiBlendMask)
10020 if (M >= 0)
10021 M /= 2;
10022 } else {
10023 // Otherwise just unpack the low half of V into VLoHalf and the high half into
10024 // VHiHalf so that we can blend them as i16s.
10025 VLoHalf = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
10026 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
10027 VHiHalf = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
10028 DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
10029 }
10031 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, LoBlendMask);
10032 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, VLoHalf, VHiHalf, HiBlendMask);
10034 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
10035 }
10037 /// \brief Dispatching routine to lower various 128-bit x86 vector shuffles.
10039 /// This routine breaks down the specific type of 128-bit shuffle and
10040 /// dispatches to the lowering routines accordingly.
10041 static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10042 MVT VT, const X86Subtarget *Subtarget,
10043 SelectionDAG &DAG) {
10044 switch (VT.SimpleTy) {
10045 case MVT::v2i64:
10046 return lowerV2I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10047 case MVT::v2f64:
10048 return lowerV2F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10049 case MVT::v4i32:
10050 return lowerV4I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10051 case MVT::v4f32:
10052 return lowerV4F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10053 case MVT::v8i16:
10054 return lowerV8I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
10055 case MVT::v16i8:
10056 return lowerV16I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
10058 default:
10059 llvm_unreachable("Unimplemented!");
10060 }
10061 }
10063 /// \brief Helper function to test whether a shuffle mask could be
10064 /// simplified by widening the elements being shuffled.
10066 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
10067 /// leaves it in an unspecified state.
10069 /// NOTE: This must handle normal vector shuffle masks and *target* vector
10070 /// shuffle masks. The latter have the special property of a '-2' representing
10071 /// a zero-ed lane of a vector.
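/// For example, the v8 mask <0, 1, 6, 7, -1, -1, 2, 3> widens to the v4 mask
/// <0, 3, -1, 1>, while <1, 2, ...> cannot be widened because element 1 is
/// not aligned to an even index.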
10072 static bool canWidenShuffleElements(ArrayRef<int> Mask,
10073 SmallVectorImpl<int> &WidenedMask) {
10074 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
10075 // If both elements are undef, it's trivial.
10076 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] == SM_SentinelUndef) {
10077 WidenedMask.push_back(SM_SentinelUndef);
10078 continue;
10079 }
10081 // Check for an undef mask and a mask value properly aligned to fit with
10082 // a pair of values. If we find such a case, use the non-undef mask's value.
10083 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] >= 0 && Mask[i + 1] % 2 == 1) {
10084 WidenedMask.push_back(Mask[i + 1] / 2);
10085 continue;
10086 }
10087 if (Mask[i + 1] == SM_SentinelUndef && Mask[i] >= 0 && Mask[i] % 2 == 0) {
10088 WidenedMask.push_back(Mask[i] / 2);
10089 continue;
10090 }
10092 // When zeroing, we need to spread the zeroing across both lanes to widen.
10093 if (Mask[i] == SM_SentinelZero || Mask[i + 1] == SM_SentinelZero) {
10094 if ((Mask[i] == SM_SentinelZero || Mask[i] == SM_SentinelUndef) &&
10095 (Mask[i + 1] == SM_SentinelZero || Mask[i + 1] == SM_SentinelUndef)) {
10096 WidenedMask.push_back(SM_SentinelZero);
10097 continue;
10098 }
10099 return false;
10100 }
10102 // Finally check if the two mask values are adjacent and aligned with
10103 // their pair.
10104 if (Mask[i] != SM_SentinelUndef && Mask[i] % 2 == 0 && Mask[i] + 1 == Mask[i + 1]) {
10105 WidenedMask.push_back(Mask[i] / 2);
10106 continue;
10107 }
10109 // Otherwise we can't safely widen the elements used in this shuffle.
10110 return false;
10111 }
10112 assert(WidenedMask.size() == Mask.size() / 2 &&
10113 "Incorrect size of mask after widening the elements!");
10115 return true;
10116 }
10118 /// \brief Generic routine to split vector shuffle into half-sized shuffles.
10120 /// This routine just extracts two subvectors, shuffles them independently, and
10121 /// then concatenates them back together. This should work effectively with all
10122 /// AVX vector shuffle types.
10123 static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10124 SDValue V2, ArrayRef<int> Mask,
10125 SelectionDAG &DAG) {
10126 assert(VT.getSizeInBits() >= 256 &&
10127 "Only for 256-bit or wider vector shuffles!");
10128 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
10129 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
10131 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
10132 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
10134 int NumElements = VT.getVectorNumElements();
10135 int SplitNumElements = NumElements / 2;
10136 MVT ScalarVT = VT.getScalarType();
10137 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
10139 // Rather than splitting build-vectors, just build two narrower build
10140 // vectors. This helps shuffling with splats and zeros.
10141 auto SplitVector = [&](SDValue V) {
10142 while (V.getOpcode() == ISD::BITCAST)
10143 V = V->getOperand(0);
10145 MVT OrigVT = V.getSimpleValueType();
10146 int OrigNumElements = OrigVT.getVectorNumElements();
10147 int OrigSplitNumElements = OrigNumElements / 2;
10148 MVT OrigScalarVT = OrigVT.getScalarType();
10149 MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
10151 SDValue LoV, HiV;
10153 auto *BV = dyn_cast<BuildVectorSDNode>(V);
10154 if (!BV) {
10155 LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
10156 DAG.getIntPtrConstant(0));
10157 HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
10158 DAG.getIntPtrConstant(OrigSplitNumElements));
10159 } else {
10161 SmallVector<SDValue, 16> LoOps, HiOps;
10162 for (int i = 0; i < OrigSplitNumElements; ++i) {
10163 LoOps.push_back(BV->getOperand(i));
10164 HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
10165 }
10166 LoV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, LoOps);
10167 HiV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, HiOps);
10168 }
10169 return std::make_pair(DAG.getNode(ISD::BITCAST, DL, SplitVT, LoV),
10170 DAG.getNode(ISD::BITCAST, DL, SplitVT, HiV));
10171 };
10173 SDValue LoV1, HiV1, LoV2, HiV2;
10174 std::tie(LoV1, HiV1) = SplitVector(V1);
10175 std::tie(LoV2, HiV2) = SplitVector(V2);
10177 // Now create two 4-way blends of these half-width vectors.
10178 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
10179 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
10180 SmallVector<int, 32> V1BlendMask, V2BlendMask, BlendMask;
10181 for (int i = 0; i < SplitNumElements; ++i) {
10182 int M = HalfMask[i];
10183 if (M >= NumElements) {
10184 if (M >= NumElements + SplitNumElements)
10185 UseHiV2 = true;
10186 else
10187 UseLoV2 = true;
10188 V2BlendMask.push_back(M - NumElements);
10189 V1BlendMask.push_back(-1);
10190 BlendMask.push_back(SplitNumElements + i);
10191 } else if (M >= 0) {
10192 if (M >= SplitNumElements)
10193 UseHiV1 = true;
10194 else
10195 UseLoV1 = true;
10196 V2BlendMask.push_back(-1);
10197 V1BlendMask.push_back(M);
10198 BlendMask.push_back(i);
10199 } else {
10200 V2BlendMask.push_back(-1);
10201 V1BlendMask.push_back(-1);
10202 BlendMask.push_back(-1);
10203 }
10204 }
10206 // Because the lowering happens after all combining takes place, we need to
10207 // manually combine these blend masks as much as possible so that we create
10208 // a minimal number of high-level vector shuffle nodes.
10210 // First try just blending the halves of V1 or V2.
10211 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
10212 return DAG.getUNDEF(SplitVT);
10213 if (!UseLoV2 && !UseHiV2)
10214 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10215 if (!UseLoV1 && !UseHiV1)
10216 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10218 SDValue V1Blend, V2Blend;
10219 if (UseLoV1 && UseHiV1) {
10220 V1Blend =
10221 DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10222 } else {
10223 // We only use half of V1 so map the usage down into the final blend mask.
10224 V1Blend = UseLoV1 ? LoV1 : HiV1;
10225 for (int i = 0; i < SplitNumElements; ++i)
10226 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
10227 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
10228 }
10229 if (UseLoV2 && UseHiV2) {
10230 V2Blend =
10231 DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10232 } else {
10233 // We only use half of V2 so map the usage down into the final blend mask.
10234 V2Blend = UseLoV2 ? LoV2 : HiV2;
10235 for (int i = 0; i < SplitNumElements; ++i)
10236 if (BlendMask[i] >= SplitNumElements)
10237 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
10238 }
10239 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
10240 };
10241 SDValue Lo = HalfBlend(LoMask);
10242 SDValue Hi = HalfBlend(HiMask);
10243 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
10244 }
10246 /// \brief Either split a vector in halves or decompose the shuffles and the
10247 /// blends.
10249 /// This is provided as a good fallback for many lowerings of non-single-input
10250 /// shuffles with more than one 128-bit lane. In those cases, we want to select
10251 /// between splitting the shuffle into 128-bit components and stitching those
10252 /// back together vs. extracting the single-input shuffles and blending those
10253 /// results.
10254 static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
10255 SDValue V2, ArrayRef<int> Mask,
10256 SelectionDAG &DAG) {
10257 assert(!isSingleInputShuffleMask(Mask) && "This routine must not be used to "
10258 "lower single-input shuffles as it "
10259 "could then recurse on itself.");
10260 int Size = Mask.size();
10262 // If this can be modeled as a broadcast of two elements followed by a blend,
10263 // prefer that lowering. This is especially important because broadcasts can
10264 // often fold with memory operands.
10265 auto DoBothBroadcast = [&] {
10266 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
10267 for (int M : Mask)
10268 if (M >= Size) {
10269 if (V2BroadcastIdx == -1)
10270 V2BroadcastIdx = M - Size;
10271 else if (M - Size != V2BroadcastIdx)
10272 return false;
10273 } else if (M >= 0) {
10274 if (V1BroadcastIdx == -1)
10275 V1BroadcastIdx = M;
10276 else if (M != V1BroadcastIdx)
10277 return false;
10278 }
10279 return true;
10280 };
10281 if (DoBothBroadcast())
10282 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
10283 DAG);
10285 // If the inputs all stem from a single 128-bit lane of each input, then we
10286 // split them rather than blending because the split will decompose to
10287 // unusually few instructions.
10288 int LaneCount = VT.getSizeInBits() / 128;
10289 int LaneSize = Size / LaneCount;
10290 SmallBitVector LaneInputs[2];
10291 LaneInputs[0].resize(LaneCount, false);
10292 LaneInputs[1].resize(LaneCount, false);
10293 for (int i = 0; i < Size; ++i)
10294 if (Mask[i] >= 0)
10295 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
10296 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
10297 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10299 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
10300 // that the decomposed single-input shuffles don't end up here.
10301 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10302 }
10304 /// \brief Lower a vector shuffle crossing multiple 128-bit lanes as
10305 /// a permutation and blend of those lanes.
10307 /// This essentially blends the out-of-lane inputs to each lane into the lane
10308 /// from a permuted copy of the vector. This lowering strategy results in four
10309 /// instructions in the worst case for a single-input cross lane shuffle which
10310 /// is lower than any other fully general cross-lane shuffle strategy I'm aware
10311 /// of. Special cases for each particular shuffle pattern should be handled
10312 /// prior to trying this lowering.
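/// As an illustration, a single-input v4f64 shuffle <2, 1, 0, 3> becomes a
/// VPERM2X128 swapping the two 128-bit halves followed by a blend of the
/// original and the swapped vector.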
10313 static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT,
10314 SDValue V1, SDValue V2,
10315 ArrayRef<int> Mask,
10316 SelectionDAG &DAG) {
10317 // FIXME: This should probably be generalized for 512-bit vectors as well.
10318 assert(VT.getSizeInBits() == 256 && "Only for 256-bit vector shuffles!");
10319 int LaneSize = Mask.size() / 2;
10321 // If there are only inputs from one 128-bit lane, splitting will in fact be
10322 // less expensive. The flags track whether the given lane contains an element
10323 // that crosses to another lane.
10324 bool LaneCrossing[2] = {false, false};
10325 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10326 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10327 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
10328 if (!LaneCrossing[0] || !LaneCrossing[1])
10329 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10331 if (isSingleInputShuffleMask(Mask)) {
10332 SmallVector<int, 32> FlippedBlendMask;
10333 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10334 FlippedBlendMask.push_back(
10335 Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
10336 ? Mask[i]
10337 : Mask[i] % LaneSize +
10338 (i / LaneSize) * LaneSize + Size));
10340 // Flip the vector, and blend the results which should now be in-lane. The
10341 // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
10342 // 5 for the high source. The value 3 selects the high half of source 2 and
10343 // the value 2 selects the low half of source 2. We only use source 2 to
10344 // allow folding it into a memory operand.
10345 unsigned PERMMask = 3 | 2 << 4;
10346 SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
10347 V1, DAG.getConstant(PERMMask, MVT::i8));
10348 return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
10349 }
10351 // This now reduces to two single-input shuffles of V1 and V2 which at worst
10352 // will be handled by the above logic and a blend of the results, much like
10353 // other patterns in AVX.
10354 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10355 }
10357 /// \brief Handle lowering 2-lane 128-bit shuffles.
10358 static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10359 SDValue V2, ArrayRef<int> Mask,
10360 const X86Subtarget *Subtarget,
10361 SelectionDAG &DAG) {
10362 // Blends are faster and handle all the non-lane-crossing cases.
10363 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask,
10364 Subtarget, DAG))
10365 return Blend;
10367 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
10368 VT.getVectorNumElements() / 2);
10369 // Check for patterns which can be matched with a single insert of a 128-bit
10370 // subvector.
10371 if (isShuffleEquivalent(V1, V2, Mask, 0, 1, 0, 1) ||
10372 isShuffleEquivalent(V1, V2, Mask, 0, 1, 4, 5)) {
10373 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10374 DAG.getIntPtrConstant(0));
10375 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
10376 Mask[2] < 4 ? V1 : V2, DAG.getIntPtrConstant(0));
10377 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10379 if (isShuffleEquivalent(V1, V2, Mask, 0, 1, 6, 7)) {
10380 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10381 DAG.getIntPtrConstant(0));
10382 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
10383 DAG.getIntPtrConstant(2));
10384 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10385 }
10387 // Otherwise form a 128-bit permutation.
10388 // FIXME: Detect zero-vector inputs and use the VPERM2X128 to zero that half.
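// For example, a mask of <2, 3, 6, 7> selects the high half of V1
// (Mask[0] / 2 == 1) and the high half of V2 (Mask[2] / 2 == 3), giving an
// immediate of 0x31.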
10389 unsigned PermMask = Mask[0] / 2 | (Mask[2] / 2) << 4;
10390 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
10391 DAG.getConstant(PermMask, MVT::i8));
10392 }
10394 /// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
10395 /// shuffling each lane.
10397 /// This will only succeed when the result of fixing the 128-bit lanes results
10398 /// in a single-input non-lane-crossing shuffle with a repeating shuffle mask in
10399 /// each 128-bit lane. This handles many cases where we can quickly blend away
10400 /// the lane crosses early and then use simpler shuffles within each lane.
10402 /// FIXME: It might be worthwhile at some point to support this without
10403 /// requiring the 128-bit lane-relative shuffles to be repeating, but currently
10404 /// in x86 only floating point has interesting non-repeating shuffles, and even
10405 /// those are still *marginally* more expensive.
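/// For instance, the two-input v8f32 mask <4, 5, 6, 7, 12, 13, 14, 15> is
/// fixed up with a single v4f64-style lane shuffle (lane mask <2, 3, 6, 7>),
/// after which the remaining in-lane shuffle is a no-op.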
10406 static SDValue lowerVectorShuffleByMerging128BitLanes(
10407 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
10408 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
10409 assert(!isSingleInputShuffleMask(Mask) &&
10410 "This is only useful with multiple inputs.");
10412 int Size = Mask.size();
10413 int LaneSize = 128 / VT.getScalarSizeInBits();
10414 int NumLanes = Size / LaneSize;
10415 assert(NumLanes > 1 && "Only handles 256-bit and wider shuffles.");
10417 // See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
10418 // check whether the in-128-bit lane shuffles share a repeating pattern.
10419 SmallVector<int, 4> Lanes;
10420 Lanes.resize(NumLanes, -1);
10421 SmallVector<int, 4> InLaneMask;
10422 InLaneMask.resize(LaneSize, -1);
10423 for (int i = 0; i < Size; ++i) {
10424 if (Mask[i] < 0)
10425 continue;
10427 int j = i / LaneSize;
10429 if (Lanes[j] < 0) {
10430 // First entry we've seen for this lane.
10431 Lanes[j] = Mask[i] / LaneSize;
10432 } else if (Lanes[j] != Mask[i] / LaneSize) {
10433 // This doesn't match the lane selected previously!
10434 return SDValue();
10435 }
10437 // Check that within each lane we have a consistent shuffle mask.
10438 int k = i % LaneSize;
10439 if (InLaneMask[k] < 0) {
10440 InLaneMask[k] = Mask[i] % LaneSize;
10441 } else if (InLaneMask[k] != Mask[i] % LaneSize) {
10442 // This doesn't fit a repeating in-lane mask.
10443 return SDValue();
10444 }
10445 }
10447 // First shuffle the lanes into place.
10448 MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
10449 VT.getSizeInBits() / 64);
10450 SmallVector<int, 8> LaneMask;
10451 LaneMask.resize(NumLanes * 2, -1);
10452 for (int i = 0; i < NumLanes; ++i)
10453 if (Lanes[i] >= 0) {
10454 LaneMask[2 * i + 0] = 2 * Lanes[i] + 0;
10455 LaneMask[2 * i + 1] = 2 * Lanes[i] + 1;
10456 }
10458 V1 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V1);
10459 V2 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V2);
10460 SDValue LaneShuffle = DAG.getVectorShuffle(LaneVT, DL, V1, V2, LaneMask);
10462 // Cast it back to the type we actually want.
10463 LaneShuffle = DAG.getNode(ISD::BITCAST, DL, VT, LaneShuffle);
10465 // Now do a simple shuffle that isn't lane crossing.
10466 SmallVector<int, 8> NewMask;
10467 NewMask.resize(Size, -1);
10468 for (int i = 0; i < Size; ++i)
10469 if (Mask[i] >= 0)
10470 NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
10471 assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) &&
10472 "Must not introduce lane crosses at this point!");
10474 return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask);
10475 }
10477 /// \brief Test whether the specified input (0 or 1) is in-place blended by the
10478 /// given mask.
10480 /// This returns true if the elements from a particular input are already in the
10481 /// slot required by the given mask and require no permutation.
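/// For example, in the v4 mask <0, 5, 2, 7> both inputs are in place: input 0
/// supplies elements 0 and 2 from positions 0 and 2, and input 1 supplies
/// elements 1 and 3 from positions 1 and 3.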
10482 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
10483 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
10484 int Size = Mask.size();
10485 for (int i = 0; i < Size; ++i)
10486 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
10487 return false;
10489 return true;
10490 }
10492 /// \brief Handle lowering of 4-lane 64-bit floating point shuffles.
10494 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
10495 /// isn't available.
10496 static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10497 const X86Subtarget *Subtarget,
10498 SelectionDAG &DAG) {
10499 SDLoc DL(Op);
10500 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10501 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10502 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10503 ArrayRef<int> Mask = SVOp->getMask();
10504 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10506 SmallVector<int, 4> WidenedMask;
10507 if (canWidenShuffleElements(Mask, WidenedMask))
10508 return lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask, Subtarget,
10509 DAG);
10511 if (isSingleInputShuffleMask(Mask)) {
10512 // Check for being able to broadcast a single element.
10513 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f64, DL, V1,
10514 Mask, Subtarget, DAG))
10515 return Broadcast;
10517 // Use low duplicate instructions for masks that match their pattern.
10518 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2))
10519 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
10521 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
10522 // Non-half-crossing single input shuffles can be lowered with an
10523 // interleaved permutation.
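// For example, the in-lane swap <1, 0, 3, 2> produces the immediate 0b0101:
// bits 0 and 2 are set because Mask[0] == 1 and Mask[2] == 3.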
10524 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
10525 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
10526 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
10527 DAG.getConstant(VPERMILPMask, MVT::i8));
10528 }
10530 // With AVX2 we have direct support for this permutation.
10531 if (Subtarget->hasAVX2())
10532 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
10533 getV4X86ShuffleImm8ForMask(Mask, DAG));
10535 // Otherwise, fall back.
10536 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
10537 DAG);
10538 }
10540 // X86 has dedicated unpack instructions that can handle specific blend
10541 // operations: UNPCKH and UNPCKL.
10542 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 2, 6))
10543 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V1, V2);
10544 if (isShuffleEquivalent(V1, V2, Mask, 1, 5, 3, 7))
10545 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V1, V2);
10546 if (isShuffleEquivalent(V1, V2, Mask, 4, 0, 6, 2))
10547 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V2, V1);
10548 if (isShuffleEquivalent(V1, V2, Mask, 5, 1, 7, 3))
10549 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V2, V1);
10551 // If we have a single input to the zero element, insert that into V1 if we
10552 // can do so cheaply.
10553 int NumV2Elements =
10554 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
10555 if (NumV2Elements == 1 && Mask[0] >= 4)
10556 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
10557 MVT::v4f64, DL, V1, V2, Mask, Subtarget, DAG))
10558 return Insertion;
10560 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
10561 Subtarget, DAG))
10562 return Blend;
10564 // Check if the blend happens to exactly fit that of SHUFPD.
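// For example, the mask <1, 4, 3, 6> fits the first pattern below and yields
// SHUFPDMask == 0b0101: the odd element of each lane of V1 interleaved with
// the even element of each lane of V2.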
10565 if ((Mask[0] == -1 || Mask[0] < 2) &&
10566 (Mask[1] == -1 || (Mask[1] >= 4 && Mask[1] < 6)) &&
10567 (Mask[2] == -1 || (Mask[2] >= 2 && Mask[2] < 4)) &&
10568 (Mask[3] == -1 || Mask[3] >= 6)) {
10569 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 5) << 1) |
10570 ((Mask[2] == 3) << 2) | ((Mask[3] == 7) << 3);
10571 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V1, V2,
10572 DAG.getConstant(SHUFPDMask, MVT::i8));
10573 }
10574 if ((Mask[0] == -1 || (Mask[0] >= 4 && Mask[0] < 6)) &&
10575 (Mask[1] == -1 || Mask[1] < 2) &&
10576 (Mask[2] == -1 || Mask[2] >= 6) &&
10577 (Mask[3] == -1 || (Mask[3] >= 2 && Mask[3] < 4))) {
10578 unsigned SHUFPDMask = (Mask[0] == 5) | ((Mask[1] == 1) << 1) |
10579 ((Mask[2] == 7) << 2) | ((Mask[3] == 3) << 3);
10580 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V2, V1,
10581 DAG.getConstant(SHUFPDMask, MVT::i8));
10582 }
10584 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10585 // shuffle. However, if we have AVX2 and either inputs are already in place,
10586 // we will be able to shuffle even across lanes the other input in a single
10587 // instruction so skip this pattern.
10588 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10589 isShuffleMaskInputInPlace(1, Mask))))
10590 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10591 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
10592 return Result;
10594 // If we have AVX2 then we always want to lower with a blend because at v4 we
10595 // can fully permute the elements.
10596 if (Subtarget->hasAVX2())
10597 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
10598 Mask, DAG);
10600 // Otherwise fall back on generic lowering.
10601 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, DAG);
10602 }
10604 /// \brief Handle lowering of 4-lane 64-bit integer shuffles.
10606 /// This routine is only called when we have AVX2 and thus a reasonable
10607 /// instruction set for v4i64 shuffling.
10608 static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10609 const X86Subtarget *Subtarget,
10610 SelectionDAG &DAG) {
10611 SDLoc DL(Op);
10612 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10613 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10614 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10615 ArrayRef<int> Mask = SVOp->getMask();
10616 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10617 assert(Subtarget->hasAVX2() && "We can only lower v4i64 with AVX2!");
10619 SmallVector<int, 4> WidenedMask;
10620 if (canWidenShuffleElements(Mask, WidenedMask))
10621 return lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask, Subtarget,
10622 DAG);
10624 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
10625 Subtarget, DAG))
10626 return Blend;
10628 // Check for being able to broadcast a single element.
10629 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i64, DL, V1,
10630 Mask, Subtarget, DAG))
10631 return Broadcast;
10633 // When the shuffle is mirrored between the 128-bit lanes of the unit, we can
10634 // use lower latency instructions that will operate on both 128-bit lanes.
10635 SmallVector<int, 2> RepeatedMask;
10636 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
10637 if (isSingleInputShuffleMask(Mask)) {
10638 int PSHUFDMask[] = {-1, -1, -1, -1};
10639 for (int i = 0; i < 2; ++i)
10640 if (RepeatedMask[i] >= 0) {
10641 PSHUFDMask[2 * i] = 2 * RepeatedMask[i];
10642 PSHUFDMask[2 * i + 1] = 2 * RepeatedMask[i] + 1;
10643 }
10644 return DAG.getNode(
10645 ISD::BITCAST, DL, MVT::v4i64,
10646 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
10647 DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, V1),
10648 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
10649 }
10650 }
10652 // AVX2 provides a direct instruction for permuting a single input across
10653 // lanes.
10654 if (isSingleInputShuffleMask(Mask))
10655 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
10656 getV4X86ShuffleImm8ForMask(Mask, DAG));
10658 // Try to use shift instructions.
10659 if (SDValue Shift =
10660 lowerVectorShuffleAsShift(DL, MVT::v4i64, V1, V2, Mask, DAG))
10661 return Shift;
10663 // Use dedicated unpack instructions for masks that match their pattern.
10664 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 2, 6))
10665 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V1, V2);
10666 if (isShuffleEquivalent(V1, V2, Mask, 1, 5, 3, 7))
10667 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V1, V2);
10668 if (isShuffleEquivalent(V1, V2, Mask, 4, 0, 6, 2))
10669 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V2, V1);
10670 if (isShuffleEquivalent(V1, V2, Mask, 5, 1, 7, 3))
10671 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V2, V1);
10673 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10674 // shuffle. However, if we have AVX2 and either inputs are already in place,
10675 // we will be able to shuffle even across lanes the other input in a single
10676 // instruction so skip this pattern.
10677 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10678 isShuffleMaskInputInPlace(1, Mask))))
10679 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10680 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
10681 return Result;
10683 // Otherwise fall back on generic blend lowering.
10684 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
10685 Mask, DAG);
10686 }
10688 /// \brief Handle lowering of 8-lane 32-bit floating point shuffles.
10690 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
10691 /// isn't available.
10692 static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10693 const X86Subtarget *Subtarget,
10694 SelectionDAG &DAG) {
10695 SDLoc DL(Op);
10696 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10697 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10698 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10699 ArrayRef<int> Mask = SVOp->getMask();
10700 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10702 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
10703 Subtarget, DAG))
10704 return Blend;
10706 // Check for being able to broadcast a single element.
10707 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8f32, DL, V1,
10708 Mask, Subtarget, DAG))
10709 return Broadcast;
10711 // If the shuffle mask is repeated in each 128-bit lane, we have many more
10712 // options to efficiently lower the shuffle.
10713 SmallVector<int, 4> RepeatedMask;
10714 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
10715 assert(RepeatedMask.size() == 4 &&
10716 "Repeated masks must be half the mask width!");
10718 // Use even/odd duplicate instructions for masks that match their pattern.
10719 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2, 4, 4, 6, 6))
10720 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
10721 if (isShuffleEquivalent(V1, V2, Mask, 1, 1, 3, 3, 5, 5, 7, 7))
10722 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
10724 if (isSingleInputShuffleMask(Mask))
10725 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
10726 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10728 // Use dedicated unpack instructions for masks that match their pattern.
10729 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10730 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V1, V2);
10731 if (isShuffleEquivalent(V1, V2, Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10732 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V1, V2);
10733 if (isShuffleEquivalent(V1, V2, Mask, 8, 0, 9, 1, 12, 4, 13, 5))
10734 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V2, V1);
10735 if (isShuffleEquivalent(V1, V2, Mask, 10, 2, 11, 3, 14, 6, 15, 7))
10736 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V2, V1);
10738 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
10739 // have already handled any direct blends. We also need to squash the
10740 // repeated mask into a simulated v4f32 mask.
10741 for (int i = 0; i < 4; ++i)
10742 if (RepeatedMask[i] >= 8)
10743 RepeatedMask[i] -= 4;
10744 return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
10745 }
10747 // If we have a single input shuffle with different shuffle patterns in the
10748 // two 128-bit lanes use the variable mask to VPERMILPS.
10749 if (isSingleInputShuffleMask(Mask)) {
10750 SDValue VPermMask[8];
10751 for (int i = 0; i < 8; ++i)
10752 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10753 : DAG.getConstant(Mask[i], MVT::i32);
10754 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
10755 return DAG.getNode(
10756 X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
10757 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask));
10759 if (Subtarget->hasAVX2())
10760 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32,
10761 DAG.getNode(ISD::BITCAST, DL, MVT::v8f32,
10762 DAG.getNode(ISD::BUILD_VECTOR, DL,
10763 MVT::v8i32, VPermMask)),
10764 V1);
10766 // Otherwise, fall back.
10767 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
10768 DAG);
10769 }
10771 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10772 // shuffle.
10773 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10774 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
10775 return Result;
10777 // If we have AVX2 then we always want to lower with a blend because at v8 we
10778 // can fully permute the elements.
10779 if (Subtarget->hasAVX2())
10780 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
10781 Mask, DAG);
10783 // Otherwise fall back on generic lowering.
10784 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, DAG);
10785 }
10787 /// \brief Handle lowering of 8-lane 32-bit integer shuffles.
10789 /// This routine is only called when we have AVX2 and thus a reasonable
10790 /// instruction set for v8i32 shuffling.
10791 static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10792 const X86Subtarget *Subtarget,
10793 SelectionDAG &DAG) {
10794 SDLoc DL(Op);
10795 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10796 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10797 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10798 ArrayRef<int> Mask = SVOp->getMask();
10799 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10800 assert(Subtarget->hasAVX2() && "We can only lower v8i32 with AVX2!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2,
                                                         Mask, Subtarget, DAG))
    return ZExt;

  if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
                                                Subtarget, DAG))
    return Blend;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i32, DL, V1,
                                                        Mask, Subtarget, DAG))
    return Broadcast;

  // If the shuffle mask is repeated in each 128-bit lane we can use more
  // efficient instructions that mirror the shuffles across the two 128-bit
  // lanes.
  SmallVector<int, 4> RepeatedMask;
  if (is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask)) {
    assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
    if (isSingleInputShuffleMask(Mask))
      return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
                         getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
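
    // E.g. the mask <1, 0, 3, 2, 5, 4, 7, 6> repeats <1, 0, 3, 2> in both
    // lanes, so a single VPSHUFD with immediate 0xB1 covers all 8 elements.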

    // Use dedicated unpack instructions for masks that match their pattern.
    if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 4, 12, 5, 13))
      return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V1, V2);
    if (isShuffleEquivalent(V1, V2, Mask, 2, 10, 3, 11, 6, 14, 7, 15))
      return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V1, V2);
    if (isShuffleEquivalent(V1, V2, Mask, 8, 0, 9, 1, 12, 4, 13, 5))
      return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V2, V1);
    if (isShuffleEquivalent(V1, V2, Mask, 10, 2, 11, 3, 14, 6, 15, 7))
      return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V2, V1);
  }

  // Try to use shift instructions.
  if (SDValue Shift =
          lowerVectorShuffleAsShift(DL, MVT::v8i32, V1, V2, Mask, DAG))
    return Shift;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
          DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
    return Rotate;

  // If the shuffle patterns aren't repeated but it is a single input, directly
  // generate a cross-lane VPERMD instruction.
  if (isSingleInputShuffleMask(Mask)) {
    SDValue VPermMask[8];
    for (int i = 0; i < 8; ++i)
      VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
                                 : DAG.getConstant(Mask[i], MVT::i32);
    return DAG.getNode(
        X86ISD::VPERMV, DL, MVT::v8i32,
        DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
  }
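
  // Note the operand order above: X86ISD::VPERMV takes the index vector
  // first and the vector being permuted second, matching VPERMD, where the
  // index register is the first source operand.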

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
          DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
    return Result;

  // Otherwise fall back on generic blend lowering.
  return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
                                                    Mask, DAG);
}

/// \brief Handle lowering of 16-lane 16-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v16i16 shuffling.
static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                        const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
  assert(Subtarget->hasAVX2() && "We can only lower v16i16 with AVX2!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v16i16, V1, V2,
                                                         Mask, Subtarget, DAG))
    return ZExt;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i16, DL, V1,
                                                        Mask, Subtarget, DAG))
    return Broadcast;

  if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
                                                Subtarget, DAG))
    return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(V1, V2, Mask,
                          // First 128-bit lane:
                          0, 16, 1, 17, 2, 18, 3, 19,
                          // Second 128-bit lane:
                          8, 24, 9, 25, 10, 26, 11, 27))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i16, V1, V2);
  if (isShuffleEquivalent(V1, V2, Mask,
                          // First 128-bit lane:
                          4, 20, 5, 21, 6, 22, 7, 23,
                          // Second 128-bit lane:
                          12, 28, 13, 29, 14, 30, 15, 31))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i16, V1, V2);
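
  // The second halves of these patterns restart at 8 and 24 rather than
  // continuing from the first half because VPUNPCKLWD/VPUNPCKHWD interleave
  // each 128-bit lane of the two sources independently.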

  // Try to use shift instructions.
  if (SDValue Shift =
          lowerVectorShuffleAsShift(DL, MVT::v16i16, V1, V2, Mask, DAG))
    return Shift;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
          DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
    return Rotate;

  if (isSingleInputShuffleMask(Mask)) {
    // There are no generalized cross-lane shuffle operations available on i16
    // element types.
    if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
      return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
                                                     Mask, DAG);

    SDValue PSHUFBMask[32];
    for (int i = 0; i < 16; ++i) {
      if (Mask[i] == -1) {
        PSHUFBMask[2 * i] = PSHUFBMask[2 * i + 1] = DAG.getUNDEF(MVT::i8);
        continue;
      }

      int M = i < 8 ? Mask[i] : Mask[i] - 8;
      assert(M >= 0 && M < 8 && "Invalid single-input mask!");
      PSHUFBMask[2 * i] = DAG.getConstant(2 * M, MVT::i8);
      PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, MVT::i8);
    }

    return DAG.getNode(
        ISD::BITCAST, DL, MVT::v16i16,
        DAG.getNode(
            X86ISD::PSHUFB, DL, MVT::v32i8,
            DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1),
            DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask)));
  }
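
  // The bitcast sandwich above is needed because PSHUFB is typed as v32i8:
  // the input is reinterpreted as bytes, shuffled, and the result cast back
  // to v16i16. Each word index M becomes the byte pair (2 * M, 2 * M + 1).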

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
          DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
    return Result;

  // Otherwise fall back on generic lowering.
  return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG);
}

/// \brief Handle lowering of 32-lane 8-bit integer shuffles.
///
/// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v32i8 shuffling.
static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
  assert(Subtarget->hasAVX2() && "We can only lower v32i8 with AVX2!");

  // Whenever we can lower this as a zext, that instruction is strictly faster
  // than any alternative. It also allows us to fold memory operands into the
  // shuffle in many cases.
  if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2,
                                                         Mask, Subtarget, DAG))
    return ZExt;

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v32i8, DL, V1,
                                                        Mask, Subtarget, DAG))
    return Broadcast;

  if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
                                                Subtarget, DAG))
    return Blend;

  // Use dedicated unpack instructions for masks that match their pattern.
  // Note that these are repeated 128-bit lane unpacks, not unpacks across all
  // 256-bit lanes.
  if (isShuffleEquivalent(
          V1, V2, Mask,
          // First 128-bit lane:
          0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39,
          // Second 128-bit lane:
          16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v32i8, V1, V2);
  if (isShuffleEquivalent(
          V1, V2, Mask,
          // First 128-bit lane:
          8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
          // Second 128-bit lane:
          24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v32i8, V1, V2);

  // Try to use shift instructions.
  if (SDValue Shift =
          lowerVectorShuffleAsShift(DL, MVT::v32i8, V1, V2, Mask, DAG))
    return Shift;

  // Try to use byte rotation instructions.
  if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
          DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
    return Rotate;

  if (isSingleInputShuffleMask(Mask)) {
    // There are no generalized cross-lane shuffle operations available on i8
    // element types.
    if (is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
      return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2,
                                                     Mask, DAG);

    SDValue PSHUFBMask[32];
    for (int i = 0; i < 32; ++i)
      PSHUFBMask[i] =
          Mask[i] < 0
              ? DAG.getUNDEF(MVT::i8)
              : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16, MVT::i8);

    return DAG.getNode(
        X86ISD::PSHUFB, DL, MVT::v32i8, V1,
        DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask));
  }

  // Try to simplify this by merging 128-bit lanes to enable a lane-based
  // shuffle.
  if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
          DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
    return Result;

  // Otherwise fall back on generic lowering.
  return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG);
}

/// \brief High-level routine to lower various 256-bit x86 vector shuffles.
///
/// This routine either breaks down the specific type of a 256-bit x86 vector
/// shuffle or splits it into two 128-bit shuffles and fuses the results back
/// together based on the available instructions.
static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                        MVT VT, const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();

  // There is a really nice hard cut-over between AVX1 and AVX2 that means we
  // can check for those subtargets here and avoid much of the subtarget
  // querying in the per-vector-type lowering routines. With AVX1 we have
  // essentially *zero* ability to manipulate a 256-bit vector with integer
  // types. Since we'll use floating point types there eventually, just
  // immediately cast everything to a float and operate entirely in that
  // domain.
  if (VT.isInteger() && !Subtarget->hasAVX2()) {
    int ElementBits = VT.getScalarSizeInBits();
    if (ElementBits < 32)
      // No floating point type available, decompose into 128-bit vectors.
      return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);

    MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
                                VT.getVectorNumElements());
    V1 = DAG.getNode(ISD::BITCAST, DL, FpVT, V1);
    V2 = DAG.getNode(ISD::BITCAST, DL, FpVT, V2);
    return DAG.getNode(ISD::BITCAST, DL, VT,
                       DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
  }
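
  // E.g. on AVX1 a v4i64 shuffle is retried here as v4f64: the mask is reused
  // unchanged and only bitcasts are inserted, so the same bits flow through
  // the 256-bit floating point shuffle units that AVX1 does provide.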

  switch (VT.SimpleTy) {
  case MVT::v4f64:
    return lowerV4F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v4i64:
    return lowerV4I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v8f32:
    return lowerV8F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v8i32:
    return lowerV8I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v16i16:
    return lowerV16I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v32i8:
    return lowerV32I8VectorShuffle(Op, V1, V2, Subtarget, DAG);

  default:
    llvm_unreachable("Not a valid 256-bit x86 vector type!");
  }
}

/// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  // X86 has dedicated unpack instructions that can handle specific blend
  // operations: UNPCKH and UNPCKL.
  if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 2, 10, 4, 12, 6, 14))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f64, V1, V2);
  if (isShuffleEquivalent(V1, V2, Mask, 1, 9, 3, 11, 5, 13, 7, 15))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f64, V1, V2);

  // FIXME: Implement direct support for this type!
  return splitAndLowerVectorShuffle(DL, MVT::v8f64, V1, V2, Mask, DAG);
}

/// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                        const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(V1, V2, Mask,
                          0, 16, 1, 17, 4, 20, 5, 21,
                          8, 24, 9, 25, 12, 28, 13, 29))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16f32, V1, V2);
  if (isShuffleEquivalent(V1, V2, Mask,
                          2, 18, 3, 19, 6, 22, 7, 23,
                          10, 26, 11, 27, 14, 30, 15, 31))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16f32, V1, V2);

  // FIXME: Implement direct support for this type!
  return splitAndLowerVectorShuffle(DL, MVT::v16f32, V1, V2, Mask, DAG);
}

/// \brief Handle lowering of 8-lane 64-bit integer shuffles.
static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");

  // X86 has dedicated unpack instructions that can handle specific blend
  // operations: UNPCKH and UNPCKL.
  if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 2, 10, 4, 12, 6, 14))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i64, V1, V2);
  if (isShuffleEquivalent(V1, V2, Mask, 1, 9, 3, 11, 5, 13, 7, 15))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i64, V1, V2);

  // FIXME: Implement direct support for this type!
  return splitAndLowerVectorShuffle(DL, MVT::v8i64, V1, V2, Mask, DAG);
}

/// \brief Handle lowering of 16-lane 32-bit integer shuffles.
static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                        const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");

  // Use dedicated unpack instructions for masks that match their pattern.
  if (isShuffleEquivalent(V1, V2, Mask,
                          0, 16, 1, 17, 4, 20, 5, 21,
                          8, 24, 9, 25, 12, 28, 13, 29))
    return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i32, V1, V2);
  if (isShuffleEquivalent(V1, V2, Mask,
                          2, 18, 3, 19, 6, 22, 7, 23,
                          10, 26, 11, 27, 14, 30, 15, 31))
    return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i32, V1, V2);

  // FIXME: Implement direct support for this type!
  return splitAndLowerVectorShuffle(DL, MVT::v16i32, V1, V2, Mask, DAG);
}

/// \brief Handle lowering of 32-lane 16-bit integer shuffles.
static SDValue lowerV32I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                        const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
  assert(Subtarget->hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");

  // FIXME: Implement direct support for this type!
  return splitAndLowerVectorShuffle(DL, MVT::v32i16, V1, V2, Mask, DAG);
}

/// \brief Handle lowering of 64-lane 8-bit integer shuffles.
static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                       const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
  assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
  assert(Subtarget->hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");

  // FIXME: Implement direct support for this type!
  return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
}

/// \brief High-level routine to lower various 512-bit x86 vector shuffles.
///
/// This routine either breaks down the specific type of a 512-bit x86 vector
/// shuffle or splits it into two 256-bit shuffles and fuses the results back
/// together based on the available instructions.
static SDValue lower512BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
                                        MVT VT, const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  assert(Subtarget->hasAVX512() &&
         "Cannot lower 512-bit vectors w/o basic ISA!");

  // Check for being able to broadcast a single element.
  if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(VT.SimpleTy, DL, V1,
                                                        Mask, Subtarget, DAG))
    return Broadcast;

  // Dispatch to each element type for lowering. If we don't have support for
  // specific element type shuffles at 512 bits, immediately split them and
  // lower them. Each lowering routine of a given type is allowed to assume that
  // the requisite ISA extensions for that element type are available.
  switch (VT.SimpleTy) {
  case MVT::v8f64:
    return lowerV8F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v16f32:
    return lowerV16F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v8i64:
    return lowerV8I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v16i32:
    return lowerV16I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v32i16:
    if (Subtarget->hasBWI())
      return lowerV32I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
    break;
  case MVT::v64i8:
    if (Subtarget->hasBWI())
      return lowerV64I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
    break;

  default:
    llvm_unreachable("Not a valid 512-bit x86 vector type!");
  }

  // Otherwise fall back on splitting.
  return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
}

/// \brief Top-level lowering for x86 vector shuffles.
///
/// This handles decomposition, canonicalization, and lowering of all x86
/// vector shuffles. Most of the specific lowering strategies are encapsulated
/// above in helper routines. The canonicalization attempts to widen shuffles
/// to involve fewer lanes of wider elements, consolidate symmetric patterns
/// s.t. only one of the two inputs needs to be tested, etc.
static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
                                  SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  ArrayRef<int> Mask = SVOp->getMask();
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();
  int NumElements = VT.getVectorNumElements();
  SDLoc dl(Op);

  assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");

  bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
  if (V1IsUndef && V2IsUndef)
    return DAG.getUNDEF(VT);

  // When we create a shuffle node we put the UNDEF node as the second operand,
  // but in some cases the first operand may be transformed to UNDEF.
  // In this case we should just commute the node.
  if (V1IsUndef)
    return DAG.getCommutedVectorShuffle(*SVOp);

  // Check for non-undef masks pointing at an undef vector and make the masks
  // undef as well. This makes it easier to match the shuffle based solely on
  // the mask.
  if (V2IsUndef)
    for (int M : Mask)
      if (M >= NumElements) {
        SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
        for (int &M : NewMask)
          if (M >= NumElements)
            M = -1;
        return DAG.getVectorShuffle(VT, dl, V1, V2, NewMask);
      }

  // We actually see shuffles that are entirely re-arrangements of a set of
  // zero inputs. This mostly happens while decomposing complex shuffles into
  // simple ones. Directly lower these as a buildvector of zeros.
  SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
  if (Zeroable.all())
    return getZeroVector(VT, Subtarget, DAG, dl);

  // Try to collapse shuffles into using a vector type with fewer elements but
  // wider element types. We cap this to not form integers or floating point
  // elements wider than 64 bits, but it might be interesting to form i128
  // integers to handle flipping the low and high halves of AVX 256-bit vectors.
  SmallVector<int, 16> WidenedMask;
  if (VT.getScalarSizeInBits() < 64 &&
      canWidenShuffleElements(Mask, WidenedMask)) {
    MVT NewEltVT = VT.isFloatingPoint()
                       ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
                       : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
    MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
    // Make sure that the new vector type is legal. For example, v2f64 isn't
    // legal on SSE1.
    if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
      V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
      V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
      return DAG.getNode(ISD::BITCAST, dl, VT,
                         DAG.getVectorShuffle(NewVT, dl, V1, V2, WidenedMask));
    }
  }
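
  // As a concrete example of this widening: the v8i16 mask
  // <0, 1, 4, 5, 6, 7, 10, 11> moves whole aligned pairs of elements, so it
  // can be retried as the v4i32 mask <0, 2, 3, 5> on bitcast operands.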

  int NumV1Elements = 0, NumUndefElements = 0, NumV2Elements = 0;
  for (int M : SVOp->getMask())
    if (M < 0)
      ++NumUndefElements;
    else if (M < NumElements)
      ++NumV1Elements;
    else
      ++NumV2Elements;

  // Commute the shuffle as needed such that more elements come from V1 than
  // V2. This allows us to match the shuffle pattern strictly on how many
  // elements come from V1 without handling the symmetric cases.
  if (NumV2Elements > NumV1Elements)
    return DAG.getCommutedVectorShuffle(*SVOp);

  // When the number of V1 and V2 elements are the same, try to minimize the
  // number of uses of V2 in the low half of the vector. When that is tied,
  // ensure that the sum of indices for V1 is equal to or lower than the sum of
  // indices for V2. When those are equal, try to ensure that the number of odd
  // indices for V1 is lower than the number of odd indices for V2.
  if (NumV1Elements == NumV2Elements) {
    int LowV1Elements = 0, LowV2Elements = 0;
    for (int M : SVOp->getMask().slice(0, NumElements / 2))
      if (M >= NumElements)
        ++LowV2Elements;
      else if (M >= 0)
        ++LowV1Elements;
    if (LowV2Elements > LowV1Elements) {
      return DAG.getCommutedVectorShuffle(*SVOp);
    } else if (LowV2Elements == LowV1Elements) {
      int SumV1Indices = 0, SumV2Indices = 0;
      for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
        if (SVOp->getMask()[i] >= NumElements)
          SumV2Indices += i;
        else if (SVOp->getMask()[i] >= 0)
          SumV1Indices += i;
      if (SumV2Indices < SumV1Indices) {
        return DAG.getCommutedVectorShuffle(*SVOp);
      } else if (SumV2Indices == SumV1Indices) {
        int NumV1OddIndices = 0, NumV2OddIndices = 0;
        for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
          if (SVOp->getMask()[i] >= NumElements)
            NumV2OddIndices += i % 2;
          else if (SVOp->getMask()[i] >= 0)
            NumV1OddIndices += i % 2;
        if (NumV2OddIndices < NumV1OddIndices)
          return DAG.getCommutedVectorShuffle(*SVOp);
      }
    }
  }

  // For each vector width, delegate to a specialized lowering routine.
  if (VT.getSizeInBits() == 128)
    return lower128BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);

  if (VT.getSizeInBits() == 256)
    return lower256BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);

  // AVX-512 lowering is still largely incomplete; most 512-bit shuffles get
  // split into narrower ones by the routine below.
  // FIXME: Implement full AVX-512 support!
  if (VT.getSizeInBits() == 512)
    return lower512BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);

  llvm_unreachable("Unimplemented!");
}

//===----------------------------------------------------------------------===//
// Legacy vector shuffle lowering
//
// This is the legacy code path for handling vector shuffles, kept until the
// new lowering above fully replaces its functionality and performance.
//===----------------------------------------------------------------------===//

static bool isBlendMask(ArrayRef<int> MaskVals, MVT VT, bool hasSSE41,
                        bool hasInt256, unsigned *MaskOut = nullptr) {
  MVT EltVT = VT.getVectorElementType();

  // There is no blend with immediate in AVX-512.
  if (VT.is512BitVector())
    return false;

  if (!hasSSE41 || EltVT == MVT::i8)
    return false;
  if (!hasInt256 && VT == MVT::v16i16)
    return false;

  unsigned MaskValue = 0;
  unsigned NumElems = VT.getVectorNumElements();
  // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
  unsigned NumLanes = (NumElems - 1) / 8 + 1;
  unsigned NumElemsInLane = NumElems / NumLanes;
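
  // E.g. v16i16 gives NumLanes == 2 and NumElemsInLane == 8, so the loop
  // below builds one 8-bit immediate that must describe both lanes at once.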

  // Blend for v16i16 should be symmetric for both lanes.
  for (unsigned i = 0; i < NumElemsInLane; ++i) {
    int SndLaneEltIdx = (NumLanes == 2) ? MaskVals[i + NumElemsInLane] : -1;
    int EltIdx = MaskVals[i];

    if ((EltIdx < 0 || EltIdx == (int)i) &&
        (SndLaneEltIdx < 0 || SndLaneEltIdx == (int)(i + NumElemsInLane)))
      continue;

    if (((unsigned)EltIdx == (i + NumElems)) &&
        (SndLaneEltIdx < 0 ||
         (unsigned)SndLaneEltIdx == i + NumElems + NumElemsInLane))
      MaskValue |= (1 << i);
    else
      return false;
  }

  if (MaskOut)
    *MaskOut = MaskValue;
  return true;
}

// Try to lower a shuffle node into a simple blend instruction.
// This function assumes isBlendMask returns true for this
// ShuffleVectorSDNode.
static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
                                          unsigned MaskValue,
                                          const X86Subtarget *Subtarget,
                                          SelectionDAG &DAG) {
  MVT VT = SVOp->getSimpleValueType(0);
  MVT EltVT = VT.getVectorElementType();
  assert(isBlendMask(SVOp->getMask(), VT, Subtarget->hasSSE41(),
                     Subtarget->hasInt256()) &&
         "Trying to lower a VECTOR_SHUFFLE to a Blend but with the wrong mask");
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);
  unsigned NumElems = VT.getVectorNumElements();

  // Convert i32 vectors to floating point if it is not AVX2.
  // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
  MVT BlendVT = VT;
  if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
    BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
                               NumElems);
    V1 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V1);
    V2 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V2);
  }

  SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, V1, V2,
                            DAG.getConstant(MaskValue, MVT::i32));
  return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
}
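
// As a sketch of the encoding: for a v8f32 mask <0, 9, 2, 11, 4, 13, 6, 15>
// the odd elements come from V2, so isBlendMask computes MaskValue 0xAA and
// the BLENDI above becomes a single VBLENDPS with that immediate.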

/// In vector type \p VT, return true if the element at index \p InputIdx
/// falls on a different 128-bit lane than \p OutputIdx.
static bool ShuffleCrosses128bitLane(MVT VT, unsigned InputIdx,
                                     unsigned OutputIdx) {
  unsigned EltSize = VT.getVectorElementType().getSizeInBits();
  return InputIdx * EltSize / 128 != OutputIdx * EltSize / 128;
}

/// Generate a PSHUFB if possible. Selects elements from \p V1 according to
/// \p MaskVals. MaskVals[OutputIdx] = InputIdx specifies that we want to
/// shuffle the element at InputIdx in V1 to OutputIdx in the result. If \p
/// MaskVals refers to elements outside of \p V1 or is undef (-1), insert a
/// zero.
static SDValue getPSHUFB(ArrayRef<int> MaskVals, SDValue V1, SDLoc &dl,
                         SelectionDAG &DAG) {
  MVT VT = V1.getSimpleValueType();
  assert(VT.is128BitVector() || VT.is256BitVector());

  MVT EltVT = VT.getVectorElementType();
  unsigned EltSizeInBytes = EltVT.getSizeInBits() / 8;
  unsigned NumElts = VT.getVectorNumElements();

  SmallVector<SDValue, 32> PshufbMask;
  for (unsigned OutputIdx = 0; OutputIdx < NumElts; ++OutputIdx) {
    int InputIdx = MaskVals[OutputIdx];
    unsigned InputByteIdx;

    if (InputIdx < 0 || NumElts <= (unsigned)InputIdx)
      InputByteIdx = 0x80;
    else {
      // Cross lane is not allowed.
      if (ShuffleCrosses128bitLane(VT, InputIdx, OutputIdx))
        return SDValue();
      InputByteIdx = InputIdx * EltSizeInBytes;
      // Index is a byte offset within the 128-bit lane.
      InputByteIdx &= 0xf;
    }

    for (unsigned j = 0; j < EltSizeInBytes; ++j) {
      PshufbMask.push_back(DAG.getConstant(InputByteIdx, MVT::i8));
      if (InputByteIdx != 0x80)
        ++InputByteIdx;
    }
  }

  MVT ShufVT = MVT::getVectorVT(MVT::i8, PshufbMask.size());
  if (ShufVT != VT)
    V1 = DAG.getNode(ISD::BITCAST, dl, ShufVT, V1);
  return DAG.getNode(X86ISD::PSHUFB, dl, ShufVT, V1,
                     DAG.getNode(ISD::BUILD_VECTOR, dl, ShufVT, PshufbMask));
}
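
// As a sketch of the expansion above: an element index expands to
// EltSizeInBytes consecutive byte selects, and a 0x80 control byte makes
// PSHUFB write a zero into that result byte rather than copying one.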

// v8i16 shuffles - Prefer shuffles in the following order:
// 1. [all]   pshuflw, pshufhw, optional move
// 2. [ssse3] 1 x pshufb
// 3. [ssse3] 2 x pshufb + 1 x por
// 4. [all]   mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
static SDValue
LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
                         SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);
  SmallVector<int, 8> MaskVals;

  // Determine if more than 1 of the words in each of the low and high quadwords
  // of the result come from the same quadword of one of the two inputs. Undef
  // mask values count as coming from any quadword, for better codegen.
  //
  // Lo/HiQuad[i] indicates how many words from the ith quad of the input feed
  // this quad. For i, 0 and 1 refer to V1, 2 and 3 refer to V2.
  unsigned LoQuad[] = { 0, 0, 0, 0 };
  unsigned HiQuad[] = { 0, 0, 0, 0 };
  // Indices of quads used.
  std::bitset<4> InputQuads;
  for (unsigned i = 0; i < 8; ++i) {
    unsigned *Quad = i < 4 ? LoQuad : HiQuad;
    int EltIdx = SVOp->getMaskElt(i);
    MaskVals.push_back(EltIdx);
    if (EltIdx < 0) {
      ++Quad[0];
      ++Quad[1];
      ++Quad[2];
      ++Quad[3];
      continue;
    }
    ++Quad[EltIdx / 4];
    InputQuads.set(EltIdx / 4);
  }

  int BestLoQuad = -1;
  unsigned MaxQuad = 1;
  for (unsigned i = 0; i < 4; ++i) {
    if (LoQuad[i] > MaxQuad) {
      BestLoQuad = i;
      MaxQuad = LoQuad[i];
    }
  }

  int BestHiQuad = -1;
  MaxQuad = 1;
  for (unsigned i = 0; i < 4; ++i) {
    if (HiQuad[i] > MaxQuad) {
      BestHiQuad = i;
      MaxQuad = HiQuad[i];
    }
  }

  // For SSSE3, if all 8 words of the result come from only 1 quadword of each
  // of the two input vectors, shuffle them into one input vector so only a
  // single pshufb instruction is necessary. If there are more than 2 input
  // quads, disable the next transformation since it does not help SSSE3.
  bool V1Used = InputQuads[0] || InputQuads[1];
  bool V2Used = InputQuads[2] || InputQuads[3];
  if (Subtarget->hasSSSE3()) {
    if (InputQuads.count() == 2 && V1Used && V2Used) {
      BestLoQuad = InputQuads[0] ? 0 : 1;
      BestHiQuad = InputQuads[2] ? 2 : 3;
    }
    if (InputQuads.count() > 2) {
      BestLoQuad = -1;
      BestHiQuad = -1;
    }
  }

  // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update
  // the shuffle mask. If a quad is scored as -1, that means that it contains
  // words from all 4 input quadwords.
  SDValue NewV;
  if (BestLoQuad >= 0 || BestHiQuad >= 0) {
    int MaskV[] = {
      BestLoQuad < 0 ? 0 : BestLoQuad,
      BestHiQuad < 0 ? 1 : BestHiQuad
    };
    NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
                  DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
                  DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
    NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);

    // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
    // source words for the shuffle, to aid later transformations.
    bool AllWordsInNewV = true;
    bool InOrder[2] = { true, true };
    for (unsigned i = 0; i != 8; ++i) {
      int idx = MaskVals[i];
      if (idx != (int)i)
        InOrder[i/4] = false;
      if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
        continue;
      AllWordsInNewV = false;
      break;
    }

    bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV;
    if (AllWordsInNewV) {
      for (int i = 0; i != 8; ++i) {
        int idx = MaskVals[i];
        if (idx < 0)
          continue;
        idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4;
        if ((idx != i) && idx < 4)
          pshufhw = false;
        if ((idx != i) && idx > 3)
          pshuflw = false;
      }
      V1 = NewV;
      V2Used = false;
      BestLoQuad = 0;
      BestHiQuad = 1;
    }

    // If we've eliminated the use of V2, and the new mask is a pshuflw or
    // pshufhw, that's as cheap as it gets. Return the new shuffle.
    if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
      unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW;
      unsigned TargetMask = 0;
      NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
                                  DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
      ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
      TargetMask = pshufhw ? getShufflePSHUFHWImmediate(SVOp):
                             getShufflePSHUFLWImmediate(SVOp);
      V1 = NewV.getOperand(0);
      return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
    }
  }

  // Promote splats to a larger type which usually leads to more efficient code.
  // FIXME: Is this true if pshufb is available?
  if (SVOp->isSplat())
    return PromoteSplat(SVOp, DAG);

  // If we have SSSE3, and all words of the result are from 1 input vector,
  // case 2 is generated, otherwise case 3 is generated. If no SSSE3
  // is present, fall back to case 4.
  if (Subtarget->hasSSSE3()) {
    SmallVector<SDValue,16> pshufbMask;

    // If we have elements from both input vectors, set the high bit of the
    // shuffle mask element to zero out elements that come from V2 in the V1
    // mask, and elements that come from V1 in the V2 mask, so that the two
    // results can be OR'd together.
    bool TwoInputs = V1Used && V2Used;
    V1 = getPSHUFB(MaskVals, V1, dl, DAG);
    if (!TwoInputs)
      return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);

    // Calculate the shuffle mask for the second input, shuffle it, and
    // OR it with the first shuffled input.
    CommuteVectorShuffleMask(MaskVals, 8);
    V2 = getPSHUFB(MaskVals, V2, dl, DAG);
    V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
    return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
  }

  // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
  // and update MaskVals with new element order.
  std::bitset<8> InOrder;
  if (BestLoQuad >= 0) {
    int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 };
    for (int i = 0; i != 4; ++i) {
      int idx = MaskVals[i];
      if (idx < 0) {
        InOrder.set(i);
      } else if ((idx / 4) == BestLoQuad) {
        MaskV[i] = idx & 3;
        InOrder.set(i);
      }
    }
    NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
                                &MaskV[0]);

    if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
      ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
      NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
                                  NewV.getOperand(0),
                                  getShufflePSHUFLWImmediate(SVOp), DAG);
    }
  }

  // If BestHiQuad >= 0, generate a pshufhw to put the high elements in order,
  // and update MaskVals with the new element order.
  if (BestHiQuad >= 0) {
    int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 };
    for (unsigned i = 4; i != 8; ++i) {
      int idx = MaskVals[i];
      if (idx < 0) {
        InOrder.set(i);
      } else if ((idx / 4) == BestHiQuad) {
        MaskV[i] = (idx & 3) + 4;
        InOrder.set(i);
      }
    }
    NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
                                &MaskV[0]);

    if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
      ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
      NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
                                  NewV.getOperand(0),
                                  getShufflePSHUFHWImmediate(SVOp), DAG);
    }
  }

  // In case BestHi & BestLo were both -1, which means each quadword has a word
  // from each of the four input quadwords, calculate the InOrder bitvector now
  // before falling through to the insert/extract cleanup.
  if (BestLoQuad == -1 && BestHiQuad == -1) {
    NewV = V1;
    for (int i = 0; i != 8; ++i)
      if (MaskVals[i] < 0 || MaskVals[i] == i)
        InOrder.set(i);
  }

  // The other elements are put in the right place using pextrw and pinsrw.
  for (unsigned i = 0; i != 8; ++i) {
    if (InOrder[i])
      continue;
    int EltIdx = MaskVals[i];
    if (EltIdx < 0)
      continue;
    SDValue ExtOp = (EltIdx < 8) ?
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
                  DAG.getIntPtrConstant(EltIdx)) :
      DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
                  DAG.getIntPtrConstant(EltIdx - 8));
    NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
                       DAG.getIntPtrConstant(i));
  }
  return NewV;
}

/// \brief v16i16 shuffles
///
/// FIXME: We only support generation of a single pshufb currently. We can
/// generalize the other applicable cases from LowerVECTOR_SHUFFLEv8i16 as
/// well (e.g 2 x pshufb + 1 x por).
static SDValue
LowerVECTOR_SHUFFLEv16i16(SDValue Op, SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);

  if (V2.getOpcode() != ISD::UNDEF)
    return SDValue();

  SmallVector<int, 16> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
  return getPSHUFB(MaskVals, V1, dl, DAG);
}

// v16i8 shuffles - Prefer shuffles in the following order:
// 1. [ssse3] 1 x pshufb
// 2. [ssse3] 2 x pshufb + 1 x por
// 3. [all]   v8i16 shuffle + N x pextrw + rotate + pinsrw
static SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
                                        const X86Subtarget* Subtarget,
                                        SelectionDAG &DAG) {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);
  ArrayRef<int> MaskVals = SVOp->getMask();

  // Promote splats to a larger type which usually leads to more efficient code.
  // FIXME: Is this true if pshufb is available?
  if (SVOp->isSplat())
    return PromoteSplat(SVOp, DAG);

  // If we have SSSE3, case 1 is generated when all result bytes come from
  // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
  // present, fall back to case 3.

  // If SSSE3, use 1 pshufb instruction per vector with elements in the result.
  if (Subtarget->hasSSSE3()) {
    SmallVector<SDValue,16> pshufbMask;

    // If all result elements are from one input vector, then only translate
    // undef mask values to 0x80 (zero out result) in the pshufb mask.
    //
    // Otherwise, we have elements from both input vectors, and must zero out
    // elements that come from V2 in the first mask, and V1 in the second mask
    // so that we can OR them together.
    for (unsigned i = 0; i != 16; ++i) {
      int EltIdx = MaskVals[i];
      if (EltIdx < 0 || EltIdx >= 16)
        EltIdx = 0x80;
      pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
    }
    V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
                     DAG.getNode(ISD::BUILD_VECTOR, dl,
                                 MVT::v16i8, pshufbMask));

    // As PSHUFB will zero elements with negative indices, it's safe to ignore
    // the 2nd operand if it's undefined or zero.
    if (V2.getOpcode() == ISD::UNDEF ||
        ISD::isBuildVectorAllZeros(V2.getNode()))
      return V1;

    // Calculate the shuffle mask for the second input, shuffle it, and
    // OR it with the first shuffled input.
    pshufbMask.clear();
    for (unsigned i = 0; i != 16; ++i) {
      int EltIdx = MaskVals[i];
      EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16;
      pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
    }
    V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
                     DAG.getNode(ISD::BUILD_VECTOR, dl,
                                 MVT::v16i8, pshufbMask));
    return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
  }

  // No SSSE3 - Calculate in place words and then fix all out of place words
  // with 0-16 extracts & inserts. Worst case is 16 bytes out of order from
  // the 16 different words that comprise the two doublequadword input vectors.
  V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
  V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
  SDValue NewV = V1;
  for (int i = 0; i != 8; ++i) {
    int Elt0 = MaskVals[i*2];
    int Elt1 = MaskVals[i*2+1];

    // This word of the result is all undef, skip it.
    if (Elt0 < 0 && Elt1 < 0)
      continue;

    // This word of the result is already in the correct place, skip it.
    if ((Elt0 == i*2) && (Elt1 == i*2+1))
      continue;

    SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
    SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
    SDValue InsElt;

    // If Elt0 and Elt1 are defined, consecutive, and can be loaded together
    // using a single extract, load it and store it.
    if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
      InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
                           DAG.getIntPtrConstant(Elt1 / 2));
      NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
                         DAG.getIntPtrConstant(i));
      continue;
    }

    // If Elt1 is defined, extract it from the appropriate source. If the
    // source byte is not also odd, shift the extracted word left 8 bits;
    // otherwise clear the bottom 8 bits if we need to do an or.
    if (Elt1 >= 0) {
      InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
                           DAG.getIntPtrConstant(Elt1 / 2));
      if ((Elt1 & 1) == 0)
        InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
                             DAG.getConstant(8,
                                 TLI.getShiftAmountTy(InsElt.getValueType())));
      else if (Elt0 >= 0)
        InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
                             DAG.getConstant(0xFF00, MVT::i16));
    }
    // If Elt0 is defined, extract it from the appropriate source. If the
    // source byte is not also even, shift the extracted word right 8 bits. If
    // Elt1 was also defined, OR the extracted values together before
    // inserting them in the result.
    if (Elt0 >= 0) {
      SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
                                    Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
      if ((Elt0 & 1) != 0)
        InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
                              DAG.getConstant(8,
                                  TLI.getShiftAmountTy(InsElt0.getValueType())));
      else if (Elt1 >= 0)
        InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
                              DAG.getConstant(0x00FF, MVT::i16));
      InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
                         : InsElt0;
    }
    NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
                       DAG.getIntPtrConstant(i));
  }
  return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
}

// v32i8 shuffles - Translate to VPSHUFB if possible.
static
SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp,
                                 const X86Subtarget *Subtarget,
                                 SelectionDAG &DAG) {
  MVT VT = SVOp->getSimpleValueType(0);
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);
  SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());

  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
  bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode());
  bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode());

  // VPSHUFB may be generated if
  // (1) one of the input vectors is undefined or zeroinitializer.
  // The mask value 0x80 puts 0 in the corresponding slot of the vector.
  // And (2) the mask indexes don't cross the 128-bit lane.
  if (VT != MVT::v32i8 || !Subtarget->hasInt256() ||
      (!V2IsUndef && !V2IsAllZero && !V1IsAllZero))
    return SDValue();

  if (V1IsAllZero && !V2IsAllZero) {
    CommuteVectorShuffleMask(MaskVals, 32);
    V1 = V2;
  }
  return getPSHUFB(MaskVals, V1, dl, DAG);
}

/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
/// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
/// done when every pair / quad of shuffle mask elements point to elements in
/// the right sequence. e.g.
/// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
static
SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
                                 SelectionDAG &DAG) {
  MVT VT = SVOp->getSimpleValueType(0);
  SDLoc dl(SVOp);
  unsigned NumElems = VT.getVectorNumElements();
  MVT NewVT;
  unsigned Scale;
  switch (VT.SimpleTy) {
  default: llvm_unreachable("Unexpected!");
  case MVT::v2i64:
  case MVT::v2f64:
           return SDValue(SVOp, 0);
  case MVT::v4f32:  NewVT = MVT::v2f64; Scale = 2; break;
  case MVT::v4i32:  NewVT = MVT::v2i64; Scale = 2; break;
  case MVT::v8i16:  NewVT = MVT::v4i32; Scale = 2; break;
  case MVT::v16i8:  NewVT = MVT::v4i32; Scale = 4; break;
  case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break;
  case MVT::v32i8:  NewVT = MVT::v8i32; Scale = 4; break;
  }

  SmallVector<int, 8> MaskVec;
  for (unsigned i = 0; i != NumElems; i += Scale) {
    int StartIdx = -1;
    for (unsigned j = 0; j != Scale; ++j) {
      int EltIdx = SVOp->getMaskElt(i+j);
      if (EltIdx < 0)
        continue;
      if (StartIdx < 0)
        StartIdx = (EltIdx / Scale);
      if (EltIdx != (int)(StartIdx*Scale + j))
        return SDValue();
    }
    MaskVec.push_back(StartIdx);
  }

  SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0));
  SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1));
  return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
}
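
// Continuing the example from the doc comment above: the v8i16 mask
// <2, 3, 10, 11, 0, 1, 14, 15> moves aligned pairs, so it is rewritten as
// the v4i32 mask <1, 5, 0, 7> on bitcast operands.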

/// getVZextMovL - Return a zero-extending vector move low node.
///
static SDValue getVZextMovL(MVT VT, MVT OpVT,
                            SDValue SrcOp, SelectionDAG &DAG,
                            const X86Subtarget *Subtarget, SDLoc dl) {
  if (VT == MVT::v2f64 || VT == MVT::v4f32) {
    LoadSDNode *LD = nullptr;
    if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
      LD = dyn_cast<LoadSDNode>(SrcOp);

    if (!LD) {
      // movssrr and movsdrr do not clear top bits. Try to use movd, movq
      // instead.
      MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
      if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
          SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
          SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
          SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
        // PR2108
        OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
        return DAG.getNode(ISD::BITCAST, dl, VT,
                           DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
                                       DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                                   OpVT,
                                                   SrcOp.getOperand(0)
                                                        .getOperand(0))));
      }
    }
  }

  return DAG.getNode(ISD::BITCAST, dl, VT,
                     DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
                                 DAG.getNode(ISD::BITCAST, dl,
                                             OpVT, SrcOp)));
}

/// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vector shuffles that
/// could not be matched by any known target-specific shuffle.
static SDValue
LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {

  SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG);
  if (NewOp.getNode())
    return NewOp;

  MVT VT = SVOp->getSimpleValueType(0);

  unsigned NumElems = VT.getVectorNumElements();
  unsigned NumLaneElems = NumElems / 2;

  SDLoc dl(SVOp);
  MVT EltVT = VT.getVectorElementType();
  MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
  SDValue Output[2];

  SmallVector<int, 16> Mask;
  for (unsigned l = 0; l < 2; ++l) {
    // Build a shuffle mask for the output, discovering on the fly which
    // input vectors to use as shuffle operands (recorded in InputUsed).
    // If building a suitable shuffle vector proves too hard, then bail
    // out with UseBuildVector set.
    bool UseBuildVector = false;
    int InputUsed[2] = { -1, -1 }; // Not yet discovered.
    unsigned LaneStart = l * NumLaneElems;
    for (unsigned i = 0; i != NumLaneElems; ++i) {
      // The mask element. This indexes into the input.
      int Idx = SVOp->getMaskElt(i+LaneStart);
      if (Idx < 0) {
        // The mask element does not index into any input vector.
        Mask.push_back(-1);
        continue;
      }

      // The input vector this mask element indexes into.
      int Input = Idx / NumLaneElems;

      // Turn the index into an offset from the start of the input vector.
      Idx -= Input * NumLaneElems;

      // Find or create a shuffle vector operand to hold this input.
      unsigned OpNo;
      for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
        if (InputUsed[OpNo] == Input)
          // This input vector is already an operand.
          break;
        if (InputUsed[OpNo] < 0) {
          // Create a new operand for this input vector.
          InputUsed[OpNo] = Input;
          break;
        }
      }

      if (OpNo >= array_lengthof(InputUsed)) {
        // More than two input vectors used! Give up on trying to create a
        // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
        UseBuildVector = true;
        break;
      }

      // Add the mask index for the new shuffle vector.
      Mask.push_back(Idx + OpNo * NumLaneElems);
    }

    if (UseBuildVector) {
      SmallVector<SDValue, 16> SVOps;
      for (unsigned i = 0; i != NumLaneElems; ++i) {
        // The mask element. This indexes into the input.
        int Idx = SVOp->getMaskElt(i+LaneStart);
        if (Idx < 0) {
          SVOps.push_back(DAG.getUNDEF(EltVT));
          continue;
        }

        // The input vector this mask element indexes into.
        int Input = Idx / NumElems;

        // Turn the index into an offset from the start of the input vector.
        Idx -= Input * NumElems;

        // Extract the vector element by hand.
        SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
                                    SVOp->getOperand(Input),
                                    DAG.getIntPtrConstant(Idx)));
      }

      // Construct the output using a BUILD_VECTOR.
      Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, SVOps);
    } else if (InputUsed[0] < 0) {
      // No input vectors were used! The result is undefined.
      Output[l] = DAG.getUNDEF(NVT);
    } else {
      SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2),
                                        (InputUsed[0] % 2) * NumLaneElems,
                                        DAG, dl);
      // If only one input was used, use an undefined vector for the other.
      SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) :
        Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2),
                            (InputUsed[1] % 2) * NumLaneElems, DAG, dl);
      // At least one input vector was used. Create a new shuffle vector.
      Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
    }

    Mask.clear();
  }

  // Concatenate the result back into one 256-bit vector.
  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]);
}

/// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
/// 4 elements, and match them with several different shuffle types.
static SDValue
LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);
  MVT VT = SVOp->getSimpleValueType(0);

  assert(VT.is128BitVector() && "Unsupported vector size");

  std::pair<int, int> Locs[4];
  int Mask1[] = { -1, -1, -1, -1 };
  SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end());

  unsigned NumHi = 0;
  unsigned NumLo = 0;
  for (unsigned i = 0; i != 4; ++i) {
    int Idx = PermMask[i];
    if (Idx < 0) {
      Locs[i] = std::make_pair(-1, -1);
    } else {
      assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
      if (Idx < 4) {
        Locs[i] = std::make_pair(0, NumLo);
        Mask1[NumLo] = Idx;
        NumLo++;
      } else {
        Locs[i] = std::make_pair(1, NumHi);
        if (2+NumHi < 4)
          Mask1[2+NumHi] = Idx;
        NumHi++;
      }
    }
  }

  if (NumLo <= 2 && NumHi <= 2) {
    // If no more than two elements come from either vector, this can be
    // implemented with two shuffles. The first shuffle gathers the elements;
    // the second shuffle, which takes the first shuffle as both of its
    // vector operands, puts the elements into the right order.
    V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);

    int Mask2[] = { -1, -1, -1, -1 };

    for (unsigned i = 0; i != 4; ++i)
      if (Locs[i].first != -1) {
        unsigned Idx = (i < 2) ? 0 : 4;
        Idx += Locs[i].first * 2 + Locs[i].second;
        Mask2[i] = Idx;
      }

    return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
  }

  if (NumLo == 3 || NumHi == 3) {
    // Otherwise, we must have three elements from one vector, call it X, and
    // one element from the other, call it Y. First, use a shufps to build an
    // intermediate vector with the one element from Y and the element from X
    // that will be in the same half in the final destination (the indexes don't
    // matter). Then, use a shufps to build the final vector, taking the half
    // containing the element from Y from the intermediate, and the other half
    // from X.
    if (NumHi == 3) {
      // Normalize it so the 3 elements come from V1.
      CommuteVectorShuffleMask(PermMask, 4);
      std::swap(V1, V2);
    }

    // Find the element from V2.
    unsigned HiIndex;
    for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
      int Val = PermMask[HiIndex];
      if (Val < 0)
        continue;
      if (Val >= 4)
        break;
    }

    Mask1[0] = PermMask[HiIndex];
    Mask1[1] = -1;
    Mask1[2] = PermMask[HiIndex^1];
    Mask1[3] = -1;
    V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);

    if (HiIndex >= 2) {
      Mask1[0] = PermMask[0];
      Mask1[1] = PermMask[1];
      Mask1[2] = HiIndex & 1 ? 6 : 4;
      Mask1[3] = HiIndex & 1 ? 4 : 6;
      return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
    }

    Mask1[0] = HiIndex & 1 ? 2 : 0;
    Mask1[1] = HiIndex & 1 ? 0 : 2;
    Mask1[2] = PermMask[2];
    Mask1[3] = PermMask[3];
    if (Mask1[2] >= 0)
      Mask1[2] += 4;
    if (Mask1[3] >= 0)
      Mask1[3] += 4;
    return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
  }

  // Break it into (shuffle shuffle_hi, shuffle_lo).
  int LoMask[] = { -1, -1, -1, -1 };
  int HiMask[] = { -1, -1, -1, -1 };

  int *MaskPtr = LoMask;
  unsigned MaskIdx = 0;
  unsigned LoIdx = 0;
  unsigned HiIdx = 2;
  for (unsigned i = 0; i != 4; ++i) {
    if (i == 2) {
      MaskPtr = HiMask;
      MaskIdx = 1;
      LoIdx = 0;
      HiIdx = 2;
    }
    int Idx = PermMask[i];
    if (Idx < 0) {
      Locs[i] = std::make_pair(-1, -1);
    } else if (Idx < 4) {
      Locs[i] = std::make_pair(MaskIdx, LoIdx);
      MaskPtr[LoIdx] = Idx;
      LoIdx++;
    } else {
      Locs[i] = std::make_pair(MaskIdx, HiIdx);
      MaskPtr[HiIdx] = Idx;
      HiIdx++;
    }
  }

  SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
  SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
  int MaskOps[] = { -1, -1, -1, -1 };
  for (unsigned i = 0; i != 4; ++i)
    if (Locs[i].first != -1)
      MaskOps[i] = Locs[i].first * 4 + Locs[i].second;
  return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
}

static bool MayFoldVectorLoad(SDValue V) {
  while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
    V = V.getOperand(0);

  if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
    V = V.getOperand(0);
  if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
      V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
    // BUILD_VECTOR (load), undef
    V = V.getOperand(0);

  return MayFoldLoad(V);
}

static
SDValue getMOVDDup(SDValue &Op, SDLoc &dl, SDValue V1, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();

  // Canonicalize to v2f64.
  V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
  return DAG.getNode(ISD::BITCAST, dl, VT,
                     getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
                                          V1, DAG));
}

static
SDValue getMOVLowToHigh(SDValue &Op, SDLoc &dl, SelectionDAG &DAG,
                        bool HasSSE2) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();

  assert(VT != MVT::v2i64 && "unsupported shuffle type");

  if (HasSSE2 && VT == MVT::v2f64)
    return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);

  // v4f32 or v4i32: canonicalize to v4f32 (which is legal for SSE1)
  return DAG.getNode(ISD::BITCAST, dl, VT,
                     getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32,
                             DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1),
                             DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG));
}

static
SDValue getMOVHighToLow(SDValue &Op, SDLoc &dl, SelectionDAG &DAG) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();

  assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
         "unsupported shuffle type");

  if (V2.getOpcode() == ISD::UNDEF)
    V2 = V1;

  // v4i32 or v4f32
  return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
}

static
SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();
  unsigned NumElems = VT.getVectorNumElements();

  // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
  // operand of these instructions is only memory, so check if there's a
  // potential load folding here, otherwise use SHUFPS or MOVSD to match the
  // same masks.
  bool CanFoldLoad = false;

  // Trivial case, when V2 comes from a load.
  if (MayFoldVectorLoad(V2))
    CanFoldLoad = true;

  // When V1 is a load, it can be folded later into a store in isel, example:
  //  (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
  //    turns into:
  //  (MOVLPSmr addr:$src1, VR128:$src2)
  // So, recognize this potential and also use MOVLPS or MOVLPD
  else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
    CanFoldLoad = true;
12390 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12392 if (HasSSE2 && NumElems == 2)
12393 return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);
12396 // If we don't care about the second element, proceed to use movss.
12397 if (SVOp->getMaskElt(1) != -1)
12398 return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
12401 // movl and movlp will both match v2i64, but v2i64 is never matched by
12402 // movl earlier because we make it strict to avoid messing with the movlp load
12403 // folding logic (see the code above getMOVLP call). Match it here then,
12404 // this is horrible, but will stay like this until we move all shuffle
12405 // matching to x86 specific nodes. Note that for the 1st condition all
12406 // types are matched with movsd.
12408 // FIXME: isMOVLMask should be checked and matched before getMOVLP,
12409 // as to remove this logic from here, as much as possible
12410 if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT))
12411 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12412 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12415 assert(VT != MVT::v4i32 && "unsupported shuffle type");
12417 // Invert the operand order and use SHUFPS to match it.
12418 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1,
12419 getShuffleSHUFImmediate(SVOp), DAG);
static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
                                         SelectionDAG &DAG) {
  SDLoc dl(Load);
  MVT VT = Load->getSimpleValueType(0);
  MVT EVT = VT.getVectorElementType();
  SDValue Addr = Load->getOperand(1);
  SDValue NewAddr = DAG.getNode(
      ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
      DAG.getConstant(Index * EVT.getStoreSize(), Addr.getSimpleValueType()));

  SDValue NewLoad =
      DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
                  DAG.getMachineFunction().getMachineMemOperand(
                      Load->getMemOperand(), 0, EVT.getStoreSize()));
  return NewLoad;
}
// It is only safe to call this function if isINSERTPSMask is true for
// this shufflevector mask.
static SDValue getINSERTPS(ShuffleVectorSDNode *SVOp, SDLoc &dl,
                           SelectionDAG &DAG) {
  // Generate an insertps instruction when inserting an f32 from memory onto a
  // v4f32 or when copying a member from one v4f32 to another.
  // We also use it for transferring i32 from one register to another,
  // since it simply copies the same bits.
  // If we're transferring an i32 from memory to a specific element in a
  // register, we output a generic DAG that will match the PINSRD
  // instruction.
  MVT VT = SVOp->getSimpleValueType(0);
  MVT EVT = VT.getVectorElementType();
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  auto Mask = SVOp->getMask();
  assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
         "unsupported vector type for insertps/pinsrd");

  auto FromV1Predicate = [](const int &i) { return i < 4 && i > -1; };
  auto FromV2Predicate = [](const int &i) { return i >= 4; };
  int FromV1 = std::count_if(Mask.begin(), Mask.end(), FromV1Predicate);

  SDValue From;
  SDValue To;
  unsigned DestIndex;
  if (FromV1 == 1) {
    From = V1;
    To = V2;
    DestIndex = std::find_if(Mask.begin(), Mask.end(), FromV1Predicate) -
                Mask.begin();

    // If we have 1 element from each vector, we have to check if we're
    // changing V1's element's place. If so, we're done. Otherwise, we
    // should assume we're changing V2's element's place and behave
    // accordingly.
    int FromV2 = std::count_if(Mask.begin(), Mask.end(), FromV2Predicate);
    assert(DestIndex <= INT32_MAX && "truncated destination index");
    if (FromV1 == FromV2 &&
        static_cast<int>(DestIndex) == Mask[DestIndex] % 4) {
      From = V2;
      To = V1;
      DestIndex =
          std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
    }
  } else {
    assert(std::count_if(Mask.begin(), Mask.end(), FromV2Predicate) == 1 &&
           "More than one element from V1 and from V2, or no elements from one "
           "of the vectors. This case should not have returned true from "
           "isINSERTPSMask");
    From = V2;
    To = V1;
    DestIndex =
        std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
  }

  // Get an index into the source vector in the range [0,4) (the mask is
  // in the range [0,8) because it can address V1 and V2).
  unsigned SrcIndex = Mask[DestIndex] % 4;
  if (MayFoldLoad(From)) {
    // Trivial case, when From comes from a load and is only used by the
    // shuffle. Make it use insertps from the vector that we need from that
    // load.
    SDValue NewLoad =
        NarrowVectorLoadToElement(cast<LoadSDNode>(From), SrcIndex, DAG);
    if (!NewLoad.getNode())
      return SDValue();

    if (EVT == MVT::f32) {
      // Create this as a scalar to vector to match the instruction pattern.
      SDValue LoadScalarToVector =
          DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, NewLoad);
      SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4);
      return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, LoadScalarToVector,
                         InsertpsMask);
    } else { // EVT == MVT::i32
      // If we're getting an i32 from memory, use an INSERT_VECTOR_ELT
      // instruction, to match the PINSRD instruction, which loads an i32 to a
      // certain vector element.
      return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, To, NewLoad,
                         DAG.getConstant(DestIndex, MVT::i32));
    }
  }

  // Vector-element-to-vector
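  // The insertps immediate encodes the source element in bits [7:6], the
  // destination element in bits [5:4], and the zero mask (unused here) in
  // bits [3:0].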
  SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4 | SrcIndex << 6);
  return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, From, InsertpsMask);
}
// Reduce a vector shuffle to zext.
static SDValue LowerVectorIntExtend(SDValue Op, const X86Subtarget *Subtarget,
                                    SelectionDAG &DAG) {
  // PMOVZX is only available from SSE41.
  if (!Subtarget->hasSSE41())
    return SDValue();

  MVT VT = Op.getSimpleValueType();

  // Only AVX2 supports 256-bit vector integer extending.
  if (!Subtarget->hasInt256() && VT.is256BitVector())
    return SDValue();

  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDLoc DL(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  unsigned NumElems = VT.getVectorNumElements();

  // Extending is a unary operation and the element type of the source vector
  // won't be equal to or larger than i64.
  if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() ||
      VT.getVectorElementType() == MVT::i64)
    return SDValue();

  // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4.
  unsigned Shift = 1; // Start from 2, i.e. 1 << 1.
  while ((1U << Shift) < NumElems) {
    if (SVOp->getMaskElt(1U << Shift) == 1)
      break;
    Shift += 1;
    // The maximal ratio is 8, i.e. from i8 to i64.
    if (Shift > 3)
      return SDValue();
  }

  // Check the shuffle mask.
  unsigned Mask = (1U << Shift) - 1;
  for (unsigned i = 0; i != NumElems; ++i) {
    int EltIdx = SVOp->getMaskElt(i);
    if ((i & Mask) != 0 && EltIdx != -1)
      return SDValue();
    if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift))
      return SDValue();
  }

  unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift;
  MVT NeVT = MVT::getIntegerVT(NBits);
  MVT NVT = MVT::getVectorVT(NeVT, NumElems >> Shift);

  if (!DAG.getTargetLoweringInfo().isTypeLegal(NVT))
    return SDValue();

  return DAG.getNode(ISD::BITCAST, DL, VT,
                     DAG.getNode(X86ISD::VZEXT, DL, NVT, V1));
}
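/// NormalizeVectorShuffle - Canonicalize a generic shuffle before matching:
/// fold zero shuffles to a zero vector, splats from foldable loads to
/// VBROADCAST, zero-extension patterns to VZEXT, and profitable cases to
/// narrower shuffles.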
static SDValue NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
                                      SelectionDAG &DAG) {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);

  if (isZeroShuffle(SVOp))
    return getZeroVector(VT, Subtarget, DAG, dl);

  // Handle splat operations.
  if (SVOp->isSplat()) {
    // Use vbroadcast whenever the splat comes from a foldable load.
    SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
    if (Broadcast.getNode())
      return Broadcast;
  }

  // Check integer expanding shuffles.
  SDValue NewOp = LowerVectorIntExtend(Op, Subtarget, DAG);
  if (NewOp.getNode())
    return NewOp;

  // If the shuffle can be profitably rewritten as a narrower shuffle, then
  // do it!
  if (VT == MVT::v8i16 || VT == MVT::v16i8 || VT == MVT::v16i16 ||
      VT == MVT::v32i8) {
    SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
    if (NewOp.getNode())
      return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
  } else if (VT.is128BitVector() && Subtarget->hasSSE2()) {
    // FIXME: Figure out a cleaner way to do this.
    if (ISD::isBuildVectorAllZeros(V2.getNode())) {
      SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
      if (NewOp.getNode()) {
        MVT NewVT = NewOp.getSimpleValueType();
        if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
                               NewVT, true, false))
          return getVZextMovL(VT, NewVT, NewOp.getOperand(0), DAG, Subtarget,
                              dl);
      }
    } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
      SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
      if (NewOp.getNode()) {
        MVT NewVT = NewOp.getSimpleValueType();
        if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
          return getVZextMovL(VT, NewVT, NewOp.getOperand(1), DAG, Subtarget,
                              dl);
      }
    }
  }
  return SDValue();
}
SDValue
X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  unsigned NumElems = VT.getVectorNumElements();
  bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
  bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
  bool V1IsSplat = false;
  bool V2IsSplat = false;
  bool HasSSE2 = Subtarget->hasSSE2();
  bool HasFp256 = Subtarget->hasFp256();
  bool HasInt256 = Subtarget->hasInt256();
  MachineFunction &MF = DAG.getMachineFunction();
  bool OptForSize =
      MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);

  // Check if we should use the experimental vector shuffle lowering. If so,
  // delegate completely to that code path.
  if (ExperimentalVectorShuffleLowering)
    return lowerVectorShuffle(Op, Subtarget, DAG);

  assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");

  if (V1IsUndef && V2IsUndef)
    return DAG.getUNDEF(VT);

  // When we create a shuffle node we put the UNDEF node to second operand,
  // but in some cases the first operand may be transformed to UNDEF.
  // In this case we should just commute the node.
  if (V1IsUndef)
    return DAG.getCommutedVectorShuffle(*SVOp);

  // Vector shuffle lowering takes 3 steps:
  //
  // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
  //    narrowing and commutation of operands should be handled.
  // 2) Matching of shuffles with known shuffle masks to x86 target specific
  //    instructions.
  // 3) Rewriting of unmatched masks into new generic shuffle operations,
  //    so the shuffle can be broken into other shuffles and the legalizer can
  //    try the lowering again.
  //
  // The general idea is that no vector_shuffle operation should be left to
  // be matched during isel, all of them must be converted to a target specific
  // node here.

  // Normalize the input vectors. Here splats, zeroed vectors, profitable
  // narrowing and commutation of operands should be handled. The actual code
  // doesn't include all of those, work in progress...
  SDValue NewOp = NormalizeVectorShuffle(Op, Subtarget, DAG);
  if (NewOp.getNode())
    return NewOp;

  SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end());

  // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
  // unpckh_undef). Only use pshufd if speed is more important than size.
  if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasInt256))
    return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
  if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasInt256))
    return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);

  if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() &&
      V2IsUndef && MayFoldVectorLoad(V1))
    return getMOVDDup(Op, dl, V1, DAG);

  if (isMOVHLPS_v_undef_Mask(M, VT))
    return getMOVHighToLow(Op, dl, DAG);

  // Used to match splats.
  if (HasSSE2 && isUNPCKHMask(M, VT, HasInt256) && V2IsUndef &&
      (VT == MVT::v2f64 || VT == MVT::v2i64))
    return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);

  if (isPSHUFDMask(M, VT)) {
    // The actual implementation will match the mask in the if above and then
    // during isel it can match several different instructions, not only pshufd
    // as its name says, sad but true, emulate the behavior for now...
    if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
      return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);

    unsigned TargetMask = getShuffleSHUFImmediate(SVOp);

    if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
      return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);

    if (HasFp256 && (VT == MVT::v4f32 || VT == MVT::v2f64))
      return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1, TargetMask,
                                  DAG);

    return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1,
                                TargetMask, DAG);
  }

  if (isPALIGNRMask(M, VT, Subtarget))
    return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2,
                                getShufflePALIGNRImmediate(SVOp),
                                DAG);

  if (isVALIGNMask(M, VT, Subtarget))
    return getTargetShuffleNode(X86ISD::VALIGN, dl, VT, V1, V2,
                                getShuffleVALIGNImmediate(SVOp),
                                DAG);

  // Check if this can be converted into a logical shift.
  bool isLeft = false;
  unsigned ShAmt = 0;
  SDValue ShVal;
  bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
  if (isShift && ShVal.hasOneUse()) {
    // If the shifted value has multiple uses, it may be cheaper to use
    // v_set0 + movlhps or movhlps, etc.
    MVT EltVT = VT.getVectorElementType();
    ShAmt *= EltVT.getSizeInBits();
    return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
  }

  if (isMOVLMask(M, VT)) {
    if (ISD::isBuildVectorAllZeros(V1.getNode()))
      return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
    if (!isMOVLPMask(M, VT)) {
      if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
        return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);

      if (VT == MVT::v4i32 || VT == MVT::v4f32)
        return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
    }
  }

  // FIXME: fold these into legal mask.
  if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasInt256))
    return getMOVLowToHigh(Op, dl, DAG, HasSSE2);

  if (isMOVHLPSMask(M, VT))
    return getMOVHighToLow(Op, dl, DAG);

  if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget))
    return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);

  if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget))
    return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);

  if (isMOVLPMask(M, VT))
    return getMOVLP(Op, dl, DAG, HasSSE2);

  if (ShouldXformToMOVHLPS(M, VT) ||
      ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT))
    return DAG.getCommutedVectorShuffle(*SVOp);

  if (isShift) {
    // No better options. Use a vshldq / vsrldq.
    MVT EltVT = VT.getVectorElementType();
    ShAmt *= EltVT.getSizeInBits();
    return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
  }

  bool Commuted = false;
  // FIXME: This should also accept a bitcast of a splat? Be careful, not
  // 1,1,1,1 -> v8i16 though.
  BitVector UndefElements;
  if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V1.getNode()))
    if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
      V1IsSplat = true;
  if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V2.getNode()))
    if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
      V2IsSplat = true;

  // Canonicalize the splat or undef, if present, to be on the RHS.
  if (!V2IsUndef && V1IsSplat && !V2IsSplat) {
    CommuteVectorShuffleMask(M, NumElems);
    std::swap(V1, V2);
    std::swap(V1IsSplat, V2IsSplat);
    Commuted = true;
  }

  if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) {
    // Shuffling low element of v1 into undef, just return v1.
    if (V2IsUndef)
      return V1;
    // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
    // the instruction selector will not match, so get a canonical MOVL with
    // swapped operands to undo the commute.
    return getMOVL(DAG, dl, VT, V2, V1);
  }

  if (isUNPCKLMask(M, VT, HasInt256))
    return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);

  if (isUNPCKHMask(M, VT, HasInt256))
    return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);

  if (V2IsSplat) {
    // Normalize mask so all entries that point to V2 point to its first
    // element, then try to match unpck{h|l} again. If match, return a
    // new vector_shuffle with the corrected mask.
    SmallVector<int, 8> NewMask(M.begin(), M.end());
    NormalizeMask(NewMask, NumElems);
    if (isUNPCKLMask(NewMask, VT, HasInt256, true))
      return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
    if (isUNPCKHMask(NewMask, VT, HasInt256, true))
      return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
  }

  if (Commuted) {
    // Commute it back and try unpck* again.
    // FIXME: this seems wrong.
    CommuteVectorShuffleMask(M, NumElems);
    std::swap(V1, V2);
    std::swap(V1IsSplat, V2IsSplat);

    if (isUNPCKLMask(M, VT, HasInt256))
      return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);

    if (isUNPCKHMask(M, VT, HasInt256))
      return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
  }

  // Normalize the node to match x86 shuffle ops if needed.
  if (!V2IsUndef && (isSHUFPMask(M, VT, /* Commuted */ true)))
    return DAG.getCommutedVectorShuffle(*SVOp);

  // The checks below are all present in isShuffleMaskLegal, but they are
  // inlined here right now to enable us to directly emit target specific
  // nodes, and remove one by one until they don't return Op anymore.

  if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
      SVOp->getSplatIndex() == 0 && V2IsUndef) {
    if (VT == MVT::v2f64 || VT == MVT::v2i64)
      return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
  }

  if (isPSHUFHWMask(M, VT, HasInt256))
    return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
                                getShufflePSHUFHWImmediate(SVOp),
                                DAG);

  if (isPSHUFLWMask(M, VT, HasInt256))
    return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
                                getShufflePSHUFLWImmediate(SVOp),
                                DAG);

  unsigned MaskValue;
  if (isBlendMask(M, VT, Subtarget->hasSSE41(), HasInt256, &MaskValue))
    return LowerVECTOR_SHUFFLEtoBlend(SVOp, MaskValue, Subtarget, DAG);

  if (isSHUFPMask(M, VT))
    return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
                                getShuffleSHUFImmediate(SVOp), DAG);

  if (isUNPCKL_v_undef_Mask(M, VT, HasInt256))
    return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
  if (isUNPCKH_v_undef_Mask(M, VT, HasInt256))
    return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);

  //===--------------------------------------------------------------------===//
  // Generate target specific nodes for 128 or 256-bit shuffles only
  // supported in the AVX instruction set.
  //

  // Handle VMOVDDUPY permutations.
  if (V2IsUndef && isMOVDDUPYMask(M, VT, HasFp256))
    return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);

  // Handle VPERMILPS/D* permutations.
  if (isVPERMILPMask(M, VT)) {
    if ((HasInt256 && VT == MVT::v8i32) || VT == MVT::v16i32)
      return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1,
                                  getShuffleSHUFImmediate(SVOp), DAG);
    return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1,
                                getShuffleSHUFImmediate(SVOp), DAG);
  }

  unsigned Idx;
  if (VT.is512BitVector() && isINSERT64x4Mask(M, VT, &Idx))
    return Insert256BitVector(V1, Extract256BitVector(V2, 0, DAG, dl),
                              Idx*(NumElems/2), DAG, dl);

  // Handle VPERM2F128/VPERM2I128 permutations.
  if (isVPERM2X128Mask(M, VT, HasFp256))
    return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
                                V2, getShuffleVPERM2X128Immediate(SVOp), DAG);

  if (Subtarget->hasSSE41() && isINSERTPSMask(M, VT))
    return getINSERTPS(SVOp, dl, DAG);

  unsigned Imm8;
  if (V2IsUndef && HasInt256 && isPermImmMask(M, VT, Imm8))
    return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, Imm8, DAG);

  if ((V2IsUndef && HasInt256 && VT.is256BitVector() && NumElems == 8) ||
      VT.is512BitVector()) {
    MVT MaskEltVT = MVT::getIntegerVT(VT.getVectorElementType().getSizeInBits());
    MVT MaskVectorVT = MVT::getVectorVT(MaskEltVT, NumElems);
    SmallVector<SDValue, 16> permclMask;
    for (unsigned i = 0; i != NumElems; ++i) {
      permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MaskEltVT));
    }

    SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVectorVT, permclMask);
    if (V2IsUndef)
      // Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32
      return DAG.getNode(X86ISD::VPERMV, dl, VT,
                         DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1);
    return DAG.getNode(X86ISD::VPERMV3, dl, VT, V1,
                       DAG.getNode(ISD::BITCAST, dl, VT, Mask), V2);
  }

  //===--------------------------------------------------------------------===//
  // Since no target specific shuffle was selected for this generic one,
  // lower it into other known shuffles. FIXME: this isn't true yet, but
  // this is the plan.
  //

  // Handle v8i16 specifically since SSE can do byte extraction and insertion.
  if (VT == MVT::v8i16) {
    SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG);
    if (NewOp.getNode())
      return NewOp;
  }

  if (VT == MVT::v16i16 && HasInt256) {
    SDValue NewOp = LowerVECTOR_SHUFFLEv16i16(Op, DAG);
    if (NewOp.getNode())
      return NewOp;
  }

  if (VT == MVT::v16i8) {
    SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, Subtarget, DAG);
    if (NewOp.getNode())
      return NewOp;
  }

  if (VT == MVT::v32i8) {
    SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG);
    if (NewOp.getNode())
      return NewOp;
  }

  // Handle all 128-bit wide vectors with 4 elements, and match them with
  // several different shuffle types.
  if (NumElems == 4 && VT.is128BitVector())
    return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG);

  // Handle general 256-bit shuffles.
  if (VT.is256BitVector())
    return LowerVECTOR_SHUFFLE_256(SVOp, DAG);

  return SDValue();
}
// This function assumes its argument is a BUILD_VECTOR of constants or
// undef SDNodes. i.e: ISD::isBuildVectorOfConstantSDNodes(BuildVector) is
// true.
static bool BUILD_VECTORtoBlendMask(BuildVectorSDNode *BuildVector,
                                    unsigned &MaskValue) {
  MaskValue = 0;
  unsigned NumElems = BuildVector->getNumOperands();
  // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
  unsigned NumLanes = (NumElems - 1) / 8 + 1;
  unsigned NumElemsInLane = NumElems / NumLanes;

  // The blend mask for v16i16 must be symmetric across both 128-bit lanes.
  for (unsigned i = 0; i < NumElemsInLane; ++i) {
    SDValue EltCond = BuildVector->getOperand(i);
    SDValue SndLaneEltCond =
        (NumLanes == 2) ? BuildVector->getOperand(i + NumElemsInLane) : EltCond;
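    // The blend immediate only carries one bit per element of a single lane,
    // so element i and element i + NumElemsInLane must select the same
    // source; if the two lanes disagree we give up below.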
    int Lane1Cond = -1, Lane2Cond = -1;
    if (isa<ConstantSDNode>(EltCond))
      Lane1Cond = !isZero(EltCond);
    if (isa<ConstantSDNode>(SndLaneEltCond))
      Lane2Cond = !isZero(SndLaneEltCond);

    if (Lane1Cond == Lane2Cond || Lane2Cond < 0)
      // Lane1Cond != 0 means we want the first argument.
      // Lane1Cond == 0 means we want the second argument.
      // The encoding of this argument is 0 for the first argument, 1
      // for the second. Therefore, invert the condition.
      MaskValue |= !Lane1Cond << i;
    else if (Lane1Cond < 0)
      MaskValue |= !Lane2Cond << i;
    else
      return false;
  }
  return true;
}
/// \brief Try to lower a VSELECT instruction to a vector shuffle.
static SDValue lowerVSELECTtoVectorShuffle(SDValue Op,
                                           const X86Subtarget *Subtarget,
                                           SelectionDAG &DAG) {
  SDValue Cond = Op.getOperand(0);
  SDValue LHS = Op.getOperand(1);
  SDValue RHS = Op.getOperand(2);
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();

  if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
    return SDValue();
  auto *CondBV = cast<BuildVectorSDNode>(Cond);

  // Only non-legal VSELECTs reach this lowering, convert those into generic
  // shuffles and re-use the shuffle lowering path for blends.
  SmallVector<int, 32> Mask;
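  // Mask indices 0..Size-1 select from LHS and Size..2*Size-1 select from
  // RHS; a non-constant condition element becomes undef (-1).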
  for (int i = 0, Size = VT.getVectorNumElements(); i < Size; ++i) {
    SDValue CondElt = CondBV->getOperand(i);
    Mask.push_back(
        isa<ConstantSDNode>(CondElt) ? i + (isZero(CondElt) ? Size : 0) : -1);
  }
  return DAG.getVectorShuffle(VT, dl, LHS, RHS, Mask);
}
SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
  // A vselect where all conditions and data are constants can be optimized into
  // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
  if (ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(0).getNode()) &&
      ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(1).getNode()) &&
      ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(2).getNode()))
    return SDValue();

  // Try to lower this to a blend-style vector shuffle. This can handle all
  // constant condition cases.
  SDValue BlendOp = lowerVSELECTtoVectorShuffle(Op, Subtarget, DAG);
  if (BlendOp.getNode())
    return BlendOp;

  // Variable blends are only legal from SSE4.1 onward.
  if (!Subtarget->hasSSE41())
    return SDValue();

  // Some types for vselect were previously set to Expand, not Legal or
  // Custom. Return an empty SDValue so we fall through to Expand, after
  // the Custom lowering phase.
  MVT VT = Op.getSimpleValueType();
  switch (VT.SimpleTy) {
  default:
    break;
  case MVT::v8i16:
  case MVT::v16i16:
    if (Subtarget->hasBWI() && Subtarget->hasVLX())
      break;
    return SDValue();
  }

  // We couldn't create a "Blend with immediate" node.
  // This node should still be legal, but we'll have to emit a blendv*
  // instruction.
  return Op;
}
static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);

  if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
    return SDValue();

  if (VT.getSizeInBits() == 8) {
    SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
                                  Op.getOperand(0), Op.getOperand(1));
    SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
                                 DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
  }

  if (VT.getSizeInBits() == 16) {
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    // If Idx is 0, it's cheaper to do a move instead of a pextrw.
    if (Idx == 0)
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                     DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
                                                 Op.getOperand(0)),
                                     Op.getOperand(1)));
    SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
                                  Op.getOperand(0), Op.getOperand(1));
    SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
                                 DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
  }

  if (VT == MVT::f32) {
    // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
    // the result back to FR32 register. It's only worth matching if the
    // result has a single use which is a store or a bitcast to i32. And in
    // the case of a store, it's not worth it if the index is a constant 0,
    // because a MOVSSmr can be used instead, which is smaller and faster.
    if (!Op.hasOneUse())
      return SDValue();
    SDNode *User = *Op.getNode()->use_begin();
    if ((User->getOpcode() != ISD::STORE ||
         (isa<ConstantSDNode>(Op.getOperand(1)) &&
          cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
        (User->getOpcode() != ISD::BITCAST ||
         User->getValueType(0) != MVT::i32))
      return SDValue();
    SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                  DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
                                              Op.getOperand(0)),
                                  Op.getOperand(1));
    return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
  }

  if (VT == MVT::i32 || VT == MVT::i64) {
    // ExtractPS/pextrq works with constant index.
    if (isa<ConstantSDNode>(Op.getOperand(1)))
      return Op;
  }
  return SDValue();
}
/// Extract one bit from mask vector, like v16i1 or v8i1.
/// AVX-512 feature.
SDValue
X86TargetLowering::ExtractBitFromMaskVector(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDValue Vec = Op.getOperand(0);
  SDLoc dl(Vec);
  MVT VecVT = Vec.getSimpleValueType();
  SDValue Idx = Op.getOperand(1);
  MVT EltVT = Op.getSimpleValueType();

  assert((EltVT == MVT::i1) &&
         "Unexpected operands in ExtractBitFromMaskVector");
  assert((VecVT.getVectorNumElements() <= 16 || Subtarget->hasBWI()) &&
         "Unexpected vector type in ExtractBitFromMaskVector");

  // A variable index can't be handled in mask registers,
  // so extend the vector to VR512.
  if (!isa<ConstantSDNode>(Idx)) {
    MVT ExtVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
    SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Vec);
    SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
                              ExtVT.getVectorElementType(), Ext, Idx);
    return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
  }

  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
  const TargetRegisterClass *RC = getRegClassFor(VecVT);
  if (!Subtarget->hasDQI() && (VecVT.getVectorNumElements() <= 8))
    RC = getRegClassFor(MVT::v16i1);
  unsigned MaxShift = RC->getSize() * 8 - 1;
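  // Shift the requested bit up to the most significant position, then
  // logically shift it back down to bit 0; it can then be extracted from
  // element 0.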
  Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
                    DAG.getConstant(MaxShift - IdxVal, MVT::i8));
  Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
                    DAG.getConstant(MaxShift, MVT::i8));
  return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec,
                     DAG.getIntPtrConstant(0));
}
SDValue
X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue Vec = Op.getOperand(0);
  MVT VecVT = Vec.getSimpleValueType();
  SDValue Idx = Op.getOperand(1);

  if (Op.getSimpleValueType() == MVT::i1)
    return ExtractBitFromMaskVector(Op, DAG);

  if (!isa<ConstantSDNode>(Idx)) {
    if (VecVT.is512BitVector() ||
        (VecVT.is256BitVector() && Subtarget->hasInt256() &&
         VecVT.getVectorElementType().getSizeInBits() == 32)) {

      MVT MaskEltVT =
          MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
      MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
                                               MaskEltVT.getSizeInBits());

      Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT);
      SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT,
                                 getZeroVector(MaskVT, Subtarget, DAG, dl),
                                 Idx, DAG.getConstant(0, getPointerTy()));
      SDValue Perm = DAG.getNode(X86ISD::VPERMV, dl, VecVT, Mask, Vec);
      return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(),
                         Perm, DAG.getConstant(0, getPointerTy()));
    }
    return SDValue();
  }

  // If this is a 256-bit vector result, first extract the 128-bit vector and
  // then extract the element from the 128-bit vector.
  if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
    unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
    // Get the 128-bit vector.
    Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);
    MVT EltVT = VecVT.getVectorElementType();

    unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();

    //if (IdxVal >= NumElems/2)
    //  IdxVal -= NumElems/2;
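    // Reduce the index modulo ElemsPerChunk: it then addresses the element
    // within the extracted 128-bit chunk.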
    IdxVal -= (IdxVal/ElemsPerChunk)*ElemsPerChunk;
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
                       DAG.getConstant(IdxVal, MVT::i32));
  }

  assert(VecVT.is128BitVector() && "Unexpected vector length");

  if (Subtarget->hasSSE41()) {
    SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
    if (Res.getNode())
      return Res;
  }

  MVT VT = Op.getSimpleValueType();
  // TODO: handle v16i8.
  if (VT.getSizeInBits() == 16) {
    SDValue Vec = Op.getOperand(0);
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    if (Idx == 0)
      return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
                         DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
                                     DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
                                                 Vec),
                                     Op.getOperand(1)));
    // Transform it so it matches pextrw, which produces a 32-bit result.
    MVT EltVT = MVT::i32;
    SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
                                  Op.getOperand(0), Op.getOperand(1));
    SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
                                 DAG.getValueType(VT));
    return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
  }

  if (VT.getSizeInBits() == 32) {
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    if (Idx == 0)
      return Op;

    // SHUFPS the element to the lowest double word, then movss.
    int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
    MVT VVT = Op.getOperand(0).getSimpleValueType();
    SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
                                       DAG.getUNDEF(VVT), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
                       DAG.getIntPtrConstant(0));
  }

  if (VT.getSizeInBits() == 64) {
    // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
    // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
    // to match extract_elt for f64.
    unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    if (Idx == 0)
      return Op;

    // UNPCKHPD the element to the lowest double word, then movsd.
    // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
    // to a f64mem, the whole operation is folded into a single MOVHPDmr.
    int Mask[2] = { 1, -1 };
    MVT VVT = Op.getOperand(0).getSimpleValueType();
    SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
                                       DAG.getUNDEF(VVT), Mask);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
                       DAG.getIntPtrConstant(0));
  }
  return SDValue();
}
/// Insert one bit to mask vector, like v16i1 or v8i1.
/// AVX-512 feature.
SDValue
X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
  SDValue Vec = Op.getOperand(0);
  SDValue Elt = Op.getOperand(1);
  SDValue Idx = Op.getOperand(2);
  MVT VecVT = Vec.getSimpleValueType();

  if (!isa<ConstantSDNode>(Idx)) {
    // Non-constant index. Extend source and destination,
    // insert the element and then truncate the result.
    MVT ExtVecVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
    MVT ExtEltVT = (VecVT == MVT::v8i1 ? MVT::i64 : MVT::i32);
    SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
      DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVecVT, Vec),
      DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
    return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
  }

  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
  SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
                       DAG.getConstant(IdxVal, MVT::i8));
  const TargetRegisterClass *RC = getRegClassFor(VecVT);
  unsigned MaxShift = RC->getSize() * 8 - 1;
  EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
                         DAG.getConstant(MaxShift, MVT::i8));
  EltInVec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, EltInVec,
                         DAG.getConstant(MaxShift - IdxVal, MVT::i8));
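  // EltInVec now holds the new bit at position IdxVal and zeros elsewhere;
  // merge it into the existing mask vector.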
  return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
}
SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
                                                  SelectionDAG &DAG) const {
  MVT VT = Op.getSimpleValueType();
  MVT EltVT = VT.getVectorElementType();

  if (EltVT == MVT::i1)
    return InsertBitToMaskVector(Op, DAG);

  SDLoc dl(Op);
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue N2 = Op.getOperand(2);
  if (!isa<ConstantSDNode>(N2))
    return SDValue();
  auto *N2C = cast<ConstantSDNode>(N2);
  unsigned IdxVal = N2C->getZExtValue();

  // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
  // into that, and then insert the subvector back into the result.
  if (VT.is256BitVector() || VT.is512BitVector()) {
    // Get the desired 128-bit vector half.
    SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);

    // Insert the element into the desired half.
    unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
    unsigned IdxIn128 = IdxVal - (IdxVal / NumEltsIn128) * NumEltsIn128;

    V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
                    DAG.getConstant(IdxIn128, MVT::i32));

    // Insert the changed part back to the 256-bit vector.
    return Insert128BitVector(N0, V, IdxVal, DAG, dl);
  }
  assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");

  if (Subtarget->hasSSE41()) {
    if (EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) {
      unsigned Opc;
      if (VT == MVT::v8i16) {
        Opc = X86ISD::PINSRW;
      } else {
        assert(VT == MVT::v16i8);
        Opc = X86ISD::PINSRB;
      }

      // Transform it so it matches pinsr{b,w} which expects a GR32 as its
      // second argument.
      if (N1.getValueType() != MVT::i32)
        N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
      if (N2.getValueType() != MVT::i32)
        N2 = DAG.getIntPtrConstant(IdxVal);
      return DAG.getNode(Opc, dl, VT, N0, N1, N2);
    }

    if (EltVT == MVT::f32) {
      // Bits [7:6] of the constant are the source select. This will always be
      // zero here. The DAG Combiner may combine an extract_elt index into
      // these bits. For example (insert (extract, 3), 2) could be matched by
      // putting the '3' into bits [7:6] of X86ISD::INSERTPS.
      // Bits [5:4] of the constant are the destination select. This is the
      // value of the incoming immediate.
      // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
      // combine either bitwise AND or insert of float 0.0 to set these bits.
      N2 = DAG.getIntPtrConstant(IdxVal << 4);
      // Create this as a scalar to vector to match the instruction pattern.
      N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
      return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
    }

    if (EltVT == MVT::i32 || EltVT == MVT::i64) {
      // PINSR* works with constant index.
      return Op;
    }
  }

  if (EltVT == MVT::i8)
    return SDValue();

  if (EltVT.getSizeInBits() == 16) {
    // Transform it so it matches pinsrw which expects a 16-bit value in a GR32
    // as its second argument.
    if (N1.getValueType() != MVT::i32)
      N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
    if (N2.getValueType() != MVT::i32)
      N2 = DAG.getIntPtrConstant(IdxVal);
    return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
  }
  return SDValue();
}
static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT OpVT = Op.getSimpleValueType();

  // If this is a 256-bit vector result, first insert into a 128-bit
  // vector and then insert into the 256-bit vector.
  if (!OpVT.is128BitVector()) {
    // Insert into a 128-bit vector.
    unsigned SizeFactor = OpVT.getSizeInBits()/128;
    MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
                                 OpVT.getVectorNumElements() / SizeFactor);

    Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));

    // Insert the 128-bit vector.
    return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
  }

  if (OpVT == MVT::v1i64 &&
      Op.getOperand(0).getValueType() == MVT::i64)
    return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));

  SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
  assert(OpVT.is128BitVector() && "Expected an SSE type!");
  return DAG.getNode(ISD::BITCAST, dl, OpVT,
                     DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, AnyExt));
}
// Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
// a simple subregister reference or explicit instructions to grab
// upper bits of a vector.
static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
                                      SelectionDAG &DAG) {
  SDLoc dl(Op);
  SDValue In = Op.getOperand(0);
  SDValue Idx = Op.getOperand(1);
  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
  MVT ResVT = Op.getSimpleValueType();
  MVT InVT = In.getSimpleValueType();

  if (Subtarget->hasFp256()) {
    if (ResVT.is128BitVector() &&
        (InVT.is256BitVector() || InVT.is512BitVector()) &&
        isa<ConstantSDNode>(Idx)) {
      return Extract128BitVector(In, IdxVal, DAG, dl);
    }
    if (ResVT.is256BitVector() && InVT.is512BitVector() &&
        isa<ConstantSDNode>(Idx)) {
      return Extract256BitVector(In, IdxVal, DAG, dl);
    }
  }
  return SDValue();
}
// Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
// simple superregister reference or explicit instructions to insert
// the upper bits of a vector.
static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
                                     SelectionDAG &DAG) {
  if (!Subtarget->hasAVX())
    return SDValue();

  SDLoc dl(Op);
  SDValue Vec = Op.getOperand(0);
  SDValue SubVec = Op.getOperand(1);
  SDValue Idx = Op.getOperand(2);

  if (!isa<ConstantSDNode>(Idx))
    return SDValue();

  unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
  MVT OpVT = Op.getSimpleValueType();
  MVT SubVecVT = SubVec.getSimpleValueType();

  // Fold two 16-byte subvector loads into one 32-byte load:
  // (insert_subvector (insert_subvector undef, (load addr), 0),
  //                   (load addr + 16), Elts/2)
  // --> load32 addr
  if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
      Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
      OpVT.is256BitVector() && SubVecVT.is128BitVector() &&
      !Subtarget->isUnalignedMem32Slow()) {
    SDValue SubVec2 = Vec.getOperand(1);
    if (auto *Idx2 = dyn_cast<ConstantSDNode>(Vec.getOperand(2))) {
      if (Idx2->getZExtValue() == 0) {
        SDValue Ops[] = { SubVec2, SubVec };
        SDValue LD = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, false);
        if (LD.getNode())
          return LD;
      }
    }
  }

  if ((OpVT.is256BitVector() || OpVT.is512BitVector()) &&
      SubVecVT.is128BitVector())
    return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);

  if (OpVT.is512BitVector() && SubVecVT.is256BitVector())
    return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);

  return SDValue();
}
// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing mode. These wrapped nodes will be selected
// into MOV32ri.
SDValue
X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  unsigned WrapperKind = X86ISD::Wrapper;
  CodeModel::Model M = DAG.getTarget().getCodeModel();

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    WrapperKind = X86ISD::WrapperRIP;
  else if (Subtarget->isPICStyleGOT())
    OpFlag = X86II::MO_GOTOFF;
  else if (Subtarget->isPICStyleStubPIC())
    OpFlag = X86II::MO_PIC_BASE_OFFSET;

  SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
                                             CP->getAlignment(),
                                             CP->getOffset(), OpFlag);
  SDLoc DL(CP);
  Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
  // With PIC, the address is actually $g + Offset.
  if (OpFlag) {
    Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg,
                                     SDLoc(), getPointerTy()),
                         Result);
  }

  return Result;
}
SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  unsigned WrapperKind = X86ISD::Wrapper;
  CodeModel::Model M = DAG.getTarget().getCodeModel();

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    WrapperKind = X86ISD::WrapperRIP;
  else if (Subtarget->isPICStyleGOT())
    OpFlag = X86II::MO_GOTOFF;
  else if (Subtarget->isPICStyleStubPIC())
    OpFlag = X86II::MO_PIC_BASE_OFFSET;

  SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
                                          OpFlag);
  SDLoc DL(JT);
  Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
  if (OpFlag)
    Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg,
                                     SDLoc(), getPointerTy()),
                         Result);

  return Result;
}
SDValue
X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
  const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();

  // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
  // global base reg.
  unsigned char OpFlag = 0;
  unsigned WrapperKind = X86ISD::Wrapper;
  CodeModel::Model M = DAG.getTarget().getCodeModel();

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel)) {
    if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
      OpFlag = X86II::MO_GOTPCREL;
    WrapperKind = X86ISD::WrapperRIP;
  } else if (Subtarget->isPICStyleGOT()) {
    OpFlag = X86II::MO_GOT;
  } else if (Subtarget->isPICStyleStubPIC()) {
    OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
  } else if (Subtarget->isPICStyleStubNoDynamic()) {
    OpFlag = X86II::MO_DARWIN_NONLAZY;
  }

  SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);

  SDLoc DL(Op);
  Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
  if (DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
      !Subtarget->is64Bit()) {
    Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg,
                                     SDLoc(), getPointerTy()),
                         Result);
  }

  // For symbols that require a load from a stub to get the address, emit the
  // load.
  if (isGlobalStubReference(OpFlag))
    Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(), false, false, false, 0);

  return Result;
}
SDValue
X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
  // Create the TargetBlockAddress node.
  unsigned char OpFlags =
      Subtarget->ClassifyBlockAddressReference();
  CodeModel::Model M = DAG.getTarget().getCodeModel();
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
  SDLoc dl(Op);
  SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset,
                                             OpFlags);

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
  else
    Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
  if (isGlobalRelativeToPICBase(OpFlags)) {
    Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
                         Result);
  }

  return Result;
}
SDValue
X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
                                      int64_t Offset, SelectionDAG &DAG) const {
  // Create the TargetGlobalAddress node, folding in the constant
  // offset if it is legal.
  unsigned char OpFlags =
      Subtarget->ClassifyGlobalReference(GV, DAG.getTarget());
  CodeModel::Model M = DAG.getTarget().getCodeModel();
  SDValue Result;
  if (OpFlags == X86II::MO_NO_FLAG &&
      X86::isOffsetSuitableForCodeModel(Offset, M)) {
    // A direct static reference to a global.
    Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
    Offset = 0;
  } else {
    Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
  }

  if (Subtarget->isPICStyleRIPRel() &&
      (M == CodeModel::Small || M == CodeModel::Kernel))
    Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
  else
    Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);

  // With PIC, the address is actually $g + Offset.
  if (isGlobalRelativeToPICBase(OpFlags)) {
    Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                         DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
                         Result);
  }

  // For globals that require a load from a stub to get the address, emit the
  // load.
  if (isGlobalStubReference(OpFlags))
    Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(), false, false, false, 0);

  // If there was a non-zero offset that we didn't fold, create an explicit
  // addition for it.
  if (Offset != 0)
    Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
                         DAG.getConstant(Offset, getPointerTy()));

  return Result;
}

SDValue
X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
  return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
}
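// ELF TLS lowering: the general- and local-dynamic models below call
// __tls_get_addr via the X86ISD::TLSADDR / TLSBASEADDR pseudo nodes, while
// the initial- and local-exec models compute the address directly relative
// to the thread pointer in %fs/%gs.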
static SDValue
GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
           SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
           unsigned char OperandFlags, bool LocalDynamic = false) {
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDLoc dl(GA);
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(),
                                           OperandFlags);

  X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
                                           : X86ISD::TLSADDR;

  if (InFlag) {
    SDValue Ops[] = { Chain, TGA, *InFlag };
    Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
  } else {
    SDValue Ops[] = { Chain, TGA };
    Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
  }

  // TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
  MFI->setAdjustsStack(true);
  MFI->setHasCalls(true);

  SDValue Flag = Chain.getValue(1);
  return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
}
// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit.
static SDValue
LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const EVT PtrVT) {
  SDValue InFlag;
  SDLoc dl(GA);  // ? function entry point might be better
  SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
                                   DAG.getNode(X86ISD::GlobalBaseReg,
                                               SDLoc(), PtrVT), InFlag);
  InFlag = Chain.getValue(1);

  return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
}

// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit.
static SDValue
LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
                                const EVT PtrVT) {
  return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
                    X86::RAX, X86II::MO_TLSGD);
}
static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
                                           SelectionDAG &DAG,
                                           const EVT PtrVT,
                                           bool is64Bit) {
  SDLoc dl(GA);

  // Get the start address of the TLS block for this module.
  X86MachineFunctionInfo *MFI = DAG.getMachineFunction()
                                    .getInfo<X86MachineFunctionInfo>();
  MFI->incNumLocalDynamicTLSAccesses();

  SDValue Base;
  if (is64Bit) {
    Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
                      X86II::MO_TLSLD, /*LocalDynamic=*/true);
  } else {
    SDValue InFlag;
    SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
        DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
    InFlag = Chain.getValue(1);
    Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
                      X86II::MO_TLSLDM, /*LocalDynamic=*/true);
  }

  // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
  // of Base.

  // Build x@dtpoff.
  unsigned char OperandFlags = X86II::MO_DTPOFF;
  unsigned WrapperKind = X86ISD::Wrapper;
  SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
                                           GA->getValueType(0),
                                           GA->getOffset(), OperandFlags);
  SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);

  // Add x@dtpoff with the base.
  return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
}
13827 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
13828 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13829 const EVT PtrVT, TLSModel::Model model,
13830 bool is64Bit, bool isPIC) {
13833 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
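  // In the X86 backend, address space 256 addresses the %gs segment and 257
  // addresses %fs, so a null pointer in the appropriate address space denotes
  // %gs:0 / %fs:0.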
  Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
                                                         is64Bit ? 257 : 256));

  SDValue ThreadPointer =
      DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0),
                  MachinePointerInfo(Ptr), false, false, false, 0);

  unsigned char OperandFlags = 0;
  // Most TLS accesses are not RIP relative, even on x86-64. One exception is
  // the initial-exec model.
  unsigned WrapperKind = X86ISD::Wrapper;
  if (model == TLSModel::LocalExec) {
    OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
  } else if (model == TLSModel::InitialExec) {
    if (is64Bit) {
      OperandFlags = X86II::MO_GOTTPOFF;
      WrapperKind = X86ISD::WrapperRIP;
    } else {
      OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
    }
  } else {
    llvm_unreachable("Unexpected model");
  }

  // emit "addl x@ntpoff,%eax" (local exec)
  // or   "addl x@indntpoff,%eax" (initial exec)
  // or   "addl x@gotntpoff(%ebx),%eax" (initial exec, 32-bit pic)
  SDValue TGA =
      DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
                                 GA->getOffset(), OperandFlags);
  SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);

  if (model == TLSModel::InitialExec) {
    if (isPIC && !is64Bit) {
      Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
                           DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
                           Offset);
    }

    Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
                         MachinePointerInfo::getGOT(), false, false, false, 0);
  }

  // The address of the thread local variable is the add of the thread
  // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}
13883 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
13885 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
13886 const GlobalValue *GV = GA->getGlobal();
13888 if (Subtarget->isTargetELF()) {
13889 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
13892 case TLSModel::GeneralDynamic:
13893 if (Subtarget->is64Bit())
13894 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
13895 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
13896 case TLSModel::LocalDynamic:
13897 return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(),
13898 Subtarget->is64Bit());
13899 case TLSModel::InitialExec:
13900 case TLSModel::LocalExec:
13901 return LowerToTLSExecModel(
13902 GA, DAG, getPointerTy(), model, Subtarget->is64Bit(),
13903 DAG.getTarget().getRelocationModel() == Reloc::PIC_);
13905 llvm_unreachable("Unknown TLS model.");
13908 if (Subtarget->isTargetDarwin()) {
13909 // Darwin only has one model of TLS. Lower to that.
13910 unsigned char OpFlag = 0;
13911 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
13912 X86ISD::WrapperRIP : X86ISD::Wrapper;
13914 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13915 // global base reg.
13916 bool PIC32 = (DAG.getTarget().getRelocationModel() == Reloc::PIC_) &&
13917 !Subtarget->is64Bit();
13919 OpFlag = X86II::MO_TLVP_PIC_BASE;
13921 OpFlag = X86II::MO_TLVP;
13923 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
13924 GA->getValueType(0),
13925 GA->getOffset(), OpFlag);
13926 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13928 // With PIC32, the address is actually $g + Offset.
13930 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13931 DAG.getNode(X86ISD::GlobalBaseReg,
13932 SDLoc(), getPointerTy()),
13935 // Lowering the machine isd will make sure everything is in the right
13937 SDValue Chain = DAG.getEntryNode();
13938 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13939 SDValue Args[] = { Chain, Offset };
13940 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
13942 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
13943 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13944 MFI->setAdjustsStack(true);
13946 // And our return value (tls address) is in the standard call return value
13948 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
13949 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
13950 Chain.getValue(1));
13953 if (Subtarget->isTargetKnownWindowsMSVC() ||
13954 Subtarget->isTargetWindowsGNU()) {
13955 // Just use the implicit TLS architecture
13956 // Need to generate someting similar to:
13957 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
13959 // mov ecx, dword [rel _tls_index]: Load index (from C runtime)
13960 // mov rcx, qword [rdx+rcx*8]
13961 // mov eax, .tls$:tlsvar
13962 // [rax+rcx] contains the address
13963 // Windows 64bit: gs:0x58
13964 // Windows 32bit: fs:__tls_array
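    // A rough 32-bit sketch (an illustration, not the exact codegen):
    //   mov eax, dword [fs:__tls_array]   ; TEB slot holding the TLS array
    //   mov ecx, dword [__tls_index]
    //   mov eax, dword [eax+ecx*4]        ; 4-byte slots on x86-32
    //   lea eax, [eax + tlsvar@SECREL]    ; offset within the .tls section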
    SDLoc dl(GA);
    SDValue Chain = DAG.getEntryNode();
13969 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
13970 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
13971 // use its literal value of 0x2C.
    Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
                                        ? Type::getInt8PtrTy(*DAG.getContext(),
                                                             256)
                                        : Type::getInt32PtrTy(*DAG.getContext(),
                                                              257));

    SDValue TlsArray =
        Subtarget->is64Bit()
            ? DAG.getIntPtrConstant(0x58)
            : (Subtarget->isTargetWindowsGNU()
                   ? DAG.getIntPtrConstant(0x2C)
                   : DAG.getExternalSymbol("_tls_array", getPointerTy()));
13985 SDValue ThreadPointer =
13986 DAG.getLoad(getPointerTy(), dl, Chain, TlsArray,
13987 MachinePointerInfo(Ptr), false, false, false, 0);
13989 // Load the _tls_index variable
13990 SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
13991 if (Subtarget->is64Bit())
13992 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain,
13993 IDX, MachinePointerInfo(), MVT::i32,
13994 false, false, false, 0);
    else
      IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
                        false, false, false, 0);
    SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
                                    getPointerTy());
    IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);
14003 SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
14004 res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(),
14005 false, false, false, 0);
14007 // Get the offset of start of .tls section
14008 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
14009 GA->getValueType(0),
14010 GA->getOffset(), X86II::MO_SECREL);
14011 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA);
14013 // The address of the thread local variable is the add of the thread
14014 // pointer with the offset of the variable.
14015 return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset);
  llvm_unreachable("TLS not implemented for this target.");
}
14021 /// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
14022 /// and take a 2 x i32 value to shift plus a shift amount.
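/// Rough sketch of the scheme used below, for SRL_PARTS (SRA is analogous):
///   Lo' = SHRD(Lo, Hi, c);  Hi' = Hi >> (c & 31);
///   if (c & 32) { Lo' = Hi >> (c & 31);  Hi' = isSRA ? Hi >> 31 : 0; }
/// with the final selection done by CMOV on the (c & 32) test.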
14023 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
14024 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
14025 MVT VT = Op.getSimpleValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
14028 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
14029 SDValue ShOpLo = Op.getOperand(0);
14030 SDValue ShOpHi = Op.getOperand(1);
14031 SDValue ShAmt = Op.getOperand(2);
  // X86ISD::SHLD and X86ISD::SHRD have defined overflow behavior but the
  // generic ISD nodes don't. Insert an AND to be safe; it's optimized away
  // otherwise.
14035 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
14036 DAG.getConstant(VTBits - 1, MVT::i8));
14037 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
14038 DAG.getConstant(VTBits - 1, MVT::i8))
14039 : DAG.getConstant(0, VT);
14041 SDValue Tmp2, Tmp3;
14042 if (Op.getOpcode() == ISD::SHL_PARTS) {
14043 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
    Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
  } else {
    Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
    Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
  }
  // If the shift amount is larger than or equal to the width of a part, we
  // can't rely on the results of shld/shrd. Insert a test and select the
  // appropriate values for large shift amounts.
14053 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
14054 DAG.getConstant(VTBits, MVT::i8));
14055 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
14056 AndNode, DAG.getConstant(0, MVT::i8));
  SDValue Hi, Lo;
  SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
14060 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
14061 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
14063 if (Op.getOpcode() == ISD::SHL_PARTS) {
14064 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
    Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
  } else {
    Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
    Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
  }
14071 SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}
14075 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
14076 SelectionDAG &DAG) const {
  MVT SrcVT = Op.getOperand(0).getSimpleValueType();
  SDLoc dl(Op);
14080 if (SrcVT.isVector()) {
14081 if (SrcVT.getVectorElementType() == MVT::i1) {
14082 MVT IntegerVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
14083 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14084 DAG.getNode(ISD::SIGN_EXTEND, dl, IntegerVT,
                                     Op.getOperand(0)));
    }
    return SDValue();
  }
14090 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
14091 "Unknown SINT_TO_FP to lower!");
  // These are really Legal; return the operand so the caller accepts it as
  // Legal.
  if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
    return Op;
  if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
      Subtarget->is64Bit()) {
    return Op;
  }
14102 unsigned Size = SrcVT.getSizeInBits()/8;
14103 MachineFunction &MF = DAG.getMachineFunction();
14104 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
14105 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                               StackSlot,
                               MachinePointerInfo::getFixedStack(SSFI),
                               false, false, 0);
  return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
}
SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
                                     SDValue StackSlot,
                                     SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDVTList Tys;
  bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
  if (useSSE)
    Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
  else
    Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
14125 unsigned ByteSize = SrcVT.getSizeInBits()/8;
14127 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
  MachineMemOperand *MMO;
  if (FI) {
    int SSFI = FI->getIndex();
    MMO = DAG.getMachineFunction()
              .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                                    MachineMemOperand::MOLoad, ByteSize,
                                    ByteSize);
  } else {
    MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
    StackSlot = StackSlot.getOperand(1);
  }
14139 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
  SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG
                                                  : X86ISD::FILD,
                                           DL, Tys, Ops, SrcVT, MMO);
  if (useSSE) {
    Chain = Result.getValue(1);
14146 SDValue InFlag = Result.getValue(2);
14148 // FIXME: Currently the FST is flagged to the FILD_FLAG. This
14149 // shouldn't be necessary except that RFP cannot be live across
14150 // multiple blocks. When stackifier is fixed, they can be uncoupled.
14151 MachineFunction &MF = DAG.getMachineFunction();
14152 unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
14153 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
14154 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14155 Tys = DAG.getVTList(MVT::Other);
    SDValue Ops[] = {
      Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
    };
14159 MachineMemOperand *MMO =
14160 DAG.getMachineFunction()
14161 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14162 MachineMemOperand::MOStore, SSFISize, SSFISize);
14164 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
14165 Ops, Op.getValueType(), MMO);
14166 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
14167 MachinePointerInfo::getFixedStack(SSFI),
                         false, false, false, 0);
  }

  return Result;
}
14174 // LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
14175 SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
14176 SelectionDAG &DAG) const {
  // This algorithm is not obvious. Here is what we're trying to output:
  /*
     movq       %rax,  %xmm0
     punpckldq  (c0),  %xmm0  // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
     subpd      (c1),  %xmm0  // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
     #ifdef __SSE3__
       haddpd   %xmm0, %xmm0
     #else
       pshufd   $0x4e, %xmm0, %xmm1
       addpd    %xmm1, %xmm0
     #endif
  */
  SDLoc dl(Op);
  LLVMContext *Context = DAG.getContext();
14193 // Build some magic constants.
14194 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
14195 Constant *C0 = ConstantDataVector::get(*Context, CV0);
14196 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);
14198 SmallVector<Constant*,2> CV1;
  CV1.push_back(
      ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
                                        APInt(64, 0x4330000000000000ULL))));
  CV1.push_back(
      ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
                                        APInt(64, 0x4530000000000000ULL))));
14205 Constant *C1 = ConstantVector::get(CV1);
14206 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);
14208 // Load the 64-bit value into an XMM register.
  SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
                            Op.getOperand(0));
14211 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
14212 MachinePointerInfo::getConstantPool(),
14213 false, false, false, 16);
14214 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32,
                              DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1),
                              CLod0);
14218 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
14219 MachinePointerInfo::getConstantPool(),
14220 false, false, false, 16);
14221 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1);
14222 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
  SDValue Result;
  if (Subtarget->hasSSE3()) {
14226 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
14227 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
  } else {
    SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub);
    SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
                                           S2F, 0x4E, DAG);
    Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
                         DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle),
                         Sub);
  }

  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
                     DAG.getIntPtrConstant(0));
}
14241 // LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
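// The trick: storing the u32 into the low word of a double whose high word is
// 0x43300000 yields the value 2^52 + x exactly (x < 2^32 fits in the 52-bit
// mantissa), so subtracting the bias 2^52 leaves (double)x. E.g., x = 5 gives
// bits 0x4330000000000005 == 2^52 + 5, and (2^52 + 5) - 2^52 == 5.0.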
14242 SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
14243 SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // FP constant to bias correct the final result.
  SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
                                   MVT::f64);

  // Load the 32-bit value into an XMM register.
  SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
                             Op.getOperand(0));
14253 // Zero out the upper parts of the register.
14254 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
14256 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14257 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
14258 DAG.getIntPtrConstant(0));
14260 // Or the load with the bias.
14261 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
14262 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14263 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14264 MVT::v2f64, Load)),
14265 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14266 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14267 MVT::v2f64, Bias)));
14268 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14269 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
14270 DAG.getIntPtrConstant(0));
14272 // Subtract the bias.
14273 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
14275 // Handle final rounding.
14276 EVT DestVT = Op.getValueType();
14278 if (DestVT.bitsLT(MVT::f64))
14279 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
14280 DAG.getIntPtrConstant(0));
14281 if (DestVT.bitsGT(MVT::f64))
14282 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
  return Sub;
}
14288 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
14289 const X86Subtarget &Subtarget) {
14290 // The algorithm is the following:
14291 // #ifdef __SSE4_1__
14292 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14293 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
  //                                 (uint4) 0x53000000, 0xaa);
  // #else
  //     uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
  //     uint4 hi = (v >> 16) | (uint4) 0x53000000;
  // #endif
  //     float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
  //     return (float4) lo + fhi;
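  //
  // Why this works: 0x4b000000 is the float 2^23, so 'lo' reads back as
  // 2^23 + (v & 0xffff); 0x53000000 is 2^39, so 'hi' reads back as
  // 2^39 + (v >> 16) * 2^16. All of those sums are exact in float, so
  // 'lo + (hi - (2^39 + 2^23))' reconstructs v with a single rounding in
  // the final add.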
  SDLoc DL(Op);
  SDValue V = Op->getOperand(0);
14304 EVT VecIntVT = V.getValueType();
14305 bool Is128 = VecIntVT == MVT::v4i32;
14306 EVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
  // If we convert to something other than the supported type, e.g., to v4f64,
  // abort early.
  if (VecFloatVT != Op->getValueType(0))
    return SDValue();
14312 unsigned NumElts = VecIntVT.getVectorNumElements();
14313 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
14314 "Unsupported custom type");
14315 assert(NumElts <= 8 && "The size of the constant array must be fixed");
  // In the #ifdef/#else code above, we have in common:
  // - The vector of constants:
  //   -- 0x4b000000
  //   -- 0x53000000
  // - A shift:
  //   -- v >> 16
14324 // Create the splat vector for 0x4b000000.
14325 SDValue CstLow = DAG.getConstant(0x4b000000, MVT::i32);
14326 SDValue CstLowArray[] = {CstLow, CstLow, CstLow, CstLow,
14327 CstLow, CstLow, CstLow, CstLow};
14328 SDValue VecCstLow = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14329 makeArrayRef(&CstLowArray[0], NumElts));
14330 // Create the splat vector for 0x53000000.
14331 SDValue CstHigh = DAG.getConstant(0x53000000, MVT::i32);
14332 SDValue CstHighArray[] = {CstHigh, CstHigh, CstHigh, CstHigh,
14333 CstHigh, CstHigh, CstHigh, CstHigh};
14334 SDValue VecCstHigh = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14335 makeArrayRef(&CstHighArray[0], NumElts));
14337 // Create the right shift.
14338 SDValue CstShift = DAG.getConstant(16, MVT::i32);
14339 SDValue CstShiftArray[] = {CstShift, CstShift, CstShift, CstShift,
14340 CstShift, CstShift, CstShift, CstShift};
14341 SDValue VecCstShift = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14342 makeArrayRef(&CstShiftArray[0], NumElts));
14343 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
  SDValue Low, High;
  if (Subtarget.hasSSE41()) {
14347 EVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
14348 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14349 SDValue VecCstLowBitcast =
14350 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstLow);
14351 SDValue VecBitcast = DAG.getNode(ISD::BITCAST, DL, VecI16VT, V);
    // Low will be bitcast right away, so do not bother bitcasting back to
    // its original type.
    Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
14355 VecCstLowBitcast, DAG.getConstant(0xaa, MVT::i32));
14356 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14357 // (uint4) 0x53000000, 0xaa);
14358 SDValue VecCstHighBitcast =
14359 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstHigh);
14360 SDValue VecShiftBitcast =
14361 DAG.getNode(ISD::BITCAST, DL, VecI16VT, HighShift);
14362 // High will be bitcasted right away, so do not bother bitcasting back to
14363 // its original type.
14364 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
14365 VecCstHighBitcast, DAG.getConstant(0xaa, MVT::i32));
  } else {
    SDValue CstMask = DAG.getConstant(0xffff, MVT::i32);
14368 SDValue VecCstMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, CstMask,
14369 CstMask, CstMask, CstMask);
14370 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14371 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
14372 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
14374 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14375 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
  }

  // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
14379 SDValue CstFAdd = DAG.getConstantFP(
14380 APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), MVT::f32);
14381 SDValue CstFAddArray[] = {CstFAdd, CstFAdd, CstFAdd, CstFAdd,
14382 CstFAdd, CstFAdd, CstFAdd, CstFAdd};
14383 SDValue VecCstFAdd = DAG.getNode(ISD::BUILD_VECTOR, DL, VecFloatVT,
14384 makeArrayRef(&CstFAddArray[0], NumElts));
14386 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14387 SDValue HighBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, High);
  SDValue FHigh =
      DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
14390 // return (float4) lo + fhi;
14391 SDValue LowBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, Low);
  return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
}
14395 SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
14396 SelectionDAG &DAG) const {
14397 SDValue N0 = Op.getOperand(0);
  MVT SVT = N0.getSimpleValueType();
  SDLoc dl(Op);

  switch (SVT.SimpleTy) {
  default:
    llvm_unreachable("Custom UINT_TO_FP is not supported!");
  case MVT::v4i8:
  case MVT::v4i16:
  case MVT::v8i8:
  case MVT::v8i16: {
    MVT NVT = MVT::getVectorVT(MVT::i32, SVT.getVectorNumElements());
    return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
                       DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
  }
  case MVT::v4i32:
  case MVT::v8i32:
    return lowerUINT_TO_FP_vXi32(Op, DAG, *Subtarget);
  }
  llvm_unreachable(nullptr);
}
14419 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
14420 SelectionDAG &DAG) const {
  SDValue N0 = Op.getOperand(0);
  SDLoc dl(Op);
14424 if (Op.getValueType().isVector())
14425 return lowerUINT_TO_FP_vec(Op, DAG);
14427 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
14428 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
14429 // the optimization here.
14430 if (DAG.SignBitIsZero(N0))
14431 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
14433 MVT SrcVT = N0.getSimpleValueType();
14434 MVT DstVT = Op.getSimpleValueType();
14435 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
14436 return LowerUINT_TO_FP_i64(Op, DAG);
14437 if (SrcVT == MVT::i32 && X86ScalarSSEf64)
14438 return LowerUINT_TO_FP_i32(Op, DAG);
  if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
    return SDValue();
14442 // Make a 64-bit buffer, and use it to build an FILD.
14443 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
14444 if (SrcVT == MVT::i32) {
14445 SDValue WordOff = DAG.getConstant(4, getPointerTy());
14446 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
14447 getPointerTy(), StackSlot, WordOff);
14448 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                                  StackSlot, MachinePointerInfo(),
                                  false, false, 0);
    SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
                                  OffsetSlot, MachinePointerInfo(),
                                  false, false, 0);
    SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
    return Fild;
  }
14458 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
14459 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                               StackSlot, MachinePointerInfo(),
                               false, false, 0);
14462 // For i64 source, we need to add the appropriate power of 2 if the input
14463 // was negative. This is the same as the optimization in
  // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
14465 // we must be careful to do the computation in x87 extended precision, not
14466 // in SSE. (The generic code can't know it's OK to do this, or how to.)
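  // Concretely: FILD treats the 64-bit pattern as signed, so an input with the
  // sign bit set arrives as x - 2^64. The fudge pool below pairs 0.0f with
  // 2^64 (f32 bits 0x5F800000), and the sign test selects which one to add
  // back, all in x87 extended precision.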
14467 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
14468 MachineMemOperand *MMO =
14469 DAG.getMachineFunction()
14470 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14471 MachineMemOperand::MOLoad, 8, 8);
14473 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
14474 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
  SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
                                         MVT::i64, MMO);
14478 APInt FF(32, 0x5F800000ULL);
14480 // Check whether the sign bit is set.
14481 SDValue SignSet = DAG.getSetCC(dl,
14482 getSetCCResultType(*DAG.getContext(), MVT::i64),
                                 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
                                 ISD::SETLT);
14486 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
14487 SDValue FudgePtr = DAG.getConstantPool(
      ConstantInt::get(*DAG.getContext(), FF.zext(64)),
      getPointerTy());
14491 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
14492 SDValue Zero = DAG.getIntPtrConstant(0);
14493 SDValue Four = DAG.getIntPtrConstant(4);
  SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
                               Zero, Four);
14496 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);
14498 // Load the value out, extending it from f32 to f80.
14499 // FIXME: Avoid the extend by constructing the right constant pool?
14500 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
14501 FudgePtr, MachinePointerInfo::getConstantPool(),
14502 MVT::f32, false, false, false, 4);
14503 // Extend everything to 80 bits to force it to be done on x87.
14504 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
  return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
}
14508 std::pair<SDValue,SDValue>
X86TargetLowering::FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
14510 bool IsSigned, bool IsReplace) const {
  SDLoc DL(Op);
  EVT DstTy = Op.getValueType();

  if (!IsSigned && !isIntegerTypeFTOL(DstTy)) {
    assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
    DstTy = MVT::i64;
  }
14520 assert(DstTy.getSimpleVT() <= MVT::i64 &&
14521 DstTy.getSimpleVT() >= MVT::i16 &&
14522 "Unknown FP_TO_INT to lower!");
14524 // These are really Legal.
14525 if (DstTy == MVT::i32 &&
14526 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14527 return std::make_pair(SDValue(), SDValue());
14528 if (Subtarget->is64Bit() &&
14529 DstTy == MVT::i64 &&
14530 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14531 return std::make_pair(SDValue(), SDValue());
14533 // We lower FP->int64 either into FISTP64 followed by a load from a temporary
14534 // stack slot, or into the FTOL runtime function.
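  // Sketch of the FISTP path: an SSE value is first spilled and reloaded onto
  // the x87 stack (FLD), then FISTP writes the converted integer to the stack
  // slot, from which the caller loads the final result.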
14535 MachineFunction &MF = DAG.getMachineFunction();
14536 unsigned MemSize = DstTy.getSizeInBits()/8;
14537 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14538 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  unsigned Opc;
  if (!IsSigned && isIntegerTypeFTOL(DstTy))
    Opc = X86ISD::WIN_FTOL;
  else
    switch (DstTy.getSimpleVT().SimpleTy) {
14545 default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
14546 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
14547 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
    case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
    }
14551 SDValue Chain = DAG.getEntryNode();
14552 SDValue Value = Op.getOperand(0);
14553 EVT TheVT = Op.getOperand(0).getValueType();
14554 // FIXME This causes a redundant load/store if the SSE-class value is already
14555 // in memory, such as if it is on the callstack.
14556 if (isScalarFPTypeInSSEReg(TheVT)) {
14557 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
14558 Chain = DAG.getStore(Chain, DL, Value, StackSlot,
                         MachinePointerInfo::getFixedStack(SSFI),
                         false, false, 0);
14561 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
    SDValue Ops[] = {
      Chain, StackSlot, DAG.getValueType(TheVT)
    };
14566 MachineMemOperand *MMO =
14567 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14568 MachineMemOperand::MOLoad, MemSize, MemSize);
14569 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
14570 Chain = Value.getValue(1);
14571 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
    StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  }
14575 MachineMemOperand *MMO =
14576 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14577 MachineMemOperand::MOStore, MemSize, MemSize);
14579 if (Opc != X86ISD::WIN_FTOL) {
14580 // Build the FP_TO_INT*_IN_MEM
14581 SDValue Ops[] = { Chain, Value, StackSlot };
    SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
                                           Ops, DstTy, MMO);
    return std::make_pair(FIST, StackSlot);
  } else {
    SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL,
                               DAG.getVTList(MVT::Other, MVT::Glue),
                               Chain, Value);
14589 SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX,
14590 MVT::i32, ftol.getValue(1));
14591 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX,
14592 MVT::i32, eax.getValue(2));
14593 SDValue Ops[] = { eax, edx };
14594 SDValue pair = IsReplace
14595 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops)
14596 : DAG.getMergeValues(Ops, DL);
    return std::make_pair(pair, SDValue());
  }
}
14601 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
14602 const X86Subtarget *Subtarget) {
14603 MVT VT = Op->getSimpleValueType(0);
14604 SDValue In = Op->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  SDLoc dl(Op);
  // Optimize vectors in AVX mode:
  //
  //   v8i16 -> v8i32
  //   Use vpunpcklwd for 4 lower elements  v8i16 -> v4i32.
  //   Use vpunpckhwd for 4 upper elements  v8i16 -> v4i32.
  //   Concat upper and lower parts.
  //
  //   v4i32 -> v4i64
  //   Use vpunpckldq for 4 lower elements  v4i32 -> v2i64.
  //   Use vpunpckhdq for 4 upper elements  v4i32 -> v2i64.
  //   Concat upper and lower parts.
  //
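  // Unpacking against an all-zeros vector places a zero above each source
  // element, which is exactly per-lane zero-extension; unpacking against
  // undef gives the cheaper any_extend form instead.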
14621 if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) &&
14622 ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) &&
      ((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
    return SDValue();
14626 if (Subtarget->hasInt256())
14627 return DAG.getNode(X86ISD::VZEXT, dl, VT, In);
14629 SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
14630 SDValue Undef = DAG.getUNDEF(InVT);
14631 bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
14632 SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14633 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14635 MVT HVT = MVT::getVectorVT(VT.getVectorElementType(),
14636 VT.getVectorNumElements()/2);
14638 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo);
14639 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi);
  return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
}
14644 static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
14645 SelectionDAG &DAG) {
14646 MVT VT = Op->getSimpleValueType(0);
14647 SDValue In = Op->getOperand(0);
  MVT InVT = In.getSimpleValueType();
  SDLoc DL(Op);
  unsigned int NumElts = VT.getVectorNumElements();
  if (NumElts != 8 && NumElts != 16)
    return SDValue();
14654 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
14655 return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
  EVT ExtVT = (NumElts == 8) ? MVT::v8i64 : MVT::v16i32;
14658 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14659 // Now we have only mask extension
14660 assert(InVT.getVectorElementType() == MVT::i1);
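  // VBROADCASTM broadcasts the constant 1 under the i1 mask: lanes whose mask
  // bit is set become 1 and the rest become 0, i.e. the zero-extended mask.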
14661 SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
14662 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14663 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
14664 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14665 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14666 MachinePointerInfo::getConstantPool(),
14667 false, false, false, Alignment);
14669 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, DL, ExtVT, In, Ld);
  if (VT.is512BitVector())
    return Brcst;
  return DAG.getNode(X86ISD::VTRUNC, DL, VT, Brcst);
}
14675 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14676 SelectionDAG &DAG) {
14677 if (Subtarget->hasFp256()) {
    SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
    if (Res.getNode())
      return Res;
  }

  return SDValue();
}
14686 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14687 SelectionDAG &DAG) {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
14690 SDValue In = Op.getOperand(0);
14691 MVT SVT = In.getSimpleValueType();
14693 if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
14694 return LowerZERO_EXTEND_AVX512(Op, DAG);
14696 if (Subtarget->hasFp256()) {
    SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
    if (Res.getNode())
      return Res;
  }

  assert(!VT.is256BitVector() || !SVT.is128BitVector() ||
         VT.getVectorNumElements() != SVT.getVectorNumElements());
  return SDValue();
}
14707 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
14710 SDValue In = Op.getOperand(0);
14711 MVT InVT = In.getSimpleValueType();
14713 if (VT == MVT::i1) {
14714 assert((InVT.isInteger() && (InVT.getSizeInBits() <= 64)) &&
14715 "Invalid scalar TRUNCATE operation");
    if (InVT.getSizeInBits() >= 32)
      return SDValue();
    In = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, In);
    return DAG.getNode(ISD::TRUNCATE, DL, VT, In);
  }
14721 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
14722 "Invalid TRUNCATE operation");
14724 if (InVT.is512BitVector() || VT.getVectorElementType() == MVT::i1) {
    if (VT.getVectorElementType().getSizeInBits() >= 8)
14726 return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
14728 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
14729 unsigned NumElts = InVT.getVectorNumElements();
    assert((NumElts == 8 || NumElts == 16) && "Unexpected vector type");
14731 if (InVT.getSizeInBits() < 512) {
      MVT ExtVT = (NumElts == 16) ? MVT::v16i32 : MVT::v8i64;
      In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
      InVT = ExtVT;
    }
14737 SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
14738 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14739 SDValue CP = DAG.getConstantPool(C, getPointerTy());
14740 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14741 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14742 MachinePointerInfo::getConstantPool(),
14743 false, false, false, Alignment);
14744 SDValue OneV = DAG.getNode(X86ISD::VBROADCAST, DL, InVT, Ld);
14745 SDValue And = DAG.getNode(ISD::AND, DL, InVT, OneV, In);
    return DAG.getNode(X86ISD::TESTM, DL, VT, And, And);
  }
14749 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
14750 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
14751 if (Subtarget->hasInt256()) {
14752 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
14753 In = DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, In);
      In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32),
                                ShufMask);
      return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
                         DAG.getIntPtrConstant(0));
    }
14760 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14761 DAG.getIntPtrConstant(0));
14762 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14763 DAG.getIntPtrConstant(2));
14764 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14765 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14766 static const int ShufMask[] = {0, 2, 4, 6};
    return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
  }
14770 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
    // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
14772 if (Subtarget->hasInt256()) {
14773 In = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, In);
14775 SmallVector<SDValue,32> pshufbMask;
14776 for (unsigned i = 0; i < 2; ++i) {
14777 pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8));
14778 pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8));
14779 pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8));
14780 pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8));
14781 pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8));
14782 pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8));
14783 pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8));
14784 pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8));
14785 for (unsigned j = 0; j < 8; ++j)
          pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
      }
      SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
14789 In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
14790 In = DAG.getNode(ISD::BITCAST, DL, MVT::v4i64, In);
14792 static const int ShufMask[] = {0, 2, -1, -1};
      In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
                                &ShufMask[0]);
14795 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14796 DAG.getIntPtrConstant(0));
      return DAG.getNode(ISD::BITCAST, DL, VT, In);
    }
14800 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14801 DAG.getIntPtrConstant(0));
14803 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14804 DAG.getIntPtrConstant(4));
14806 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpLo);
14807 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpHi);
14809 // The PSHUFB mask:
14810 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
14811 -1, -1, -1, -1, -1, -1, -1, -1};
14813 SDValue Undef = DAG.getUNDEF(MVT::v16i8);
14814 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, Undef, ShufMask1);
14815 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, Undef, ShufMask1);
14817 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14818 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14820 // The MOVLHPS Mask:
14821 static const int ShufMask2[] = {0, 1, 4, 5};
14822 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
    return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, res);
  }
14826 // Handle truncation of V256 to V128 using shuffles.
  if (!VT.is128BitVector() || !InVT.is256BitVector())
    return SDValue();
14830 assert(Subtarget->hasFp256() && "256-bit vector without AVX!");
14832 unsigned NumElems = VT.getVectorNumElements();
14833 MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
14835 SmallVector<int, 16> MaskVec(NumElems * 2, -1);
14836 // Prepare truncation shuffle mask
14837 for (unsigned i = 0; i != NumElems; ++i)
14838 MaskVec[i] = i * 2;
14839 SDValue V = DAG.getVectorShuffle(NVT, DL,
14840 DAG.getNode(ISD::BITCAST, DL, NVT, In),
14841 DAG.getUNDEF(NVT), &MaskVec[0]);
14842 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
                     DAG.getIntPtrConstant(0));
}
14846 SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
14847 SelectionDAG &DAG) const {
14848 assert(!Op.getSimpleValueType().isVector());
14850 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14851 /*IsSigned=*/ true, /*IsReplace=*/ false);
14852 SDValue FIST = Vals.first, StackSlot = Vals.second;
14853 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
14854 if (!FIST.getNode()) return Op;
14856 if (StackSlot.getNode())
14857 // Load the result.
14858 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14859 FIST, StackSlot, MachinePointerInfo(),
14860 false, false, false, 0);
  // The node is the result.
  return FIST;
}
14866 SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
14867 SelectionDAG &DAG) const {
14868 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14869 /*IsSigned=*/ false, /*IsReplace=*/ false);
14870 SDValue FIST = Vals.first, StackSlot = Vals.second;
14871 assert(FIST.getNode() && "Unexpected failure");
14873 if (StackSlot.getNode())
14874 // Load the result.
14875 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14876 FIST, StackSlot, MachinePointerInfo(),
14877 false, false, false, 0);
  // The node is the result.
  return FIST;
}
14883 static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
14886 SDValue In = Op.getOperand(0);
14887 MVT SVT = In.getSimpleValueType();
14889 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
14891 return DAG.getNode(X86ISD::VFPEXT, DL, VT,
14892 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
                                 In, DAG.getUNDEF(SVT)));
}
14896 /// The only differences between FABS and FNEG are the mask and the logic op.
14897 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
14898 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
14899 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
14900 "Wrong opcode for lowering FABS or FNEG.");
14902 bool IsFABS = (Op.getOpcode() == ISD::FABS);
14904 // If this is a FABS and it has an FNEG user, bail out to fold the combination
14905 // into an FNABS. We'll lower the FABS after that if it is still in use.
  if (IsFABS)
    for (SDNode *User : Op->uses())
      if (User->getOpcode() == ISD::FNEG)
        return Op;
14911 SDValue Op0 = Op.getOperand(0);
14912 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
14916 // Assume scalar op for initialization; update for vector if needed.
14917 // Note that there are no scalar bitwise logical SSE/AVX instructions, so we
14918 // generate a 16-byte vector constant and logic op even for the scalar case.
  // Using a 16-byte mask allows folding the load of the mask with
  // the logic op, saving roughly 4 bytes of code size.
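  // E.g., for f32: FABS becomes 'andps x, [0x7fffffff splat]' (clear the sign
  // bit), FNEG becomes 'xorps x, [0x80000000 splat]' (flip it), and the fused
  // FNABS becomes 'orps x, [0x80000000 splat]' (force it).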
  MVT EltVT = VT;
  unsigned NumElts = VT == MVT::f64 ? 2 : 4;
14923 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
14924 // decide if we should generate a 16-byte constant mask when we only need 4 or
14925 // 8 bytes for the scalar case.
14926 if (VT.isVector()) {
14927 EltVT = VT.getVectorElementType();
    NumElts = VT.getVectorNumElements();
  }
14931 unsigned EltBits = EltVT.getSizeInBits();
14932 LLVMContext *Context = DAG.getContext();
14933 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
  APInt MaskElt =
      IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignBit(EltBits);
14936 Constant *C = ConstantInt::get(*Context, MaskElt);
14937 C = ConstantVector::getSplat(NumElts, C);
14938 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14939 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy());
14940 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
14941 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
14942 MachinePointerInfo::getConstantPool(),
14943 false, false, false, Alignment);
14945 if (VT.isVector()) {
14946 // For a vector, cast operands to a vector type, perform the logic op,
14947 // and cast the result back to the original value type.
14948 MVT VecVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
14949 SDValue MaskCasted = DAG.getNode(ISD::BITCAST, dl, VecVT, Mask);
14950 SDValue Operand = IsFNABS ?
14951 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0.getOperand(0)) :
14952 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0);
14953 unsigned BitOp = IsFABS ? ISD::AND : IsFNABS ? ISD::OR : ISD::XOR;
14954 return DAG.getNode(ISD::BITCAST, dl, VT,
                       DAG.getNode(BitOp, dl, VecVT, Operand, MaskCasted));
  }
14958 // If not vector, then scalar.
14959 unsigned BitOp = IsFABS ? X86ISD::FAND : IsFNABS ? X86ISD::FOR : X86ISD::FXOR;
14960 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
  return DAG.getNode(BitOp, dl, VT, Operand, Mask);
}
14964 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
14965 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14966 LLVMContext *Context = DAG.getContext();
14967 SDValue Op0 = Op.getOperand(0);
  SDValue Op1 = Op.getOperand(1);
  SDLoc dl(Op);
14970 MVT VT = Op.getSimpleValueType();
14971 MVT SrcVT = Op1.getSimpleValueType();
14973 // If second operand is smaller, extend it first.
14974 if (SrcVT.bitsLT(VT)) {
    Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
    SrcVT = VT;
  }
14978 // And if it is bigger, shrink it first.
14979 if (SrcVT.bitsGT(VT)) {
    Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1));
    SrcVT = VT;
  }
14984 // At this point the operands and the result should have the same
14985 // type, and that won't be f80 since that is not custom lowered.
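  // The lowering computes copysign(mag, sgn) as
  // (mag & ~signmask) | (sgn & signmask), using FAND/FOR over 16-byte
  // constant-pool masks.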
14987 const fltSemantics &Sem =
14988 VT == MVT::f64 ? APFloat::IEEEdouble : APFloat::IEEEsingle;
14989 const unsigned SizeInBits = VT.getSizeInBits();
14991 SmallVector<Constant *, 4> CV(
14992 VT == MVT::f64 ? 2 : 4,
14993 ConstantFP::get(*Context, APFloat(Sem, APInt(SizeInBits, 0))));
14995 // First, clear all bits but the sign bit from the second operand (sign).
14996 CV[0] = ConstantFP::get(*Context,
14997 APFloat(Sem, APInt::getHighBitsSet(SizeInBits, 1)));
14998 Constant *C = ConstantVector::get(CV);
14999 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
15000 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
15001 MachinePointerInfo::getConstantPool(),
15002 false, false, false, 16);
15003 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);
15005 // Next, clear the sign bit from the first operand (magnitude).
15006 // If it's a constant, we can clear it here.
15007 if (ConstantFPSDNode *Op0CN = dyn_cast<ConstantFPSDNode>(Op0)) {
15008 APFloat APF = Op0CN->getValueAPF();
15009 // If the magnitude is a positive zero, the sign bit alone is enough.
    if (APF.isPosZero())
      return SignBit;
    APF.clearSign();
    CV[0] = ConstantFP::get(*Context, APF);
  } else {
    CV[0] = ConstantFP::get(
        *Context,
        APFloat(Sem, APInt::getLowBitsSet(SizeInBits, SizeInBits - 1)));
  }
15019 C = ConstantVector::get(CV);
15020 CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
15021 SDValue Val = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
15022 MachinePointerInfo::getConstantPool(),
15023 false, false, false, 16);
15024 // If the magnitude operand wasn't a constant, we need to AND out the sign.
15025 if (!isa<ConstantFPSDNode>(Op0))
15026 Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Val);
15028 // OR the magnitude value with the sign bit.
  return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
}
15032 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
  SDValue N0 = Op.getOperand(0);
  SDLoc dl(Op);
15035 MVT VT = Op.getSimpleValueType();
15037 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
15038 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
15039 DAG.getConstant(1, VT));
  return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
}
15043 // Check whether an OR'd tree is PTEST-able.
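// For example, '(e0 | e1 | e2 | e3) == 0', where each ei is extracted from the
// same v4i32 value, can be selected as a single 'ptest %xmm, %xmm', which sets
// ZF exactly when the whole vector is zero.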
15044 static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
15045 SelectionDAG &DAG) {
15046 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
  if (!Subtarget->hasSSE41())
    return SDValue();

  if (!Op->hasOneUse())
    return SDValue();
  SDNode *N = Op.getNode();
  SDLoc DL(N);
15057 SmallVector<SDValue, 8> Opnds;
15058 DenseMap<SDValue, unsigned> VecInMap;
15059 SmallVector<SDValue, 8> VecIns;
15060 EVT VT = MVT::Other;
  // Recognize a special case where a vector is cast into a wide integer to
  // test all 0s.
15064 Opnds.push_back(N->getOperand(0));
15065 Opnds.push_back(N->getOperand(1));
15067 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
15068 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
15069 // BFS traverse all OR'd operands.
15070 if (I->getOpcode() == ISD::OR) {
15071 Opnds.push_back(I->getOperand(0));
15072 Opnds.push_back(I->getOperand(1));
15073 // Re-evaluate the number of nodes to be traversed.
      e += 2; // 2 more nodes (LHS and RHS) are pushed.
      continue;
    }
    // Quit if this is not an EXTRACT_VECTOR_ELT.
    if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();
    // Quit if the index is not a constant.
    SDValue Idx = I->getOperand(1);
    if (!isa<ConstantSDNode>(Idx))
      return SDValue();
15087 SDValue ExtractedFromVec = I->getOperand(0);
15088 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
15089 if (M == VecInMap.end()) {
15090 VT = ExtractedFromVec.getValueType();
15091 // Quit if not 128/256-bit vector.
      if (!VT.is128BitVector() && !VT.is256BitVector())
        return SDValue();
15094 // Quit if not the same type.
15095 if (VecInMap.begin() != VecInMap.end() &&
          VT != VecInMap.begin()->first.getValueType())
        return SDValue();
15098 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
      VecIns.push_back(ExtractedFromVec);
    }

    M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
  }
15104 assert((VT.is128BitVector() || VT.is256BitVector()) &&
15105 "Not extracted from 128-/256-bit vector.");
15107 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
15109 for (DenseMap<SDValue, unsigned>::const_iterator
15110 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
15111 // Quit if not all elements are used.
    if (I->second != FullMask)
      return SDValue();
  }
15116 EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
15118 // Cast all vectors into TestVT for PTEST.
15119 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
15120 VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]);
  // If more than one full vector is evaluated, OR them first before PTEST.
15123 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
15124 // Each iteration will OR 2 nodes and append the result until there is only
15125 // 1 node left, i.e. the final OR'd value of all vectors.
15126 SDValue LHS = VecIns[Slot];
15127 SDValue RHS = VecIns[Slot + 1];
15128 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
15131 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
                     VecIns.back(), VecIns.back());
}
15135 /// \brief return true if \c Op has a use that doesn't just read flags.
15136 static bool hasNonFlagsUse(SDValue Op) {
15137 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
       ++UI) {
    SDNode *User = *UI;
15140 unsigned UOpNo = UI.getOperandNo();
15141 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
      // Look past the truncate.
      UOpNo = User->use_begin().getOperandNo();
      User = *User->use_begin();
    }
15147 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
        !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
      return true;
  }
  return false;
}
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent.
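/// For example, the EFLAGS produced by an existing 'and x, y' can often be
/// reused for 'seteq (and x, y), 0' instead of emitting a separate
/// 'test x, y', provided the condition doesn't rely on CF or OF.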
15156 SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
15157 SelectionDAG &DAG) const {
15158 if (Op.getValueType() == MVT::i1) {
15159 SDValue ExtOp = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Op);
15160 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, ExtOp,
                       DAG.getConstant(0, MVT::i8));
  }
15163 // CF and OF aren't always set the way we want. Determine which
15164 // of these we need.
15165 bool NeedCF = false;
  bool NeedOF = false;
  switch (X86CC) {
  default: break;
  case X86::COND_A: case X86::COND_AE:
  case X86::COND_B: case X86::COND_BE:
    NeedCF = true;
    break;
  case X86::COND_G: case X86::COND_GE:
15174 case X86::COND_L: case X86::COND_LE:
15175 case X86::COND_O: case X86::COND_NO: {
15176 // Check if we really need to set the
15177 // Overflow flag. If NoSignedWrap is present
15178 // that is not actually needed.
    switch (Op->getOpcode()) {
    case ISD::ADD:
    case ISD::SUB:
    case ISD::MUL:
    case ISD::SHL: {
      const BinaryWithFlagsSDNode *BinNode =
          cast<BinaryWithFlagsSDNode>(Op.getNode());
      if (BinNode->hasNoSignedWrap())
        break;
    }
    default:
      NeedOF = true;
      break;
    }
    break;
  }
15196 // See if we can use the EFLAGS value from the operand instead of
15197 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
15198 // we prove that the arithmetic won't overflow, we can't use OF or CF.
15199 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
15200 // Emit a CMP with 0, which is the TEST pattern.
15201 //if (Op.getValueType() == MVT::i1)
15202 // return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op,
15203 // DAG.getConstant(0, MVT::i1));
15204 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
                       DAG.getConstant(0, Op.getValueType()));
  }
15207 unsigned Opcode = 0;
15208 unsigned NumOperands = 0;
15210 // Truncate operations may prevent the merge of the SETCC instruction
15211 // and the arithmetic instruction before it. Attempt to truncate the operands
15212 // of the arithmetic instruction and use a reduced bit-width instruction.
15213 bool NeedTruncation = false;
15214 SDValue ArithOp = Op;
15215 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
15216 SDValue Arith = Op->getOperand(0);
15217 // Both the trunc and the arithmetic op need to have one user each.
15218 if (Arith->hasOneUse())
      switch (Arith.getOpcode()) {
      default: break;
      case ISD::ADD:
      case ISD::SUB:
      case ISD::AND:
      case ISD::OR:
      case ISD::XOR: {
        NeedTruncation = true;
        ArithOp = Arith;
      }
      }
  }
15232 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
15233 // which may be the result of a CAST. We use the variable 'Op', which is the
15234 // non-casted variable when we check for possible users.
  switch (ArithOp.getOpcode()) {
  case ISD::ADD:
15237 // Due to an isel shortcoming, be conservative if this add is likely to be
15238 // selected as part of a load-modify-store instruction. When the root node
15239 // in a match is a store, isel doesn't know how to remap non-chain non-flag
15240 // uses of other nodes in the match, such as the ADD in this case. This
15241 // leads to the ADD being left around and reselected, with the result being
  // two adds in the output. Alas, even if none of our users are stores, that
15243 // doesn't prove we're O.K. Ergo, if we have any parents that aren't
15244 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
  // climbing the DAG back to the root, and it doesn't seem to be worth the
  // trouble.
15247 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15248 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15249 if (UI->getOpcode() != ISD::CopyToReg &&
15250 UI->getOpcode() != ISD::SETCC &&
          UI->getOpcode() != ISD::STORE)
        goto default_case;
15254 if (ConstantSDNode *C =
15255 dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
15256 // An add of one will be selected as an INC.
      if (C->getAPIntValue() == 1 && !Subtarget->slowIncDec()) {
        Opcode = X86ISD::INC;
        NumOperands = 1;
        break;
      }

      // An add of negative one (subtract of one) will be selected as a DEC.
      if (C->getAPIntValue().isAllOnesValue() && !Subtarget->slowIncDec()) {
        Opcode = X86ISD::DEC;
        NumOperands = 1;
        break;
      }
    }

    // Otherwise use a regular EFLAGS-setting add.
    Opcode = X86ISD::ADD;
    NumOperands = 2;
    break;
  case ISD::SHL:
  case ISD::SRL:
15277 // If we have a constant logical shift that's only used in a comparison
15278 // against zero turn it into an equivalent AND. This allows turning it into
15279 // a TEST instruction later.
15280 if ((X86CC == X86::COND_E || X86CC == X86::COND_NE) && Op->hasOneUse() &&
15281 isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
15282 EVT VT = Op.getValueType();
15283 unsigned BitWidth = VT.getSizeInBits();
15284 unsigned ShAmt = Op->getConstantOperandVal(1);
      if (ShAmt >= BitWidth) // Avoid undefined shifts.
        break;
15287 APInt Mask = ArithOp.getOpcode() == ISD::SRL
15288 ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
15289 : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
      if (!Mask.isSignedIntN(32)) // Avoid large immediates.
        break;
15292 SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
15293 DAG.getConstant(Mask, VT));
      DAG.ReplaceAllUsesWith(Op, New);
      Op = New;
    }
    break;

  case ISD::AND:
    // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
15301 // because a TEST instruction will be better.
    if (!hasNonFlagsUse(Op))
      break;
    // FALL THROUGH
  case ISD::SUB:
  case ISD::OR:
  case ISD::XOR:
15308 // Due to the ISEL shortcoming noted above, be conservative if this op is
15309 // likely to be selected as part of a load-modify-store instruction.
15310 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15311 UE = Op.getNode()->use_end(); UI != UE; ++UI)
      if (UI->getOpcode() == ISD::STORE)
        goto default_case;
15315 // Otherwise use a regular EFLAGS-setting instruction.
15316 switch (ArithOp.getOpcode()) {
15317 default: llvm_unreachable("unexpected operator!");
15318 case ISD::SUB: Opcode = X86ISD::SUB; break;
15319 case ISD::XOR: Opcode = X86ISD::XOR; break;
    case ISD::AND: Opcode = X86ISD::AND; break;
    case ISD::OR: {
      if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
        SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG);
        if (EFLAGS.getNode())
          return EFLAGS;
      }
      Opcode = X86ISD::OR;
      break;
    }
    }

    NumOperands = 2;
    break;
  case X86ISD::ADD:
  case X86ISD::SUB:
  case X86ISD::INC:
  case X86ISD::DEC:
  case X86ISD::OR:
  case X86ISD::XOR:
  case X86ISD::AND:
    return SDValue(Op.getNode(), 1);
  default:
  default_case:
    break;
  }
  // If we found that truncation is beneficial, perform the truncation and
  // update 'Op'.
15349 if (NeedTruncation) {
15350 EVT VT = Op.getValueType();
15351 SDValue WideVal = Op->getOperand(0);
15352 EVT WideVT = WideVal.getValueType();
15353 unsigned ConvertedOp = 0;
15354 // Use a target machine opcode to prevent further DAGCombine
15355 // optimizations that may separate the arithmetic operations
15356 // from the setcc node.
    switch (WideVal.getOpcode()) {
    default: break;
15359 case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
15360 case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
15361 case ISD::AND: ConvertedOp = X86ISD::AND; break;
15362 case ISD::OR: ConvertedOp = X86ISD::OR; break;
    case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
    }

    if (ConvertedOp) {
15367 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15368 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
15369 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
15370 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
        Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1);
      }
    }
  }

  if (Opcode == 0)
    // Emit a CMP with 0, which is the TEST pattern.
15378 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15379 DAG.getConstant(0, Op.getValueType()));
15381 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
15382 SmallVector<SDValue, 4> Ops(Op->op_begin(), Op->op_begin() + NumOperands);
15384 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
15385 DAG.ReplaceAllUsesWith(Op, New);
  return SDValue(New.getNode(), 1);
}
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent.
15391 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
15392 SDLoc dl, SelectionDAG &DAG) const {
15393 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) {
15394 if (C->getAPIntValue() == 0)
15395 return EmitTest(Op0, X86CC, dl, DAG);
15397 if (Op0.getValueType() == MVT::i1)
      llvm_unreachable("Unexpected comparison operation for MVT::i1 operands");
  }
15401 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
15402 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
15403 // Do the comparison at i32 if it's smaller, besides the Atom case.
15404 // This avoids subregister aliasing issues. Keep the smaller reference
15405 // if we're optimizing for size, however, as that'll allow better folding
15406 // of memory operations.
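    // (Widening also sidesteps the partial-register stalls that sub-32-bit
    // operations can incur on some microarchitectures.)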
15407 if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
15408 !DAG.getMachineFunction().getFunction()->hasFnAttribute(
15409 Attribute::MinSize) &&
15410 !Subtarget->isAtom()) {
15411 unsigned ExtendOp =
15412 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
15413 Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
      Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
    }
    // Use SUB instead of CMP to enable CSE between SUB and CMP.
    SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
    SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs, Op0, Op1);
    return SDValue(Sub.getNode(), 1);
  }
  return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
}
15425 /// Convert a comparison if required by the subtarget.
15426 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
15427 SelectionDAG &DAG) const {
15428 // If the subtarget does not support the FUCOMI instruction, floating-point
15429 // comparisons have to be converted.
15430 if (Subtarget->hasCMov() ||
15431 Cmp.getOpcode() != X86ISD::CMP ||
15432 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
      !Cmp.getOperand(1).getValueType().isFloatingPoint())
    return Cmp;
15436 // The instruction selector will select an FUCOM instruction instead of
15437 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
15438 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
15439 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
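  // FNSTSW copies the x87 status word (condition bits C0..C3) into AX;
  // shifting right by 8 moves those bits into the low byte, and SAHF then
  // stores that byte into EFLAGS, where ordinary branches can test it.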
  SDLoc dl(Cmp);
  SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
15442 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
15443 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
15444 DAG.getConstant(8, MVT::i8));
15445 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
  return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
}
15449 /// The minimum architected relative accuracy is 2^-12. We need one
15450 /// Newton-Raphson step to have a good float result (24 bits of precision).
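/// Each Newton-Raphson step for rsqrt computes
///   est' = est * (1.5 - 0.5 * x * est * est),
/// roughly doubling the number of correct mantissa bits per iteration.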
15451 SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
15452 DAGCombinerInfo &DCI,
15453 unsigned &RefinementSteps,
15454 bool &UseOneConstNR) const {
15455 // FIXME: We should use instruction latency models to calculate the cost of
15456 // each potential sequence, but this is very hard to do reliably because
15457 // at least Intel's Core* chips have variable timing based on the number of
15458 // significant digits in the divisor and/or sqrt operand.
  if (!Subtarget->useSqrtEst())
    return SDValue();
15462 EVT VT = Op.getValueType();
15464 // SSE1 has rsqrtss and rsqrtps.
15465 // TODO: Add support for AVX512 (v16f32).
15466 // It is likely not profitable to do this for f64 because a double-precision
15467 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
15468 // instructions: convert to single, rsqrtss, convert back to double, refine
15469 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
15470 // along with FMA, this could be a throughput win.
15471 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15472 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15473 RefinementSteps = 1;
15474 UseOneConstNR = false;
    return DCI.DAG.getNode(X86ISD::FRSQRT, SDLoc(Op), VT, Op);
  }
  return SDValue();
}
15480 /// The minimum architected relative accuracy is 2^-12. We need one
15481 /// Newton-Raphson step to have a good float result (24 bits of precision).
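/// Here each refinement step computes est' = est * (2.0 - x * est), likewise
/// roughly doubling the number of correct bits, so one step suffices for f32.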
15482 SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
15483 DAGCombinerInfo &DCI,
15484 unsigned &RefinementSteps) const {
15485 // FIXME: We should use instruction latency models to calculate the cost of
15486 // each potential sequence, but this is very hard to do reliably because
15487 // at least Intel's Core* chips have variable timing based on the number of
15488 // significant digits in the divisor.
  if (!Subtarget->useReciprocalEst())
    return SDValue();
15492 EVT VT = Op.getValueType();
15494 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
15495 // TODO: Add support for AVX512 (v16f32).
15496 // It is likely not profitable to do this for f64 because a double-precision
15497 // reciprocal estimate with refinement on x86 prior to FMA requires
15498 // 15 instructions: convert to single, rcpss, convert back to double, refine
15499 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
15500 // along with FMA, this could be a throughput win.
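// For reference, each refinement step here is a Newton-Raphson iteration for
// 1/a; with the hardware estimate x0 = rcp(a), one step computes
// (illustrative, not the code below):
//   x1 = x0 * (2 - a * x0)
// again roughly doubling the number of correct bits per step.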
15501 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15502 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15503 RefinementSteps = ReciprocalEstimateRefinementSteps;
15504 return DCI.DAG.getNode(X86ISD::FRCP, SDLoc(Op), VT, Op);
15505 }
15506 return SDValue();
15507 }
15509 static bool isAllOnes(SDValue V) {
15510 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
15511 return C && C->isAllOnesValue();
15514 /// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
15515 /// if it's possible.
15516 SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
15517 SDLoc dl, SelectionDAG &DAG) const {
15518 SDValue Op0 = And.getOperand(0);
15519 SDValue Op1 = And.getOperand(1);
15520 if (Op0.getOpcode() == ISD::TRUNCATE)
15521 Op0 = Op0.getOperand(0);
15522 if (Op1.getOpcode() == ISD::TRUNCATE)
15523 Op1 = Op1.getOperand(0);
15525 SDValue LHS, RHS;
15526 if (Op1.getOpcode() == ISD::SHL)
15527 std::swap(Op0, Op1);
15528 if (Op0.getOpcode() == ISD::SHL) {
15529 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0)))
15530 if (And00C->getZExtValue() == 1) {
15531 // If we looked past a truncate, check that it's only truncating away
15532 // known zeros.
15533 unsigned BitWidth = Op0.getValueSizeInBits();
15534 unsigned AndBitWidth = And.getValueSizeInBits();
15535 if (BitWidth > AndBitWidth) {
15536 APInt Zeros, Ones;
15537 DAG.computeKnownBits(Op0, Zeros, Ones);
15538 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
15539 return SDValue();
15540 }
15541 LHS = Op1;
15542 RHS = Op0.getOperand(1);
15543 }
15544 } else if (Op1.getOpcode() == ISD::Constant) {
15545 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
15546 uint64_t AndRHSVal = AndRHS->getZExtValue();
15547 SDValue AndLHS = Op0;
15549 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
15550 LHS = AndLHS.getOperand(0);
15551 RHS = AndLHS.getOperand(1);
15552 }
15554 // Use BT if the immediate can't be encoded in a TEST instruction.
15555 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
15556 LHS = AndLHS;
15557 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType());
15558 }
15559 }
15561 if (LHS.getNode()) {
15562 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
15563 // instruction. Since the shift amount is in-range-or-undefined, we know
15564 // that doing a bittest on the i32 value is ok. We extend to i32 because
15565 // the encoding for the i16 version is larger than the i32 version.
15566 // Also promote i16 to i32 for performance / code size reasons.
15567 if (LHS.getValueType() == MVT::i8 ||
15568 LHS.getValueType() == MVT::i16)
15569 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
15571 // If the operand types disagree, extend the shift amount to match. Since
15572 // BT ignores high bits (like shifts) we can use anyextend.
15573 if (LHS.getValueType() != RHS.getValueType())
15574 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
15576 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
15577 X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
15578 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15579 DAG.getConstant(Cond, MVT::i8), BT);
15580 }
15582 return SDValue();
15583 }
15585 /// \brief - Turns an ISD::CondCode into a value suitable for SSE floating point
15586 /// mask CMPs.
15587 static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
15588 SDValue &Op1) {
15589 unsigned SSECC;
15590 bool Swap = false;
15592 // SSE Condition code mapping:
15593 //  0 - EQ
15594 //  1 - LT
15595 //  2 - LE
15596 //  3 - UNORD
15597 //  4 - NEQ
15598 //  5 - NLT
15599 //  6 - NLE
15600 //  7 - ORD
15601 switch (SetCCOpcode) {
15602 default: llvm_unreachable("Unexpected SETCC condition");
15603 case ISD::SETOEQ:
15604 case ISD::SETEQ: SSECC = 0; break;
15605 case ISD::SETOGT:
15606 case ISD::SETGT: Swap = true; // Fallthrough
15607 case ISD::SETLT:
15608 case ISD::SETOLT: SSECC = 1; break;
15609 case ISD::SETOGE:
15610 case ISD::SETGE: Swap = true; // Fallthrough
15611 case ISD::SETLE:
15612 case ISD::SETOLE: SSECC = 2; break;
15613 case ISD::SETUO: SSECC = 3; break;
15614 case ISD::SETUNE:
15615 case ISD::SETNE: SSECC = 4; break;
15616 case ISD::SETULE: Swap = true; // Fallthrough
15617 case ISD::SETUGE: SSECC = 5; break;
15618 case ISD::SETULT: Swap = true; // Fallthrough
15619 case ISD::SETUGT: SSECC = 6; break;
15620 case ISD::SETO: SSECC = 7; break;
15621 case ISD::SETUEQ:
15622 case ISD::SETONE: SSECC = 8; break;
15623 }
15624 if (Swap)
15625 std::swap(Op0, Op1);
15627 return SSECC;
15628 }
15630 // Lower256IntVSETCC - Break a 256-bit integer VSETCC into two new 128-bit
15631 // ones, and then concatenate the result back.
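// Illustrative example (one case of the code below): on an AVX1-only target
// a v8i32 setcc becomes
//   concat_vectors (setcc v4i32 lo(a), lo(b)), (setcc v4i32 hi(a), hi(b))
// because 256-bit integer compares require AVX2.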
15632 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
15633 MVT VT = Op.getSimpleValueType();
15635 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
15636 "Unsupported value type for operation");
15638 unsigned NumElems = VT.getVectorNumElements();
15639 SDLoc dl(Op);
15640 SDValue CC = Op.getOperand(2);
15642 // Extract the LHS vectors
15643 SDValue LHS = Op.getOperand(0);
15644 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
15645 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
15647 // Extract the RHS vectors
15648 SDValue RHS = Op.getOperand(1);
15649 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
15650 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
15652 // Issue the operation on the smaller types and concatenate the result back
15653 MVT EltVT = VT.getVectorElementType();
15654 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
15655 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
15656 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
15657 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
15660 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
15661 const X86Subtarget *Subtarget) {
15662 SDValue Op0 = Op.getOperand(0);
15663 SDValue Op1 = Op.getOperand(1);
15664 SDValue CC = Op.getOperand(2);
15665 MVT VT = Op.getSimpleValueType();
15666 SDLoc dl(Op);
15668 assert(Op0.getValueType().getVectorElementType().getSizeInBits() >= 8 &&
15669 Op.getValueType().getScalarType() == MVT::i1 &&
15670 "Cannot set masked compare for this operation");
15672 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15673 unsigned Opc = 0;
15674 bool Unsigned = false;
15675 bool Swap = false;
15676 unsigned SSECC = 0;
15677 switch (SetCCOpcode) {
15678 default: llvm_unreachable("Unexpected SETCC condition");
15679 case ISD::SETNE: SSECC = 4; break;
15680 case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
15681 case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
15682 case ISD::SETLT: Swap = true; //fall-through
15683 case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
15684 case ISD::SETULT: SSECC = 1; Unsigned = true; break;
15685 case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
15686 case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
15687 case ISD::SETULE: Unsigned = true; //fall-through
15688 case ISD::SETLE: SSECC = 2; break;
15689 }
15691 if (Swap)
15692 std::swap(Op0, Op1);
15693 if (Opc)
15694 return DAG.getNode(Opc, dl, VT, Op0, Op1);
15695 Opc = Unsigned ? X86ISD::CMPMU : X86ISD::CMPM;
15696 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15697 DAG.getConstant(SSECC, MVT::i8));
15700 /// \brief Try to turn a VSETULT into a VSETULE by modifying its second
15701 /// operand \p Op1. If non-trivial (for example because it's not constant)
15702 /// return an empty value.
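// Illustrative example (not from the code below): x u< <10, 20, 30, 40> can
// be rewritten as x u<= <9, 19, 29, 39>, since every constant is non-zero.
// The zero check in the loop exists because 'u< 0' is always false and has
// no 'u<=' form (0 - 1 would wrap around).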
15703 static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
15704 {
15705 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
15706 if (!BV)
15707 return SDValue();
15709 MVT VT = Op1.getSimpleValueType();
15710 MVT EVT = VT.getVectorElementType();
15711 unsigned n = VT.getVectorNumElements();
15712 SmallVector<SDValue, 8> ULTOp1;
15714 for (unsigned i = 0; i < n; ++i) {
15715 ConstantSDNode *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
15716 if (!Elt || Elt->isOpaque() || Elt->getValueType(0) != EVT)
15717 return SDValue();
15719 // Avoid underflow.
15720 APInt Val = Elt->getAPIntValue();
15721 if (Val == 0)
15722 return SDValue();
15724 ULTOp1.push_back(DAG.getConstant(Val - 1, EVT));
15725 }
15727 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
15730 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
15731 SelectionDAG &DAG) {
15732 SDValue Op0 = Op.getOperand(0);
15733 SDValue Op1 = Op.getOperand(1);
15734 SDValue CC = Op.getOperand(2);
15735 MVT VT = Op.getSimpleValueType();
15736 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15737 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
15738 SDLoc dl(Op);
15740 if (isFP) {
15741 #ifndef NDEBUG
15742 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
15743 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
15744 #endif
15746 unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
15747 unsigned Opc = X86ISD::CMPP;
15748 if (Subtarget->hasAVX512() && VT.getVectorElementType() == MVT::i1) {
15749 assert(VT.getVectorNumElements() <= 16);
15750 Opc = X86ISD::CMPM;
15751 }
15752 // In the two special cases we can't handle, emit two comparisons.
15753 if (SSECC == 8) {
15754 unsigned CC0, CC1;
15755 unsigned CombineOpc;
15756 if (SetCCOpcode == ISD::SETUEQ) {
15757 CC0 = 3; CC1 = 0; CombineOpc = ISD::OR;
15758 } else {
15759 assert(SetCCOpcode == ISD::SETONE);
15760 CC0 = 7; CC1 = 4; CombineOpc = ISD::AND;
15763 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15764 DAG.getConstant(CC0, MVT::i8));
15765 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15766 DAG.getConstant(CC1, MVT::i8));
15767 return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
15769 // Handle all other FP comparisons here.
15770 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15771 DAG.getConstant(SSECC, MVT::i8));
15774 // Break 256-bit integer vector compare into smaller ones.
15775 if (VT.is256BitVector() && !Subtarget->hasInt256())
15776 return Lower256IntVSETCC(Op, DAG);
15778 bool MaskResult = (VT.getVectorElementType() == MVT::i1);
15779 EVT OpVT = Op1.getValueType();
15780 if (Subtarget->hasAVX512()) {
15781 if (Op1.getValueType().is512BitVector() ||
15782 (Subtarget->hasBWI() && Subtarget->hasVLX()) ||
15783 (MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32))
15784 return LowerIntVSETCC_AVX512(Op, DAG, Subtarget);
15786 // In the AVX-512 architecture, setcc returns a mask with i1 elements,
15787 // but there is no compare instruction for i8 and i16 elements in KNL.
15788 // We are not talking about 512-bit operands in this case; those
15789 // types are illegal.
15790 if (MaskResult &&
15791 (OpVT.getVectorElementType().getSizeInBits() < 32 &&
15792 OpVT.getVectorElementType().getSizeInBits() >= 8))
15793 return DAG.getNode(ISD::TRUNCATE, dl, VT,
15794 DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
15795 }
15797 // We are handling one of the integer comparisons here. Since SSE only has
15798 // GT and EQ comparisons for integer, swapping operands and multiple
15799 // operations may be required for some comparisons.
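// Illustrative examples of how the three knobs set up below combine:
//   a s< b   ->  pcmpgt b, a                          (Swap)
//   a s<= b  ->  not (pcmpgt a, b)                    (Invert)
//   a u> b   ->  pcmpgt (a ^ signbit), (b ^ signbit)  (FlipSigns)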
15800 unsigned Opc;
15801 bool Swap = false, Invert = false, FlipSigns = false, MinMax = false;
15802 bool Subus = false;
15804 switch (SetCCOpcode) {
15805 default: llvm_unreachable("Unexpected SETCC condition");
15806 case ISD::SETNE: Invert = true;
15807 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
15808 case ISD::SETLT: Swap = true;
15809 case ISD::SETGT: Opc = X86ISD::PCMPGT; break;
15810 case ISD::SETGE: Swap = true;
15811 case ISD::SETLE: Opc = X86ISD::PCMPGT;
15812 Invert = true; break;
15813 case ISD::SETULT: Swap = true;
15814 case ISD::SETUGT: Opc = X86ISD::PCMPGT;
15815 FlipSigns = true; break;
15816 case ISD::SETUGE: Swap = true;
15817 case ISD::SETULE: Opc = X86ISD::PCMPGT;
15818 FlipSigns = true; Invert = true; break;
15821 // Special case: Use min/max operations for SETULE/SETUGE
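// Illustrative example: with UMIN available, a u<= b becomes
//   pcmpeq (umin a, b), a
// since a u<= b exactly when min(a, b) == a; no sign-flip or inversion is
// needed, which is why those flags are cleared when MinMax is chosen.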
15822 MVT VET = VT.getVectorElementType();
15823 bool hasMinMax =
15824 (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
15825 || (Subtarget->hasSSE2() && (VET == MVT::i8));
15827 if (hasMinMax) {
15828 switch (SetCCOpcode) {
15829 default: break;
15830 case ISD::SETULE: Opc = X86ISD::UMIN; MinMax = true; break;
15831 case ISD::SETUGE: Opc = X86ISD::UMAX; MinMax = true; break;
15832 }
15834 if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
15835 }
15837 bool hasSubus = Subtarget->hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
15838 if (!MinMax && hasSubus) {
15839 // As another special case, use PSUBUS[BW] when it's profitable. E.g. for
15840 // Op0 u<= Op1:
15841 // t = psubus Op0, Op1
15842 // pcmpeq t, <0..0>
15843 switch (SetCCOpcode) {
15844 default: break;
15845 case ISD::SETULT: {
15846 // If the comparison is against a constant we can turn this into a
15847 // setule. With psubus, setule does not require a swap. This is
15848 // beneficial because the constant in the register is no longer
15849 // clobbered as the destination, so it can be hoisted out of a loop.
15850 // Only do this pre-AVX since vpcmp* is no longer destructive.
15851 if (Subtarget->hasAVX())
15852 break;
15853 SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
15854 if (ULEOp1.getNode()) {
15855 Op1 = ULEOp1;
15856 Subus = true; Invert = false; Swap = false;
15857 }
15858 break;
15859 }
15860 // Psubus is better than flip-sign because it requires no inversion.
15861 case ISD::SETUGE: Subus = true; Invert = false; Swap = true; break;
15862 case ISD::SETULE: Subus = true; Invert = false; Swap = false; break;
15863 }
15865 if (Subus) {
15866 Opc = X86ISD::SUBUS;
15867 FlipSigns = false;
15868 }
15869 }
15871 if (Swap)
15872 std::swap(Op0, Op1);
15874 // Check that the operation in question is available (most are plain SSE2,
15875 // but PCMPGTQ and PCMPEQQ have different requirements).
15876 if (VT == MVT::v2i64) {
15877 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) {
15878 assert(Subtarget->hasSSE2() && "Don't know how to lower!");
15880 // First cast everything to the right type.
15881 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15882 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15884 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15885 // bits of the inputs before performing those operations. The lower
15886 // compare is always unsigned.
15887 SDValue SB;
15888 if (FlipSigns) {
15889 SB = DAG.getConstant(0x80000000U, MVT::v4i32);
15890 } else {
15891 SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32);
15892 SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32);
15893 SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
15894 Sign, Zero, Sign, Zero);
15895 }
15896 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
15897 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
15899 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
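// The sign-bit XOR above is what makes this work: comparing the halves needs
// a signed compare on the high dwords but an unsigned one on the low dwords,
// and a sign-flipped signed pcmpgtd acts as an unsigned compare. Hence only
// the low dwords are flipped in the signed case, and all dwords when the
// whole 64-bit compare is unsigned (FlipSigns).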
15900 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
15901 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
15903 // Create masks for only the low parts/high parts of the 64 bit integers.
15904 static const int MaskHi[] = { 1, 1, 3, 3 };
15905 static const int MaskLo[] = { 0, 0, 2, 2 };
15906 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
15907 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
15908 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
15910 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
15911 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
15913 if (Invert)
15914 Result = DAG.getNOT(dl, Result, MVT::v4i32);
15916 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
15917 }
15919 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) {
15920 // If pcmpeqq is missing but pcmpeqd is available, synthesize pcmpeqq with
15921 // pcmpeqd + pshufd + pand.
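// Illustrative sketch of the sequence (assuming dwords d0..d3):
//   eq   = pcmpeqd a, b          ; per-dword equality
//   shuf = pshufd eq, {1,0,3,2}  ; swap the two dwords within each qword
//   res  = pand eq, shuf         ; a qword is all-ones iff both dwords matched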
15922 assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!");
15924 // First cast everything to the right type.
15925 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15926 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15929 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
15931 // Make sure the lower and upper halves are both all-ones.
15932 static const int Mask[] = { 1, 0, 3, 2 };
15933 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
15934 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
15936 if (Invert)
15937 Result = DAG.getNOT(dl, Result, MVT::v4i32);
15939 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
15940 }
15941 }
15943 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15944 // bits of the inputs before performing those operations.
15945 if (FlipSigns) {
15946 EVT EltVT = VT.getVectorElementType();
15947 SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT);
15948 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB);
15949 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB);
15950 }
15952 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
15954 // If the logical-not of the result is required, perform that now.
15955 if (Invert)
15956 Result = DAG.getNOT(dl, Result, VT);
15958 if (MinMax)
15959 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
15961 if (Subus)
15962 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
15963 getZeroVector(VT, Subtarget, DAG, dl));
15965 return Result;
15966 }
15968 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
15970 MVT VT = Op.getSimpleValueType();
15972 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
15974 assert(((!Subtarget->hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
15975 && "SetCC type must be 8-bit or 1-bit integer");
15976 SDValue Op0 = Op.getOperand(0);
15977 SDValue Op1 = Op.getOperand(1);
15978 SDLoc dl(Op);
15979 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
15981 // Optimize to BT if possible.
15982 // Lower (X & (1 << N)) == 0 to BT(X, N).
15983 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
15984 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
15985 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
15986 Op1.getOpcode() == ISD::Constant &&
15987 cast<ConstantSDNode>(Op1)->isNullValue() &&
15988 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15989 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
15990 if (NewSetCC.getNode()) {
15991 if (VT == MVT::i1)
15992 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
15993 return NewSetCC;
15994 }
15995 }
15997 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
15998 // these.
15999 if (Op1.getOpcode() == ISD::Constant &&
16000 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
16001 cast<ConstantSDNode>(Op1)->isNullValue()) &&
16002 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16004 // If the input is a setcc, then reuse the input setcc or use a new one with
16005 // the inverted condition.
16006 if (Op0.getOpcode() == X86ISD::SETCC) {
16007 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
16008 bool Invert = (CC == ISD::SETNE) ^
16009 cast<ConstantSDNode>(Op1)->isNullValue();
16010 if (!Invert)
16011 return Op0;
16013 CCode = X86::GetOppositeBranchCondition(CCode);
16014 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
16015 DAG.getConstant(CCode, MVT::i8),
16016 Op0.getOperand(1));
16017 if (VT == MVT::i1)
16018 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
16019 return SetCC;
16020 }
16021 }
16022 if ((Op0.getValueType() == MVT::i1) && (Op1.getOpcode() == ISD::Constant) &&
16023 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1) &&
16024 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16026 ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
16027 return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, MVT::i1), NewCC);
16030 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
16031 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
16032 if (X86CC == X86::COND_INVALID)
16033 return SDValue();
16035 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
16036 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
16037 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
16038 DAG.getConstant(X86CC, MVT::i8), EFLAGS);
16039 if (VT == MVT::i1)
16040 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
16041 return SetCC;
16042 }
16044 // isX86LogicalCmp - Return true if opcode is an X86 logical comparison.
16045 static bool isX86LogicalCmp(SDValue Op) {
16046 unsigned Opc = Op.getNode()->getOpcode();
16047 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
16048 Opc == X86ISD::SAHF)
16049 return true;
16050 if (Op.getResNo() == 1 &&
16051 (Opc == X86ISD::ADD ||
16052 Opc == X86ISD::SUB ||
16053 Opc == X86ISD::ADC ||
16054 Opc == X86ISD::SBB ||
16055 Opc == X86ISD::SMUL ||
16056 Opc == X86ISD::UMUL ||
16057 Opc == X86ISD::INC ||
16058 Opc == X86ISD::DEC ||
16059 Opc == X86ISD::OR ||
16060 Opc == X86ISD::XOR ||
16061 Opc == X86ISD::AND))
16062 return true;
16064 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
16065 return true;
16067 return false;
16068 }
16070 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
16071 if (V.getOpcode() != ISD::TRUNCATE)
16072 return false;
16074 SDValue VOp0 = V.getOperand(0);
16075 unsigned InBits = VOp0.getValueSizeInBits();
16076 unsigned Bits = V.getValueSizeInBits();
16077 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
16080 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
16081 bool addTest = true;
16082 SDValue Cond = Op.getOperand(0);
16083 SDValue Op1 = Op.getOperand(1);
16084 SDValue Op2 = Op.getOperand(2);
16085 SDLoc DL(Op);
16086 EVT VT = Op1.getValueType();
16087 SDValue CC;
16089 // Lower fp selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
16090 // are available. Otherwise fp cmovs get lowered into a less efficient branch
16091 // sequence later on.
16092 if (Cond.getOpcode() == ISD::SETCC &&
16093 ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
16094 (Subtarget->hasSSE1() && VT == MVT::f32)) &&
16095 VT == Cond.getOperand(0).getValueType() && Cond->hasOneUse()) {
16096 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
16097 int SSECC = translateX86FSETCC(
16098 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
16100 if (SSECC != 8) {
16101 if (Subtarget->hasAVX512()) {
16102 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
16103 DAG.getConstant(SSECC, MVT::i8));
16104 return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
16105 }
16106 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
16107 DAG.getConstant(SSECC, MVT::i8));
16108 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
16109 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
16110 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
16111 }
16112 }
16114 if (Cond.getOpcode() == ISD::SETCC) {
16115 SDValue NewCond = LowerSETCC(Cond, DAG);
16116 if (NewCond.getNode())
16117 Cond = NewCond;
16118 }
16120 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
16121 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
16122 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
16123 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
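// These identities hold because 'cmp x, 1' sets CF exactly when x == 0, so a
// carry-based setcc (sbb r, r) yields all-ones for x == 0 and zero otherwise;
// OR-ing with y then forms the select result without a branch or cmov.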
16124 if (Cond.getOpcode() == X86ISD::SETCC &&
16125 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
16126 isZero(Cond.getOperand(1).getOperand(1))) {
16127 SDValue Cmp = Cond.getOperand(1);
16129 unsigned CondCode = cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
16131 if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
16132 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
16133 SDValue Y = isAllOnes(Op2) ? Op1 : Op2;
16135 SDValue CmpOp0 = Cmp.getOperand(0);
16136 // Apply further optimizations for special cases
16137 // (select (x != 0), -1, 0) -> neg & sbb
16138 // (select (x == 0), 0, -1) -> neg & sbb
16139 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y))
16140 if (YC->isNullValue() &&
16141 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) {
16142 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
16143 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
16144 DAG.getConstant(0, CmpOp0.getValueType()),
16145 CmpOp0);
16146 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16147 DAG.getConstant(X86::COND_B, MVT::i8),
16148 SDValue(Neg.getNode(), 1));
16149 return Res;
16150 }
16152 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
16153 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
16154 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16156 SDValue Res = // Res = 0 or -1.
16157 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16158 DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
16160 if (isAllOnes(Op1) != (CondCode == X86::COND_E))
16161 Res = DAG.getNOT(DL, Res, Res.getValueType());
16163 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
16164 if (!N2C || !N2C->isNullValue())
16165 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
16166 return Res;
16167 }
16168 }
16170 // Look past (and (setcc_carry (cmp ...)), 1).
16171 if (Cond.getOpcode() == ISD::AND &&
16172 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16173 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16174 if (C && C->getAPIntValue() == 1)
16175 Cond = Cond.getOperand(0);
16178 // If condition flag is set by an X86ISD::CMP, then use it as the condition
16179 // setting operand in place of the X86ISD::SETCC.
16180 unsigned CondOpcode = Cond.getOpcode();
16181 if (CondOpcode == X86ISD::SETCC ||
16182 CondOpcode == X86ISD::SETCC_CARRY) {
16183 CC = Cond.getOperand(0);
16185 SDValue Cmp = Cond.getOperand(1);
16186 unsigned Opc = Cmp.getOpcode();
16187 MVT VT = Op.getSimpleValueType();
16189 bool IllegalFPCMov = false;
16190 if (VT.isFloatingPoint() && !VT.isVector() &&
16191 !isScalarFPTypeInSSEReg(VT)) // FPStack?
16192 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
16194 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
16195 Opc == X86ISD::BT) { // FIXME
16196 Cond = Cmp;
16197 addTest = false;
16198 }
16199 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16200 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16201 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16202 Cond.getOperand(0).getValueType() != MVT::i8)) {
16203 SDValue LHS = Cond.getOperand(0);
16204 SDValue RHS = Cond.getOperand(1);
16205 unsigned X86Opcode;
16206 unsigned X86Cond;
16207 SDVTList VTs;
16208 switch (CondOpcode) {
16209 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16210 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16211 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16212 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16213 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16214 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16215 default: llvm_unreachable("unexpected overflowing operator");
16217 if (CondOpcode == ISD::UMULO)
16218 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16219 MVT::i32);
16220 else
16221 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16223 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
16225 if (CondOpcode == ISD::UMULO)
16226 Cond = X86Op.getValue(2);
16227 else
16228 Cond = X86Op.getValue(1);
16230 CC = DAG.getConstant(X86Cond, MVT::i8);
16231 addTest = false;
16232 }
16234 if (addTest) {
16235 // Look past the truncate if the high bits are known zero.
16236 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16237 Cond = Cond.getOperand(0);
16239 // We know the result of AND is compared against zero. Try to match
16240 // it against BT.
16241 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16242 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG);
16243 if (NewSetCC.getNode()) {
16244 CC = NewSetCC.getOperand(0);
16245 Cond = NewSetCC.getOperand(1);
16246 addTest = false;
16247 }
16248 }
16249 }
16251 if (addTest) {
16252 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16253 Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
16256 // a < b ? -1 : 0 -> RES = ~setcc_carry
16257 // a < b ? 0 : -1 -> RES = setcc_carry
16258 // a >= b ? -1 : 0 -> RES = setcc_carry
16259 // a >= b ? 0 : -1 -> RES = ~setcc_carry
16260 if (Cond.getOpcode() == X86ISD::SUB) {
16261 Cond = ConvertCmpIfNecessary(Cond, DAG);
16262 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
16264 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
16265 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
16266 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16267 DAG.getConstant(X86::COND_B, MVT::i8), Cond);
16268 if (isAllOnes(Op1) != (CondCode == X86::COND_B))
16269 return DAG.getNOT(DL, Res, Res.getValueType());
16270 return Res;
16271 }
16272 }
16274 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate,
16275 // widen the cmov and push the truncate through. This avoids introducing a new
16276 // branch during isel and doesn't add any extensions.
16277 if (Op.getValueType() == MVT::i8 &&
16278 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
16279 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
16280 if (T1.getValueType() == T2.getValueType() &&
16281 // Blacklist CopyFromReg to avoid partial register stalls.
16282 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
16283 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
16284 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond);
16285 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
16289 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
16290 // condition is true.
16291 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
16292 SDValue Ops[] = { Op2, Op1, CC, Cond };
16293 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops);
16294 }
16296 static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, const X86Subtarget *Subtarget,
16297 SelectionDAG &DAG) {
16298 MVT VT = Op->getSimpleValueType(0);
16299 SDValue In = Op->getOperand(0);
16300 MVT InVT = In.getSimpleValueType();
16301 MVT VTElt = VT.getVectorElementType();
16302 MVT InVTElt = InVT.getVectorElementType();
16303 SDLoc dl(Op);
16305 // SKX processor
16306 if ((InVTElt == MVT::i1) &&
16307 (((Subtarget->hasBWI() && Subtarget->hasVLX() &&
16308 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() <= 16)) ||
16310 ((Subtarget->hasBWI() && VT.is512BitVector() &&
16311 VTElt.getSizeInBits() <= 16)) ||
16313 ((Subtarget->hasDQI() && Subtarget->hasVLX() &&
16314 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) ||
16316 ((Subtarget->hasDQI() && VT.is512BitVector() &&
16317 VTElt.getSizeInBits() >= 32))))
16318 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16320 unsigned int NumElts = VT.getVectorNumElements();
16322 if (NumElts != 8 && NumElts != 16)
16323 return SDValue();
16325 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) {
16326 if (In.getOpcode() == X86ISD::VSEXT || In.getOpcode() == X86ISD::VZEXT)
16327 return DAG.getNode(In.getOpcode(), dl, VT, In.getOperand(0));
16328 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16329 }
16331 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16332 assert (InVT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
16334 MVT ExtVT = (NumElts == 8) ? MVT::v8i64 : MVT::v16i32;
16335 Constant *C = ConstantInt::get(*DAG.getContext(),
16336 APInt::getAllOnesValue(ExtVT.getScalarType().getSizeInBits()));
16338 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
16339 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
16340 SDValue Ld = DAG.getLoad(ExtVT.getScalarType(), dl, DAG.getEntryNode(), CP,
16341 MachinePointerInfo::getConstantPool(),
16342 false, false, false, Alignment);
16343 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, dl, ExtVT, In, Ld);
16344 if (VT.is512BitVector())
16345 return Brcst;
16346 return DAG.getNode(X86ISD::VTRUNC, dl, VT, Brcst);
16347 }
16349 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
16350 SelectionDAG &DAG) {
16351 MVT VT = Op->getSimpleValueType(0);
16352 SDValue In = Op->getOperand(0);
16353 MVT InVT = In.getSimpleValueType();
16354 SDLoc dl(Op);
16356 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
16357 return LowerSIGN_EXTEND_AVX512(Op, Subtarget, DAG);
16359 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) &&
16360 (VT != MVT::v8i32 || InVT != MVT::v8i16) &&
16361 (VT != MVT::v16i16 || InVT != MVT::v16i8))
16362 return SDValue();
16364 if (Subtarget->hasInt256())
16365 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16367 // Optimize vectors in AVX mode
16368 // Sign extend v8i16 to v8i32 and
16369 // v4i32 to v4i64
16371 // Divide input vector into two parts
16372 // for v4i32 the shuffle mask will be { 0, 1, -1, -1} {2, 3, -1, -1}
16373 // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32
16374 // concat the vectors to original VT
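// Illustrative example: for v8i16 -> v8i32 on AVX1, the input splits into
// two v8i16 halves (masks {0,1,2,3,-1,...} and {4,5,6,7,-1,...}), each half
// is extended with vpmovsxwd to v4i32, and the two results are concatenated
// into the final v8i32.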
16376 unsigned NumElems = InVT.getVectorNumElements();
16377 SDValue Undef = DAG.getUNDEF(InVT);
16379 SmallVector<int,8> ShufMask1(NumElems, -1);
16380 for (unsigned i = 0; i != NumElems/2; ++i)
16381 ShufMask1[i] = i;
16383 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]);
16385 SmallVector<int,8> ShufMask2(NumElems, -1);
16386 for (unsigned i = 0; i != NumElems/2; ++i)
16387 ShufMask2[i] = i + NumElems/2;
16389 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]);
16391 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(),
16392 VT.getVectorNumElements()/2);
16394 OpLo = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpLo);
16395 OpHi = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpHi);
16397 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
16398 }
16400 // Lower vector extended loads using a shuffle. If SSSE3 is not available, we
16401 // may emit an illegal shuffle but the expansion is still better than scalar
16402 // code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
16403 // we'll emit a shuffle and an arithmetic shift.
16404 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
16405 // TODO: It is possible to support ZExt by zeroing the undef values during
16406 // the shuffle phase or after the shuffle.
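// Illustrative example (one case of the code below): a sextload from
// <4 x i8> to <4 x i32> without SSE4.1 becomes, roughly:
//   load the four bytes as a single i32 scalar and SCALAR_TO_VECTOR it,
//   shuffle each byte into the top byte of its destination dword,
//   then arithmetic-shift each dword right by 24 so the byte lands
//   sign-extended in the low bits.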
16407 static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
16408 SelectionDAG &DAG) {
16409 MVT RegVT = Op.getSimpleValueType();
16410 assert(RegVT.isVector() && "We only custom lower vector sext loads.");
16411 assert(RegVT.isInteger() &&
16412 "We only custom lower integer vector sext loads.");
16414 // Nothing useful we can do without SSE2 shuffles.
16415 assert(Subtarget->hasSSE2() && "We only custom lower sext loads with SSE2.");
16417 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
16418 SDLoc dl(Ld);
16419 EVT MemVT = Ld->getMemoryVT();
16420 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16421 unsigned RegSz = RegVT.getSizeInBits();
16423 ISD::LoadExtType Ext = Ld->getExtensionType();
16425 assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
16426 && "Only anyext and sext are currently implemented.");
16427 assert(MemVT != RegVT && "Cannot extend to the same type");
16428 assert(MemVT.isVector() && "Must load a vector from memory");
16430 unsigned NumElems = RegVT.getVectorNumElements();
16431 unsigned MemSz = MemVT.getSizeInBits();
16432 assert(RegSz > MemSz && "Register size must be greater than the mem size");
16434 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256()) {
16435 // The only way in which we have a legal 256-bit vector result but not the
16436 // integer 256-bit operations needed to directly lower a sextload is if we
16437 // have AVX1 but not AVX2. In that case, we can always emit a sextload to
16438 // a 128-bit vector and a normal sign_extend to 256-bits that should get
16439 // correctly legalized. We do this late to allow the canonical form of
16440 // sextload to persist throughout the rest of the DAG combiner -- it wants
16441 // to fold together any extensions it can, and so will fuse a sign_extend
16442 // of an sextload into a sextload targeting a wider value.
16443 SDValue Load;
16444 if (MemSz == 128) {
16445 // Just switch this to a normal load.
16446 assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
16447 "it must be a legal 128-bit vector "
16448 "type!");
16449 Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
16450 Ld->getPointerInfo(), Ld->isVolatile(), Ld->isNonTemporal(),
16451 Ld->isInvariant(), Ld->getAlignment());
16452 } else {
16453 assert(MemSz < 128 &&
16454 "Can't extend a type wider than 128 bits to a 256 bit vector!");
16455 // Do an sext load to a 128-bit vector type. We want to use the same
16456 // number of elements, but elements half as wide. This will end up being
16457 // recursively lowered by this routine, but will succeed as we definitely
16458 // have all the necessary features if we're using AVX1.
16459 EVT HalfEltVT =
16460 EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
16461 EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
16462 Load =
16463 DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
16464 Ld->getPointerInfo(), MemVT, Ld->isVolatile(),
16465 Ld->isNonTemporal(), Ld->isInvariant(),
16466 Ld->getAlignment());
16467 }
16469 // Replace chain users with the new chain.
16470 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
16471 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
16473 // Finally, do a normal sign-extend to the desired register.
16474 return DAG.getSExtOrTrunc(Load, dl, RegVT);
16477 // All sizes must be a power of two.
16478 assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
16479 "Non-power-of-two elements are not custom lowered!");
16481 // Attempt to load the original value using scalar loads.
16482 // Find the largest scalar type that divides the total loaded size.
16483 MVT SclrLoadTy = MVT::i8;
16484 for (MVT Tp : MVT::integer_valuetypes()) {
16485 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
16486 SclrLoadTy = Tp;
16487 }
16488 }
16490 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64.
16491 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
16492 (64 <= MemSz))
16493 SclrLoadTy = MVT::f64;
16495 // Calculate the number of scalar loads that we need to perform
16496 // in order to load our vector from memory.
16497 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
16499 assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
16500 "Can only lower sext loads with a single scalar load!");
16502 unsigned loadRegSize = RegSz;
16503 if (Ext == ISD::SEXTLOAD && RegSz == 256)
16504 loadRegSize = 128;
16506 // Represent our vector as a sequence of elements which are the
16507 // largest scalar that we can load.
16508 EVT LoadUnitVecVT = EVT::getVectorVT(
16509 *DAG.getContext(), SclrLoadTy, loadRegSize / SclrLoadTy.getSizeInBits());
16511 // Represent the data using the same element type that is stored in
16512 // memory. In practice, we "widen" MemVT.
16513 EVT WideVecVT =
16514 EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
16515 loadRegSize / MemVT.getScalarType().getSizeInBits());
16517 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
16518 "Invalid vector type");
16520 // We can't shuffle using an illegal type.
16521 assert(TLI.isTypeLegal(WideVecVT) &&
16522 "We only lower types that form legal widened vector types");
16524 SmallVector<SDValue, 8> Chains;
16525 SDValue Ptr = Ld->getBasePtr();
16526 SDValue Increment =
16527 DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, TLI.getPointerTy());
16528 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
16530 for (unsigned i = 0; i < NumLoads; ++i) {
16531 // Perform a single load.
16532 SDValue ScalarLoad =
16533 DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
16534 Ld->isVolatile(), Ld->isNonTemporal(), Ld->isInvariant(),
16535 Ld->getAlignment());
16536 Chains.push_back(ScalarLoad.getValue(1));
16537 // Create the first element type using SCALAR_TO_VECTOR in order to avoid
16538 // another round of DAGCombining.
16539 if (i == 0)
16540 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
16541 else
16542 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
16543 ScalarLoad, DAG.getIntPtrConstant(i));
16545 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
16548 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
16550 // Bitcast the loaded value to a vector of the original element type, in
16551 // the size of the target vector type.
16552 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res);
16553 unsigned SizeRatio = RegSz / MemSz;
16555 if (Ext == ISD::SEXTLOAD) {
16556 // If we have SSE4.1, we can directly emit a VSEXT node.
16557 if (Subtarget->hasSSE41()) {
16558 SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec);
16559 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16560 return Sext;
16561 }
16563 // Otherwise we'll shuffle the small elements in the high bits of the
16564 // larger type and perform an arithmetic shift. If the shift is not legal
16565 // it's better to scalarize.
16566 assert(TLI.isOperationLegalOrCustom(ISD::SRA, RegVT) &&
16567 "We can't implement a sext load without an arithmetic right shift!");
16569 // Redistribute the loaded elements into the different locations.
16570 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16571 for (unsigned i = 0; i != NumElems; ++i)
16572 ShuffleVec[i * SizeRatio + SizeRatio - 1] = i;
16574 SDValue Shuff = DAG.getVectorShuffle(
16575 WideVecVT, dl, SlicedVec, DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16577 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16579 // Build the arithmetic shift.
16580 unsigned Amt = RegVT.getVectorElementType().getSizeInBits() -
16581 MemVT.getVectorElementType().getSizeInBits();
16582 Shuff =
16583 DAG.getNode(ISD::SRA, dl, RegVT, Shuff, DAG.getConstant(Amt, RegVT));
16585 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16586 return Shuff;
16587 }
16589 // Redistribute the loaded elements into the different locations.
16590 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16591 for (unsigned i = 0; i != NumElems; ++i)
16592 ShuffleVec[i * SizeRatio] = i;
16594 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
16595 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16597 // Bitcast to the requested type.
16598 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16599 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16600 return Shuff;
16601 }
16603 // isAndOrOfSetCCs - Return true if node is an ISD::AND or
16604 // ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart
16605 // from the AND / OR.
16606 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
16607 Opc = Op.getOpcode();
16608 if (Opc != ISD::OR && Opc != ISD::AND)
16609 return false;
16610 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16611 Op.getOperand(0).hasOneUse() &&
16612 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
16613 Op.getOperand(1).hasOneUse());
16616 // isXor1OfSetCC - Return true if node is an ISD::XOR of an X86ISD::SETCC and
16617 // 1, and that the SETCC node has a single use.
16618 static bool isXor1OfSetCC(SDValue Op) {
16619 if (Op.getOpcode() != ISD::XOR)
16620 return false;
16621 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
16622 if (N1C && N1C->getAPIntValue() == 1) {
16623 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16624 Op.getOperand(0).hasOneUse();
16625 }
16627 return false;
16628 }
16629 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
16630 bool addTest = true;
16631 SDValue Chain = Op.getOperand(0);
16632 SDValue Cond = Op.getOperand(1);
16633 SDValue Dest = Op.getOperand(2);
16634 SDLoc dl(Op);
16635 SDValue CC;
16636 bool Inverted = false;
16638 if (Cond.getOpcode() == ISD::SETCC) {
16639 // Check for setcc([su]{add,sub,mul}o == 0).
16640 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
16641 isa<ConstantSDNode>(Cond.getOperand(1)) &&
16642 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() &&
16643 Cond.getOperand(0).getResNo() == 1 &&
16644 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
16645 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
16646 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
16647 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
16648 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
16649 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
16650 Inverted = true;
16651 Cond = Cond.getOperand(0);
16652 } else {
16653 SDValue NewCond = LowerSETCC(Cond, DAG);
16654 if (NewCond.getNode())
16655 Cond = NewCond;
16656 }
16657 }
16659 // FIXME: LowerXALUO doesn't handle these!!
16660 else if (Cond.getOpcode() == X86ISD::ADD ||
16661 Cond.getOpcode() == X86ISD::SUB ||
16662 Cond.getOpcode() == X86ISD::SMUL ||
16663 Cond.getOpcode() == X86ISD::UMUL)
16664 Cond = LowerXALUO(Cond, DAG);
16667 // Look past (and (setcc_carry (cmp ...)), 1).
16668 if (Cond.getOpcode() == ISD::AND &&
16669 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16670 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16671 if (C && C->getAPIntValue() == 1)
16672 Cond = Cond.getOperand(0);
16675 // If condition flag is set by an X86ISD::CMP, then use it as the condition
16676 // setting operand in place of the X86ISD::SETCC.
16677 unsigned CondOpcode = Cond.getOpcode();
16678 if (CondOpcode == X86ISD::SETCC ||
16679 CondOpcode == X86ISD::SETCC_CARRY) {
16680 CC = Cond.getOperand(0);
16682 SDValue Cmp = Cond.getOperand(1);
16683 unsigned Opc = Cmp.getOpcode();
16684 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
16685 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
16686 Cond = Cmp;
16687 addTest = false;
16688 } else {
16689 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
16690 default: break;
16691 case X86::COND_O:
16692 case X86::COND_B:
16693 // These can only come from an arithmetic instruction with overflow,
16694 // e.g. SADDO, UADDO.
16695 Cond = Cond.getNode()->getOperand(1);
16696 addTest = false;
16697 break;
16698 }
16699 }
16700 }
16701 CondOpcode = Cond.getOpcode();
16702 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16703 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16704 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16705 Cond.getOperand(0).getValueType() != MVT::i8)) {
16706 SDValue LHS = Cond.getOperand(0);
16707 SDValue RHS = Cond.getOperand(1);
16708 unsigned X86Opcode;
16709 unsigned X86Cond;
16710 SDVTList VTs;
16711 // Keep this in sync with LowerXALUO, otherwise we might create redundant
16712 // instructions that can't be removed afterwards (i.e. X86ISD::ADD and
16713 // X86ISD::INC).
16714 switch (CondOpcode) {
16715 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16716 case ISD::SADDO:
16717 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16718 if (C->isOne()) {
16719 X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
16720 break;
16721 }
16722 X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16723 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16724 case ISD::SSUBO:
16725 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16726 if (C->isOne()) {
16727 X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
16728 break;
16729 }
16730 X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16731 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16732 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16733 default: llvm_unreachable("unexpected overflowing operator");
16734 }
16735 if (Inverted)
16736 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
16737 if (CondOpcode == ISD::UMULO)
16738 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16739 MVT::i32);
16740 else
16741 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16743 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);
16745 if (CondOpcode == ISD::UMULO)
16746 Cond = X86Op.getValue(2);
16747 else
16748 Cond = X86Op.getValue(1);
16750 CC = DAG.getConstant(X86Cond, MVT::i8);
16751 addTest = false;
16752 } else {
16753 unsigned CondOpc;
16754 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
16755 SDValue Cmp = Cond.getOperand(0).getOperand(1);
16756 if (CondOpc == ISD::OR) {
16757 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
16758 // two branches instead of an explicit OR instruction with a
16759 // separate test.
16760 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16761 isX86LogicalCmp(Cmp)) {
16762 CC = Cond.getOperand(0).getOperand(0);
16763 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16764 Chain, Dest, CC, Cmp);
16765 CC = Cond.getOperand(1).getOperand(0);
16766 Cond = Cmp;
16767 addTest = false;
16768 }
16769 } else { // ISD::AND
16770 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
16771 // two branches instead of an explicit AND instruction with a
16772 // separate test. However, we only do this if this block doesn't
16773 // have a fall-through edge, because this requires an explicit
16774 // jmp when the condition is false.
16775 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16776 isX86LogicalCmp(Cmp) &&
16777 Op.getNode()->hasOneUse()) {
16778 X86::CondCode CCode =
16779 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16780 CCode = X86::GetOppositeBranchCondition(CCode);
16781 CC = DAG.getConstant(CCode, MVT::i8);
16782 SDNode *User = *Op.getNode()->use_begin();
16783 // Look for an unconditional branch following this conditional branch.
16784 // We need this because we need to reverse the successors in order
16785 // to implement FCMP_OEQ.
16786 if (User->getOpcode() == ISD::BR) {
16787 SDValue FalseBB = User->getOperand(1);
16788 SDNode *NewBR =
16789 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16790 assert(NewBR == User);
16791 (void)NewBR;
16792 Dest = FalseBB;
16794 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16795 Chain, Dest, CC, Cmp);
16796 X86::CondCode CCode =
16797 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
16798 CCode = X86::GetOppositeBranchCondition(CCode);
16799 CC = DAG.getConstant(CCode, MVT::i8);
16800 Cond = Cmp;
16801 addTest = false;
16802 }
16803 }
16804 }
16805 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
16806 // Recognize for xorb (setcc), 1 patterns. The xor inverts the condition.
16807 // It should be transformed during dag combiner except when the condition
16808 // is set by an arithmetic-with-overflow node.
16809 X86::CondCode CCode =
16810 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16811 CCode = X86::GetOppositeBranchCondition(CCode);
16812 CC = DAG.getConstant(CCode, MVT::i8);
16813 Cond = Cond.getOperand(0).getOperand(1);
16814 addTest = false;
16815 } else if (Cond.getOpcode() == ISD::SETCC &&
16816 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
16817 // For FCMP_OEQ, we can emit
16818 // two branches instead of an explicit AND instruction with a
16819 // separate test. However, we only do this if this block doesn't
16820 // have a fall-through edge, because this requires an explicit
16821 // jmp when the condition is false.
16822 if (Op.getNode()->hasOneUse()) {
16823 SDNode *User = *Op.getNode()->use_begin();
16824 // Look for an unconditional branch following this conditional branch.
16825 // We need this because we need to reverse the successors in order
16826 // to implement FCMP_OEQ.
16827 if (User->getOpcode() == ISD::BR) {
16828 SDValue FalseBB = User->getOperand(1);
16829 SDNode *NewBR =
16830 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16831 assert(NewBR == User);
16832 (void)NewBR;
16833 Dest = FalseBB;
16835 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16836 Cond.getOperand(0), Cond.getOperand(1));
16837 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16838 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16839 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16840 Chain, Dest, CC, Cmp);
16841 CC = DAG.getConstant(X86::COND_P, MVT::i8);
16842 Cond = Cmp;
16843 addTest = false;
16844 }
16845 }
16846 } else if (Cond.getOpcode() == ISD::SETCC &&
16847 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
16848 // For FCMP_UNE, we can emit
16849 // two branches instead of an explicit AND instruction with a
16850 // separate test. However, we only do this if this block doesn't
16851 // have a fall-through edge, because this requires an explicit
16852 // jmp when the condition is false.
16853 if (Op.getNode()->hasOneUse()) {
16854 SDNode *User = *Op.getNode()->use_begin();
16855 // Look for an unconditional branch following this conditional branch.
16856 // We need this because we need to reverse the successors in order
16857 // to implement FCMP_UNE.
16858 if (User->getOpcode() == ISD::BR) {
16859 SDValue FalseBB = User->getOperand(1);
16860 SDNode *NewBR =
16861 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16862 assert(NewBR == User);
16863 (void)NewBR;
16865 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16866 Cond.getOperand(0), Cond.getOperand(1));
16867 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16868 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16869 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16870 Chain, Dest, CC, Cmp);
16871 CC = DAG.getConstant(X86::COND_NP, MVT::i8);
16872 Cond = Cmp;
16873 addTest = false;
16874 Dest = FalseBB;
16875 }
16876 }
16877 }
16879 if (addTest) {
16881 // Look past the truncate if the high bits are known zero.
16882 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16883 Cond = Cond.getOperand(0);
16885 // We know the result of AND is compared against zero. Try to match
16886 // it against BT.
16887 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16888 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
16889 if (NewSetCC.getNode()) {
16890 CC = NewSetCC.getOperand(0);
16891 Cond = NewSetCC.getOperand(1);
16892 addTest = false;
16893 }
16894 }
16895 }
16897 if (addTest) {
16898 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
16899 CC = DAG.getConstant(X86Cond, MVT::i8);
16900 Cond = EmitTest(Cond, X86Cond, dl, DAG);
16902 Cond = ConvertCmpIfNecessary(Cond, DAG);
16903 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16904 Chain, Dest, CC, Cond);
16907 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
16908 // Calls to _alloca are needed to probe the stack when allocating more than 4k
16909 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
16910 // that the guard pages used by the OS virtual memory manager are allocated in
16911 // correct sequence.
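// Rationale, summarized: a single adjustment larger than the guard region
// could move the stack pointer past the guard page without ever touching it,
// so the OS would never grow the stack and the next access would fault. The
// _alloca/chkstk helper touches every 4K page between the old and new stack
// pointer to keep that mechanism working.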
16912 SDValue
16913 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
16914 SelectionDAG &DAG) const {
16915 MachineFunction &MF = DAG.getMachineFunction();
16916 bool SplitStack = MF.shouldSplitStack();
16917 bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMachO()) ||
16918 SplitStack;
16919 SDLoc dl(Op);
16921 if (!Lower) {
16922 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16923 SDNode* Node = Op.getNode();
16925 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
16926 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
16927 " not tell us which reg is the stack pointer!");
16928 EVT VT = Node->getValueType(0);
16929 SDValue Tmp1 = SDValue(Node, 0);
16930 SDValue Tmp2 = SDValue(Node, 1);
16931 SDValue Tmp3 = Node->getOperand(2);
16932 SDValue Chain = Tmp1.getOperand(0);
16934 // Chain the dynamic stack allocation so that it doesn't modify the stack
16935 // pointer when other instructions are using the stack.
16936 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true),
16937 SDLoc(Node));
16939 SDValue Size = Tmp2.getOperand(1);
16940 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
16941 Chain = SP.getValue(1);
16942 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
16943 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
16944 unsigned StackAlign = TFI.getStackAlignment();
16945 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
16946 if (Align > StackAlign)
16947 Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
16948 DAG.getConstant(-(uint64_t)Align, VT));
16949 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
16951 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
16952 DAG.getIntPtrConstant(0, true), SDValue(),
16953 SDLoc(Node));
16955 SDValue Ops[2] = { Tmp1, Tmp2 };
16956 return DAG.getMergeValues(Ops, dl);
16957 }
16960 SDValue Chain = Op.getOperand(0);
16961 SDValue Size = Op.getOperand(1);
16962 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
16963 EVT VT = Op.getNode()->getValueType(0);
16965 bool Is64Bit = Subtarget->is64Bit();
16966 EVT SPTy = getPointerTy();
16968 if (SplitStack) {
16969 MachineRegisterInfo &MRI = MF.getRegInfo();
16971 if (Is64Bit) {
16972 // The 64 bit implementation of segmented stacks needs to clobber both r10
16973 // and r11. This makes it impossible to use it along with nested parameters.
16974 const Function *F = MF.getFunction();
16976 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
16977 I != E; ++I)
16978 if (I->hasNestAttr())
16979 report_fatal_error("Cannot use segmented stacks with functions that "
16980 "have nested arguments.");
16983 const TargetRegisterClass *AddrRegClass =
16984 getRegClassFor(getPointerTy());
16985 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
16986 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
16987 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
16988 DAG.getRegister(Vreg, SPTy));
16989 SDValue Ops1[2] = { Value, Chain };
16990 return DAG.getMergeValues(Ops1, dl);
16991 } else {
16992 SDValue Flag;
16993 const unsigned Reg = (Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX);
16995 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
16996 Flag = Chain.getValue(1);
16997 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
16999 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
17000 Flag = Chain.getValue(1);
17001 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
17002 unsigned SPReg = RegInfo->getStackRegister();
17003 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
17004 Chain = SP.getValue(1);
17006 if (Align) {
17007 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
17008 DAG.getConstant(-(uint64_t)Align, VT));
17009 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
17010 }
17012 SDValue Ops1[2] = { SP, Chain };
17013 return DAG.getMergeValues(Ops1, dl);
17017 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
17018 MachineFunction &MF = DAG.getMachineFunction();
17019 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
17021 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
17022 SDLoc DL(Op);
17024 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
17025 // vastart just stores the address of the VarArgsFrameIndex slot into the
17026 // memory location argument.
17027 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
17028 getPointerTy());
17029 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
17030 MachinePointerInfo(SV), false, false, 0);
17031 }
17033 // __va_list_tag:
17034 // gp_offset (0 - 6 * 8)
17035 // fp_offset (48 - 48 + 8 * 16)
17036 // overflow_arg_area (point to parameters coming in memory).
17037 // reg_save_area
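// Equivalently, in C (per the x86-64 SysV ABI):
//   struct __va_list_tag {
//     unsigned int gp_offset;   // byte offset 0
//     unsigned int fp_offset;   // byte offset 4
//     void *overflow_arg_area;  // byte offset 8
//     void *reg_save_area;      // byte offset 16
//   };
// matching the offsets 0, 4, 8 and 16 used by the stores below.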
17038 SmallVector<SDValue, 8> MemOps;
17039 SDValue FIN = Op.getOperand(1);
17040 // Store gp_offset
17041 SDValue Store = DAG.getStore(Op.getOperand(0), DL,
17042 DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
17043 MVT::i32),
17044 FIN, MachinePointerInfo(SV), false, false, 0);
17045 MemOps.push_back(Store);
17047 // Store fp_offset
17048 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17049 FIN, DAG.getIntPtrConstant(4));
17050 Store = DAG.getStore(Op.getOperand(0), DL,
17051 DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
17052 MVT::i32),
17053 FIN, MachinePointerInfo(SV, 4), false, false, 0);
17054 MemOps.push_back(Store);
17056 // Store ptr to overflow_arg_area
17057 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17058 FIN, DAG.getIntPtrConstant(4));
17059 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
17060 getPointerTy());
17061 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
17062 MachinePointerInfo(SV, 8),
17063 false, false, 0);
17064 MemOps.push_back(Store);
17066 // Store ptr to reg_save_area.
17067 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17068 FIN, DAG.getIntPtrConstant(8));
17069 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
17070 getPointerTy());
17071 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN,
17072 MachinePointerInfo(SV, 16), false, false, 0);
17073 MemOps.push_back(Store);
17074 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
17077 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
17078 assert(Subtarget->is64Bit() &&
17079 "LowerVAARG only handles 64-bit va_arg!");
17080 assert((Subtarget->isTargetLinux() ||
17081 Subtarget->isTargetDarwin()) &&
17082 "Unhandled target in LowerVAARG");
17083 assert(Op.getNode()->getNumOperands() == 4);
17084 SDValue Chain = Op.getOperand(0);
17085 SDValue SrcPtr = Op.getOperand(1);
17086 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  unsigned Align = Op.getConstantOperandVal(3);
  SDLoc dl(Op);
17090 EVT ArgVT = Op.getNode()->getValueType(0);
17091 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
  uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy);
  uint8_t ArgMode;
17095 // Decide which area this value should be read from.
17096 // TODO: Implement the AMD64 ABI in its entirety. This simple
17097 // selection mechanism works only for the basic types.
17098 if (ArgVT == MVT::f80) {
17099 llvm_unreachable("va_arg for f80 not yet implemented");
17100 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
17101 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
17102 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
17103 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
  } else {
    llvm_unreachable("Unhandled argument type in LowerVAARG");
  }
17108 if (ArgMode == 2) {
17109 // Sanity Check: Make sure using fp_offset makes sense.
17110 assert(!DAG.getTarget().Options.UseSoftFloat &&
17111 !(DAG.getMachineFunction().getFunction()->hasFnAttribute(
17112 Attribute::NoImplicitFloat)) &&
           Subtarget->hasSSE1());
  }
17116 // Insert VAARG_64 node into the DAG
17117 // VAARG_64 returns two values: Variable Argument Address, Chain
17118 SDValue InstOps[] = {Chain, SrcPtr, DAG.getConstant(ArgSize, MVT::i32),
17119 DAG.getConstant(ArgMode, MVT::i8),
17120 DAG.getConstant(Align, MVT::i32)};
17121 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
17122 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
17123 VTs, InstOps, MVT::i64,
17124 MachinePointerInfo(SV),
                                          /*Align=*/0,
                                          /*Volatile=*/false,
                                          /*ReadMem=*/true,
                                          /*WriteMem=*/true);
17129 Chain = VAARG.getValue(1);
17131 // Load the next argument and return it
17132 return DAG.getLoad(ArgVT, dl,
                     Chain,
                     VAARG,
                     MachinePointerInfo(),
17136 false, false, false, 0);
17139 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
17140 SelectionDAG &DAG) {
17141 // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
17142 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
17143 SDValue Chain = Op.getOperand(0);
17144 SDValue DstPtr = Op.getOperand(1);
17145 SDValue SrcPtr = Op.getOperand(2);
17146 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
17147 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
  SDLoc DL(Op);
  return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
                       DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
                       false,
                       MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
}
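// Note on the constants above: 24 bytes is sizeof(__va_list_tag), i.e.
// 4 (gp_offset) + 4 (fp_offset) + 8 (overflow_arg_area) + 8 (reg_save_area),
// and 8 is the struct's natural alignment, coming from its pointer members.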
17156 // getTargetVShiftByConstNode - Handle vector element shifts where the shift
17157 // amount is a constant. Takes immediate version of shift as input.
17158 static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
17159 SDValue SrcOp, uint64_t ShiftAmt,
17160 SelectionDAG &DAG) {
17161 MVT ElementType = VT.getVectorElementType();
  // Fold this packed shift into its first operand if ShiftAmt is 0.
  if (ShiftAmt == 0)
    return SrcOp;
17167 // Check for ShiftAmt >= element width
17168 if (ShiftAmt >= ElementType.getSizeInBits()) {
    if (Opc == X86ISD::VSRAI)
      ShiftAmt = ElementType.getSizeInBits() - 1;
    else
      return DAG.getConstant(0, VT);
  }
17175 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
17176 && "Unknown target vector shift-by-constant node");
17178 // Fold this packed vector shift into a build vector if SrcOp is a
17179 // vector of Constants or UNDEFs, and SrcOp valuetype is the same as VT.
17180 if (VT == SrcOp.getSimpleValueType() &&
17181 ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
17182 SmallVector<SDValue, 8> Elts;
17183 unsigned NumElts = SrcOp->getNumOperands();
17184 ConstantSDNode *ND;
    switch (Opc) {
    default: llvm_unreachable(nullptr);
    case X86ISD::VSHLI:
      for (unsigned i=0; i!=NumElts; ++i) {
        SDValue CurrentOp = SrcOp->getOperand(i);
        if (CurrentOp->getOpcode() == ISD::UNDEF) {
          Elts.push_back(CurrentOp);
          continue;
        }
        ND = cast<ConstantSDNode>(CurrentOp);
        const APInt &C = ND->getAPIntValue();
        Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), ElementType));
      }
      break;
    case X86ISD::VSRLI:
      for (unsigned i=0; i!=NumElts; ++i) {
        SDValue CurrentOp = SrcOp->getOperand(i);
        if (CurrentOp->getOpcode() == ISD::UNDEF) {
          Elts.push_back(CurrentOp);
          continue;
        }
        ND = cast<ConstantSDNode>(CurrentOp);
        const APInt &C = ND->getAPIntValue();
        Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), ElementType));
      }
      break;
    case X86ISD::VSRAI:
      for (unsigned i=0; i!=NumElts; ++i) {
        SDValue CurrentOp = SrcOp->getOperand(i);
        if (CurrentOp->getOpcode() == ISD::UNDEF) {
          Elts.push_back(CurrentOp);
          continue;
        }
        ND = cast<ConstantSDNode>(CurrentOp);
        const APInt &C = ND->getAPIntValue();
        Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), ElementType));
      }
      break;
    }

    return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
  }
  return DAG.getNode(Opc, dl, VT, SrcOp, DAG.getConstant(ShiftAmt, MVT::i8));
}
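// For intuition: with SrcOp = build_vector <i32 1, i32 2, undef, i32 4> and
// ShiftAmt = 3, the VSHLI path above constant-folds directly to
// build_vector <i32 8, i32 16, undef, i32 32>, so no shift instruction is
// emitted for constant operands at all.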
17232 // getTargetVShiftNode - Handle vector element shifts where the shift amount
17233 // may or may not be a constant. Takes immediate version of shift as input.
17234 static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
17235 SDValue SrcOp, SDValue ShAmt,
17236 SelectionDAG &DAG) {
17237 MVT SVT = ShAmt.getSimpleValueType();
17238 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
17240 // Catch shift-by-constant.
17241 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
17242 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
17243 CShAmt->getZExtValue(), DAG);
  // Change opcode to non-immediate version
  switch (Opc) {
  default: llvm_unreachable("Unknown target vector shift node");
  case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
  case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
  case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
  }
17253 const X86Subtarget &Subtarget =
17254 static_cast<const X86Subtarget &>(DAG.getSubtarget());
17255 if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
17256 ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
17257 // Let the shuffle legalizer expand this shift amount node.
17258 SDValue Op0 = ShAmt.getOperand(0);
17259 Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
    ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, &Subtarget, DAG);
  } else {
17262 // Need to build a vector containing shift amount.
17263 // SSE/AVX packed shifts only use the lower 64-bit of the shift count.
17264 SmallVector<SDValue, 4> ShOps;
17265 ShOps.push_back(ShAmt);
17266 if (SVT == MVT::i32) {
17267 ShOps.push_back(DAG.getConstant(0, SVT));
      ShOps.push_back(DAG.getUNDEF(SVT));
    }
17270 ShOps.push_back(DAG.getUNDEF(SVT));
17272 MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
    ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
  }
17276 // The return type has to be a 128-bit type with the same element
17277 // type as the input type.
17278 MVT EltVT = VT.getVectorElementType();
17279 EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());
17281 ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt);
  return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
}
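// Example of the non-constant path: an i32 shift amount %s applied to a v4i32
// VSRLI is first materialized as the vector <%s, 0, undef, undef> : v4i32 and
// then handed to X86ISD::VSRL; only the low 64 bits of the count vector are
// read by the hardware, which is why the upper lanes may remain undef.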
17285 /// \brief Return (and \p Op, \p Mask) for compare instructions or
17286 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
17287 /// necessary casting for \p Mask when lowering masking intrinsics.
17288 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
17289 SDValue PreservedSrc,
17290 const X86Subtarget *Subtarget,
17291 SelectionDAG &DAG) {
17292 EVT VT = Op.getValueType();
17293 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(),
17294 MVT::i1, VT.getVectorNumElements());
17295 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17296 Mask.getValueType().getSizeInBits());
  SDLoc dl(Op);

  assert(MaskVT.isSimple() && "invalid mask type");
  if (isAllOnes(Mask))
    return Op;
  // In the case where MaskVT equals v2i1 or v4i1, the lower 2 or 4 elements
  // are extracted by EXTRACT_SUBVECTOR.
17306 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17307 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17308 DAG.getIntPtrConstant(0));
  switch (Op.getOpcode()) {
  default: break;
  case X86ISD::PCMPEQM:
  case X86ISD::PCMPGTM:
  case X86ISD::CMPM:
  case X86ISD::CMPMU:
    return DAG.getNode(ISD::AND, dl, VT, Op, VMask);
  }
17318 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17319 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17320 return DAG.getNode(ISD::VSELECT, dl, VT, VMask, Op, PreservedSrc);
17323 /// \brief Creates an SDNode for a predicated scalar operation.
17324 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
/// The mask is coming as MVT::i8 and it should be truncated
/// to MVT::i1 while lowering masking intrinsics.
/// The main difference between ScalarMaskingNode and VectorMaskingNode is that
/// the former uses "X86select" instead of "vselect"; we just can't create the
/// "vselect" node for a scalar instruction.
17330 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
17331 SDValue PreservedSrc,
17332 const X86Subtarget *Subtarget,
17333 SelectionDAG &DAG) {
  if (isAllOnes(Mask))
    return Op;
17337 EVT VT = Op.getValueType();
  SDLoc dl(Op);
  // The mask should be of type MVT::i1
17340 SDValue IMask = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Mask);
17342 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17343 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17344 return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc);
17347 static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17348 SelectionDAG &DAG) {
  SDLoc dl(Op);
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17351 EVT VT = Op.getValueType();
17352 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
  if (IntrData) {
  switch(IntrData->Type) {
17355 case INTR_TYPE_1OP:
17356 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
17357 case INTR_TYPE_2OP:
    return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
                       Op.getOperand(2));
17360 case INTR_TYPE_3OP:
17361 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17362 Op.getOperand(2), Op.getOperand(3));
17363 case INTR_TYPE_1OP_MASK_RM: {
17364 SDValue Src = Op.getOperand(1);
17365 SDValue Src0 = Op.getOperand(2);
17366 SDValue Mask = Op.getOperand(3);
17367 SDValue RoundingMode = Op.getOperand(4);
17368 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
                                            RoundingMode),
                                Mask, Src0, Subtarget, DAG);
  }
17372 case INTR_TYPE_SCALAR_MASK_RM: {
17373 SDValue Src1 = Op.getOperand(1);
17374 SDValue Src2 = Op.getOperand(2);
17375 SDValue Src0 = Op.getOperand(3);
17376 SDValue Mask = Op.getOperand(4);
    // There are 2 kinds of intrinsics in this group:
    // (1) With suppress-all-exceptions (sae) - 6 operands
    // (2) With rounding mode and sae - 7 operands.
17380 if (Op.getNumOperands() == 6) {
17381 SDValue Sae = Op.getOperand(5);
17382 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
                                              Sae),
                                  Mask, Src0, Subtarget, DAG);
    }
17386 assert(Op.getNumOperands() == 7 && "Unexpected intrinsic form");
17387 SDValue RoundingMode = Op.getOperand(5);
17388 SDValue Sae = Op.getOperand(6);
17389 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
17390 RoundingMode, Sae),
                                Mask, Src0, Subtarget, DAG);
  }
17393 case INTR_TYPE_2OP_MASK: {
17394 SDValue Src1 = Op.getOperand(1);
17395 SDValue Src2 = Op.getOperand(2);
17396 SDValue PassThru = Op.getOperand(3);
17397 SDValue Mask = Op.getOperand(4);
17398 // We specify 2 possible opcodes for intrinsics with rounding modes.
17399 // First, we check if the intrinsic may have non-default rounding mode,
17400 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
17401 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17402 if (IntrWithRoundingModeOpcode != 0) {
17403 SDValue Rnd = Op.getOperand(5);
17404 unsigned Round = cast<ConstantSDNode>(Rnd)->getZExtValue();
17405 if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION) {
17406 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17407 dl, Op.getValueType(),
                                    Src1, Src2, Rnd),
                                    Mask, PassThru, Subtarget, DAG);
      }
    }
17412 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
                                            Src1, Src2),
                                Mask, PassThru, Subtarget, DAG);
  }
17416 case FMA_OP_MASK: {
17417 SDValue Src1 = Op.getOperand(1);
17418 SDValue Src2 = Op.getOperand(2);
17419 SDValue Src3 = Op.getOperand(3);
17420 SDValue Mask = Op.getOperand(4);
17421 // We specify 2 possible opcodes for intrinsics with rounding modes.
17422 // First, we check if the intrinsic may have non-default rounding mode,
17423 // (IntrData->Opc1 != 0), then we check the rounding mode operand.
17424 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17425 if (IntrWithRoundingModeOpcode != 0) {
17426 SDValue Rnd = Op.getOperand(5);
17427 if (cast<ConstantSDNode>(Rnd)->getZExtValue() !=
17428 X86::STATIC_ROUNDING::CUR_DIRECTION)
17429 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17430 dl, Op.getValueType(),
17431 Src1, Src2, Src3, Rnd),
                                    Mask, Src1, Subtarget, DAG);
    }
17434 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
17435 dl, Op.getValueType(),
                                            Src1, Src2, Src3),
                                Mask, Src1, Subtarget, DAG);
  }
  case CMP_MASK:
  case CMP_MASK_CC: {
17441 // Comparison intrinsics with masks.
17442 // Example of transformation:
17443 // (i8 (int_x86_avx512_mask_pcmpeq_q_128
    //             (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
    // (i8 (bitcast
    //   (v8i1 (insert_subvector undef,
17447 // (v2i1 (and (PCMPEQM %a, %b),
17448 // (extract_subvector
17449 // (v8i1 (bitcast %mask)), 0))), 0))))
17450 EVT VT = Op.getOperand(1).getValueType();
17451 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17452 VT.getVectorNumElements());
17453 SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3);
17454 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17455 Mask.getValueType().getSizeInBits());
    SDValue Cmp;
    if (IntrData->Type == CMP_MASK_CC) {
      Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
                        Op.getOperand(2), Op.getOperand(3));
    } else {
      assert(IntrData->Type == CMP_MASK && "Unexpected intrinsic type!");
      Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
                        Op.getOperand(2));
    }
    SDValue CmpMask = getVectorMaskingNode(Cmp, Mask,
                                           DAG.getTargetConstant(0, MaskVT),
                                           Subtarget, DAG);
17468 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
17469 DAG.getUNDEF(BitcastVT), CmpMask,
17470 DAG.getIntPtrConstant(0));
    return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
  }
17473 case COMI: { // Comparison intrinsics
17474 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
17475 SDValue LHS = Op.getOperand(1);
17476 SDValue RHS = Op.getOperand(2);
17477 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
17478 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
17479 SDValue Cond = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
17480 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17481 DAG.getConstant(X86CC, MVT::i8), Cond);
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }
  case VSHIFT:
    return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
17486 Op.getOperand(1), Op.getOperand(2), DAG);
  case VSHIFT_MASK:
    return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl,
                                                    Op.getSimpleValueType(),
                                                    Op.getOperand(1),
                                                    Op.getOperand(2), DAG),
                                Op.getOperand(4), Op.getOperand(3), Subtarget,
                                DAG);
17494 case COMPRESS_EXPAND_IN_REG: {
17495 SDValue Mask = Op.getOperand(3);
17496 SDValue DataToCompress = Op.getOperand(1);
17497 SDValue PassThru = Op.getOperand(2);
17498 if (isAllOnes(Mask)) // return data as is
17499 return Op.getOperand(1);
17500 EVT VT = Op.getValueType();
17501 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17502 VT.getVectorNumElements());
17503 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17504 Mask.getValueType().getSizeInBits());
17506 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17507 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17508 DAG.getIntPtrConstant(0));
    return DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToCompress,
                       PassThru);
  }
  case BLEND: {
    SDValue Mask = Op.getOperand(3);
17515 EVT VT = Op.getValueType();
17516 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17517 VT.getVectorNumElements());
17518 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17519 Mask.getValueType().getSizeInBits());
17521 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17522 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17523 DAG.getIntPtrConstant(0));
    return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1),
                       Op.getOperand(2));
  }
  default:
    break;
  }
  }

  switch (IntNo) {
  default: return SDValue();    // Don't custom lower most intrinsics.
17535 case Intrinsic::x86_avx512_mask_valign_q_512:
17536 case Intrinsic::x86_avx512_mask_valign_d_512:
17537 // Vector source operands are swapped.
17538 return getVectorMaskingNode(DAG.getNode(X86ISD::VALIGN, dl,
17539 Op.getValueType(), Op.getOperand(2),
                                            Op.getOperand(1),
                                            Op.getOperand(3)),
                                Op.getOperand(5), Op.getOperand(4),
                                Subtarget, DAG);
  // ptest and testp intrinsics. The intrinsics these come from are designed to
  // return an integer value, not just an instruction, so lower them to the
  // ptest or testp pattern and a setcc for the result.
17548 case Intrinsic::x86_sse41_ptestz:
17549 case Intrinsic::x86_sse41_ptestc:
17550 case Intrinsic::x86_sse41_ptestnzc:
17551 case Intrinsic::x86_avx_ptestz_256:
17552 case Intrinsic::x86_avx_ptestc_256:
17553 case Intrinsic::x86_avx_ptestnzc_256:
17554 case Intrinsic::x86_avx_vtestz_ps:
17555 case Intrinsic::x86_avx_vtestc_ps:
17556 case Intrinsic::x86_avx_vtestnzc_ps:
17557 case Intrinsic::x86_avx_vtestz_pd:
17558 case Intrinsic::x86_avx_vtestc_pd:
17559 case Intrinsic::x86_avx_vtestnzc_pd:
17560 case Intrinsic::x86_avx_vtestz_ps_256:
17561 case Intrinsic::x86_avx_vtestc_ps_256:
17562 case Intrinsic::x86_avx_vtestnzc_ps_256:
17563 case Intrinsic::x86_avx_vtestz_pd_256:
17564 case Intrinsic::x86_avx_vtestc_pd_256:
17565 case Intrinsic::x86_avx_vtestnzc_pd_256: {
17566 bool IsTestPacked = false;
    unsigned X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
17570 case Intrinsic::x86_avx_vtestz_ps:
17571 case Intrinsic::x86_avx_vtestz_pd:
17572 case Intrinsic::x86_avx_vtestz_ps_256:
17573 case Intrinsic::x86_avx_vtestz_pd_256:
17574 IsTestPacked = true; // Fallthrough
17575 case Intrinsic::x86_sse41_ptestz:
17576 case Intrinsic::x86_avx_ptestz_256:
      X86CC = X86::COND_E;
      break;
17580 case Intrinsic::x86_avx_vtestc_ps:
17581 case Intrinsic::x86_avx_vtestc_pd:
17582 case Intrinsic::x86_avx_vtestc_ps_256:
17583 case Intrinsic::x86_avx_vtestc_pd_256:
17584 IsTestPacked = true; // Fallthrough
17585 case Intrinsic::x86_sse41_ptestc:
17586 case Intrinsic::x86_avx_ptestc_256:
      X86CC = X86::COND_B;
      break;
17590 case Intrinsic::x86_avx_vtestnzc_ps:
17591 case Intrinsic::x86_avx_vtestnzc_pd:
17592 case Intrinsic::x86_avx_vtestnzc_ps_256:
17593 case Intrinsic::x86_avx_vtestnzc_pd_256:
17594 IsTestPacked = true; // Fallthrough
17595 case Intrinsic::x86_sse41_ptestnzc:
17596 case Intrinsic::x86_avx_ptestnzc_256:
      X86CC = X86::COND_A;
      break;
    }
17602 SDValue LHS = Op.getOperand(1);
17603 SDValue RHS = Op.getOperand(2);
17604 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
17605 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
17606 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17607 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }
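    // For example, int_x86_sse41_ptestz(%a, %b) becomes
    //   (zext (X86SetCC COND_E, (X86ISD::PTEST %a, %b)))
    // i.e. "ptest; sete; movzbl" at the instruction level, matching the
    // ZF-based definition of ptestz; the vtest* forms use TESTP instead.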
17610 case Intrinsic::x86_avx512_kortestz_w:
17611 case Intrinsic::x86_avx512_kortestc_w: {
    unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz_w) ?
                     X86::COND_E : X86::COND_B;
17613 SDValue LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(1));
17614 SDValue RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(2));
17615 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17616 SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
17617 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test);
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }

  case Intrinsic::x86_sse42_pcmpistria128:
17622 case Intrinsic::x86_sse42_pcmpestria128:
17623 case Intrinsic::x86_sse42_pcmpistric128:
17624 case Intrinsic::x86_sse42_pcmpestric128:
17625 case Intrinsic::x86_sse42_pcmpistrio128:
17626 case Intrinsic::x86_sse42_pcmpestrio128:
17627 case Intrinsic::x86_sse42_pcmpistris128:
17628 case Intrinsic::x86_sse42_pcmpestris128:
17629 case Intrinsic::x86_sse42_pcmpistriz128:
17630 case Intrinsic::x86_sse42_pcmpestriz128: {
    unsigned Opcode;
    unsigned X86CC;
    switch (IntNo) {
    default: llvm_unreachable("Impossible intrinsic");  // Can't reach here.
    case Intrinsic::x86_sse42_pcmpistria128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpestria128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_A;
      break;
    case Intrinsic::x86_sse42_pcmpistric128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpestric128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_B;
      break;
    case Intrinsic::x86_sse42_pcmpistrio128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpestrio128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_O;
      break;
    case Intrinsic::x86_sse42_pcmpistris128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpestris128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_S;
      break;
    case Intrinsic::x86_sse42_pcmpistriz128:
      Opcode = X86ISD::PCMPISTRI;
      X86CC = X86::COND_E;
      break;
    case Intrinsic::x86_sse42_pcmpestriz128:
      Opcode = X86ISD::PCMPESTRI;
      X86CC = X86::COND_E;
      break;
    }
17676 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17677 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17678 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps);
17679 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17680 DAG.getConstant(X86CC, MVT::i8),
17681 SDValue(PCMP.getNode(), 1));
    return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
  }
17685 case Intrinsic::x86_sse42_pcmpistri128:
17686 case Intrinsic::x86_sse42_pcmpestri128: {
    unsigned Opcode;
    if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
17689 Opcode = X86ISD::PCMPISTRI;
    else
      Opcode = X86ISD::PCMPESTRI;
17693 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17694 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
    return DAG.getNode(Opcode, dl, VTs, NewOps);
  }
  }
}
17700 static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17701 SDValue Src, SDValue Mask, SDValue Base,
17702 SDValue Index, SDValue ScaleOp, SDValue Chain,
17703 const X86Subtarget * Subtarget) {
  SDLoc dl(Op);
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17706 assert(C && "Invalid scale type");
17707 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17708 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17709 Index.getSimpleValueType().getVectorNumElements());
  SDValue MaskInReg;
  ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
  if (MaskC)
    MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
  else
    MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17716 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
17717 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17718 SDValue Segment = DAG.getRegister(0, MVT::i32);
17719 if (Src.getOpcode() == ISD::UNDEF)
17720 Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
17721 SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17722 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17723 SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
17724 return DAG.getMergeValues(RetOps, dl);
17727 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17728 SDValue Src, SDValue Mask, SDValue Base,
17729 SDValue Index, SDValue ScaleOp, SDValue Chain) {
  SDLoc dl(Op);
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17732 assert(C && "Invalid scale type");
17733 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17734 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17735 SDValue Segment = DAG.getRegister(0, MVT::i32);
17736 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17737 Index.getSimpleValueType().getVectorNumElements());
  SDValue MaskInReg;
  ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
  if (MaskC)
    MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
  else
    MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17744 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
17745 SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
17746 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17747 return SDValue(Res, 1);
17750 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17751 SDValue Mask, SDValue Base, SDValue Index,
17752 SDValue ScaleOp, SDValue Chain) {
  SDLoc dl(Op);
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17755 assert(C && "Invalid scale type");
17756 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17757 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17758 SDValue Segment = DAG.getRegister(0, MVT::i32);
  MVT MaskVT =
    MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
  SDValue MaskInReg;
  ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
  if (MaskC)
    MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
  else
    MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17768 SDValue Ops[] = {MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17769 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
17770 return SDValue(Res, 0);
17773 // getReadPerformanceCounter - Handles the lowering of builtin intrinsics that
17774 // read performance monitor counters (x86_rdpmc).
17775 static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
17776 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17777 SmallVectorImpl<SDValue> &Results) {
17778 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue LO, HI;

  // The ECX register is used to select the index of the performance counter
  // to read.
  SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
                                   N->getOperand(2));
  SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);
17788 // Reads the content of a 64-bit performance counter and returns it in the
17789 // registers EDX:EAX.
17790 if (Subtarget->is64Bit()) {
17791 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
                            LO.getValue(2));
  } else {
    LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
                            LO.getValue(2));
  }
17799 Chain = HI.getValue(1);
17801 if (Subtarget->is64Bit()) {
17802 // The EAX register is loaded with the low-order 32 bits. The EDX register
17803 // is loaded with the supported high-order bits of the counter.
17804 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17805 DAG.getConstant(32, MVT::i8));
17806 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
    Results.push_back(Chain);
    return;
  }
17811 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17812 SDValue Ops[] = { LO, HI };
17813 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17814 Results.push_back(Pair);
17815 Results.push_back(Chain);
17818 // getReadTimeStampCounter - Handles the lowering of builtin intrinsics that
17819 // read the time stamp counter (x86_rdtsc and x86_rdtscp). This function is
17820 // also used to custom lower READCYCLECOUNTER nodes.
17821 static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
17822 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17823 SmallVectorImpl<SDValue> &Results) {
17824 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
  SDValue LO, HI;
17828 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
17829 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
17830 // and the EAX register is loaded with the low-order 32 bits.
17831 if (Subtarget->is64Bit()) {
17832 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
                            LO.getValue(2));
  } else {
    LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
    HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
                            LO.getValue(2));
  }
17840 SDValue Chain = HI.getValue(1);
17842 if (Opcode == X86ISD::RDTSCP_DAG) {
17843 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17845 // Instruction RDTSCP loads the IA32:TSC_AUX_MSR (address C000_0103H) into
17846 // the ECX register. Add 'ecx' explicitly to the chain.
    SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
                                     HI.getValue(2));
17849 // Explicitly store the content of ECX at the location passed in input
17850 // to the 'rdtscp' intrinsic.
17851 Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
                         MachinePointerInfo(), false, false, 0);
  }
17855 if (Subtarget->is64Bit()) {
17856 // The EDX register is loaded with the high-order 32 bits of the MSR, and
17857 // the EAX register is loaded with the low-order 32 bits.
17858 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17859 DAG.getConstant(32, MVT::i8));
17860 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
    Results.push_back(Chain);
    return;
  }
17865 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17866 SDValue Ops[] = { LO, HI };
17867 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17868 Results.push_back(Pair);
17869 Results.push_back(Chain);
17872 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
17873 SelectionDAG &DAG) {
17874 SmallVector<SDValue, 2> Results;
  SDLoc DL(Op);
  getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
                          Results);
17878 return DAG.getMergeValues(Results, DL);
17882 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17883 SelectionDAG &DAG) {
17884 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
  SDLoc dl(Op);
  const IntrinsicData* IntrData = getIntrinsicWithChain(IntNo);
  if (!IntrData)
    return SDValue();
17891 switch(IntrData->Type) {
17893 llvm_unreachable("Unknown Intrinsic Type");
17897 // Emit the node with the right value type.
17898 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
17899 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
17901 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
    // Otherwise return the value from Rand, which is always 0, cast to i32.
17903 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
17904 DAG.getConstant(1, Op->getValueType(1)),
17905 DAG.getConstant(X86::COND_B, MVT::i32),
17906 SDValue(Result.getNode(), 1) };
17907 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
                                  DAG.getVTList(Op->getValueType(1), MVT::Glue),
                                  Ops);
17911 // Return { result, isValid, chain }.
17912 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
17913 SDValue(Result.getNode(), 2));
  }
  case GATHER: {
    //gather(v1, mask, index, base, scale);
17917 SDValue Chain = Op.getOperand(0);
17918 SDValue Src = Op.getOperand(2);
17919 SDValue Base = Op.getOperand(3);
17920 SDValue Index = Op.getOperand(4);
17921 SDValue Mask = Op.getOperand(5);
17922 SDValue Scale = Op.getOperand(6);
    return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale,
                         Chain, Subtarget);
  }
  case SCATTER: {
    //scatter(base, mask, index, v1, scale);
17928 SDValue Chain = Op.getOperand(0);
17929 SDValue Base = Op.getOperand(2);
17930 SDValue Mask = Op.getOperand(3);
17931 SDValue Index = Op.getOperand(4);
17932 SDValue Src = Op.getOperand(5);
17933 SDValue Scale = Op.getOperand(6);
    return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index,
                          Scale, Chain);
  }
  case PREFETCH: {
    SDValue Hint = Op.getOperand(6);
    unsigned HintVal;
    if (dyn_cast<ConstantSDNode> (Hint) == nullptr ||
        (HintVal = dyn_cast<ConstantSDNode> (Hint)->getZExtValue()) > 1)
      llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
17942 unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
17943 SDValue Chain = Op.getOperand(0);
17944 SDValue Mask = Op.getOperand(2);
17945 SDValue Index = Op.getOperand(3);
17946 SDValue Base = Op.getOperand(4);
17947 SDValue Scale = Op.getOperand(5);
17948 return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain);
  }
  // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
  case RDTSC: {
    SmallVector<SDValue, 2> Results;
17953 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget, Results);
17954 return DAG.getMergeValues(Results, dl);
  }
  // Read Performance Monitoring Counters.
  case RDPMC: {
    SmallVector<SDValue, 2> Results;
17959 getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
17960 return DAG.getMergeValues(Results, dl);
  }
  // XTEST intrinsics.
  case XTEST: {
    SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
17965 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
17966 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                                DAG.getConstant(X86::COND_NE, MVT::i8),
                                InTrans);
17969 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
17970 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
17971 Ret, SDValue(InTrans.getNode(), 1));
  }
  case ADX: {
    SmallVector<SDValue, 2> Results;
17976 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
17977 SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
17978 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
17979 DAG.getConstant(-1, MVT::i8));
17980 SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
17981 Op.getOperand(4), GenCF.getValue(1));
17982 SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
                                 Op.getOperand(5), MachinePointerInfo(),
                                 false, false, 0);
17985 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                                DAG.getConstant(X86::COND_B, MVT::i8),
                                GenCF.getValue(1));
17988 Results.push_back(SetCC);
17989 Results.push_back(Store);
17990 return DAG.getMergeValues(Results, dl);
  }
  case COMPRESS_TO_MEM: {
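    // Semantics sketch: a compress-store packs only the mask-selected lanes
    // contiguously at Addr. E.g. storing <4 x i32> <a,b,c,d> under mask 0b0101
    // writes just the two-element sequence <a, c>, rather than a four-wide
    // store with holes.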
17994 SDValue Mask = Op.getOperand(4);
17995 SDValue DataToCompress = Op.getOperand(3);
17996 SDValue Addr = Op.getOperand(2);
17997 SDValue Chain = Op.getOperand(0);
17999 if (isAllOnes(Mask)) // return just a store
18000 return DAG.getStore(Chain, dl, DataToCompress, Addr,
18001 MachinePointerInfo(), false, false, 0);
18003 EVT VT = DataToCompress.getValueType();
18004 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18005 VT.getVectorNumElements());
18006 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18007 Mask.getValueType().getSizeInBits());
18008 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
18009 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
18010 DAG.getIntPtrConstant(0));
18012 SDValue Compressed = DAG.getNode(IntrData->Opc0, dl, VT, VMask,
18013 DataToCompress, DAG.getUNDEF(VT));
18014 return DAG.getStore(Chain, dl, Compressed, Addr,
18015 MachinePointerInfo(), false, false, 0);
  }
  case EXPAND_FROM_MEM: {
18019 SDValue Mask = Op.getOperand(4);
18020 SDValue PathThru = Op.getOperand(3);
18021 SDValue Addr = Op.getOperand(2);
18022 SDValue Chain = Op.getOperand(0);
18023 EVT VT = Op.getValueType();
18025 if (isAllOnes(Mask)) // return just a load
      return DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(), false, false,
                         false, 0);
18028 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18029 VT.getVectorNumElements());
18030 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18031 Mask.getValueType().getSizeInBits());
18032 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
18033 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
18034 DAG.getIntPtrConstant(0));
18036 SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(),
18037 false, false, false, 0);
18039 SDValue Results[] = {
      DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToExpand, PathThru),
      Chain};
    return DAG.getMergeValues(Results, dl);
  }
  }
}
18047 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
18048 SelectionDAG &DAG) const {
18049 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
18050 MFI->setReturnAddressIsTaken(true);
  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();
18055 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc dl(Op);
  EVT PtrVT = getPointerTy();

  if (Depth > 0) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
18061 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18062 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
18063 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
18064 DAG.getNode(ISD::ADD, dl, PtrVT,
18065 FrameAddr, Offset),
                       MachinePointerInfo(), false, false, false, 0);
  }

  // Just load the return address.
18070 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
18071 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
18072 RetAddrFI, MachinePointerInfo(), false, false, false, 0);
18075 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
18076 MachineFunction &MF = DAG.getMachineFunction();
18077 MachineFrameInfo *MFI = MF.getFrameInfo();
18078 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
18079 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18080 EVT VT = Op.getValueType();
18082 MFI->setFrameAddressIsTaken(true);
18084 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
    // Depth > 0 makes no sense on targets which use Windows unwind codes. It
    // is not possible to crawl up the stack without looking at the unwind
    // codes simultaneously.
18088 int FrameAddrIndex = FuncInfo->getFAIndex();
18089 if (!FrameAddrIndex) {
18090 // Set up a frame object for the return address.
18091 unsigned SlotSize = RegInfo->getSlotSize();
18092 FrameAddrIndex = MF.getFrameInfo()->CreateFixedObject(
18093 SlotSize, /*Offset=*/INT64_MIN, /*IsImmutable=*/false);
18094 FuncInfo->setFAIndex(FrameAddrIndex);
    return DAG.getFrameIndex(FrameAddrIndex, VT);
  }
18099 unsigned FrameReg =
18100 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
18101 SDLoc dl(Op); // FIXME probably not meaningful
18102 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
18103 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
18104 (FrameReg == X86::EBP && VT == MVT::i32)) &&
18105 "Invalid Frame Register!");
18106 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo(),
                            false, false, false, 0);
  return FrameAddr;
}
18114 // FIXME? Maybe this could be a TableGen attribute on some registers and
18115 // this table could be generated automatically from RegInfo.
18116 unsigned X86TargetLowering::getRegisterByName(const char* RegName,
                                               EVT VT) const {
  unsigned Reg = StringSwitch<unsigned>(RegName)
18119 .Case("esp", X86::ESP)
18120 .Case("rsp", X86::RSP)
18124 report_fatal_error("Invalid register name global variable");
18127 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
18128 SelectionDAG &DAG) const {
18129 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18130 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
18133 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
18134 SDValue Chain = Op.getOperand(0);
18135 SDValue Offset = Op.getOperand(1);
18136 SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);
  EVT PtrVT = getPointerTy();
18140 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18141 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
18142 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
18143 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
18144 "Invalid Frame Register!");
18145 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
18146 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
18148 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
18149 DAG.getIntPtrConstant(RegInfo->getSlotSize()));
18150 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
  Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
                       false, false, 0);
18153 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
18155 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
18156 DAG.getRegister(StoreAddrReg, PtrVT));
18159 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
18160 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
18163 DAG.getVTList(MVT::i32, MVT::Other),
18164 Op.getOperand(0), Op.getOperand(1));
18167 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
18168 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
18171 Op.getOperand(0), Op.getOperand(1));
18174 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
18175 return Op.getOperand(0);
18178 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
18179 SelectionDAG &DAG) const {
18180 SDValue Root = Op.getOperand(0);
18181 SDValue Trmp = Op.getOperand(1); // trampoline
18182 SDValue FPtr = Op.getOperand(2); // nested function
18183 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
  SDLoc dl(Op);
  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
18187 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
18189 if (Subtarget->is64Bit()) {
18190 SDValue OutChains[6];
18192 // Large code-model.
18193 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
18194 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
18196 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
18197 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
18199 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
18201 // Load the pointer to the nested function into R11.
18202 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
18203 SDValue Addr = Trmp;
18204 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr),
                                false, false, 0);
18208 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18209 DAG.getConstant(2, MVT::i64));
18210 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
                                MachinePointerInfo(TrmpAddr, 2),
                                false, false, 2);
18214 // Load the 'nest' parameter value into R10.
18215 // R10 is specified in X86CallingConv.td
18216 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
18217 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18218 DAG.getConstant(10, MVT::i64));
18219 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr, 10),
                                false, false, 0);
18223 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18224 DAG.getConstant(12, MVT::i64));
18225 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
                                MachinePointerInfo(TrmpAddr, 12),
                                false, false, 2);
18229 // Jump to the nested function.
18230 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
18231 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18232 DAG.getConstant(20, MVT::i64));
18233 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
                                Addr, MachinePointerInfo(TrmpAddr, 20),
                                false, false, 0);
18237 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
18238 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18239 DAG.getConstant(22, MVT::i64));
18240 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
                                MachinePointerInfo(TrmpAddr, 22),
                                false, false, 0);
18244 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
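    // For reference, the 24 trampoline bytes written above decode as follows
    // (a sketch; the 8-byte immediates are shown symbolically):
    //   offset  0: 49 BB <FPtr>   movabsq $FPtr, %r11
    //   offset 10: 49 BA <Nest>   movabsq $Nest, %r10
    //   offset 20: 49 FF E3       rex.W jmpq *%r11
    // The i16 opcode stores depend on x86 being little-endian: the REX prefix
    // (0x49) is the low byte of each value, so it lands first in memory.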
  }

  const Function *Func =
18247 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
18248 CallingConv::ID CC = Func->getCallingConv();
18253 llvm_unreachable("Unsupported calling convention");
18254 case CallingConv::C:
18255 case CallingConv::X86_StdCall: {
18256 // Pass 'nest' parameter in ECX.
18257 // Must be kept in sync with X86CallingConv.td
18258 NestReg = X86::ECX;
18260 // Check that ECX wasn't needed by an 'inreg' parameter.
18261 FunctionType *FTy = Func->getFunctionType();
18262 const AttributeSet &Attrs = Func->getAttributes();
18264 if (!Attrs.isEmpty() && !Func->isVarArg()) {
        unsigned InRegCount = 0;
        unsigned Idx = 1;
18268 for (FunctionType::param_iterator I = FTy->param_begin(),
18269 E = FTy->param_end(); I != E; ++I, ++Idx)
18270 if (Attrs.hasAttribute(Idx, Attribute::InReg))
18271 // FIXME: should only count parameters that are lowered to integers.
18272 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
18274 if (InRegCount > 2) {
18275 report_fatal_error("Nest register in use - reduce number of inreg"
18281 case CallingConv::X86_FastCall:
18282 case CallingConv::X86_ThisCall:
18283 case CallingConv::Fast:
18284 // Pass 'nest' parameter in EAX.
18285 // Must be kept in sync with X86CallingConv.td
      NestReg = X86::EAX;
      break;
    }
18290 SDValue OutChains[4];
18291 SDValue Addr, Disp;
18293 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18294 DAG.getConstant(10, MVT::i32));
18295 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
18297 // This is storing the opcode for MOV32ri.
18298 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
18299 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
18300 OutChains[0] = DAG.getStore(Root, dl,
18301 DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
                                Trmp, MachinePointerInfo(TrmpAddr),
                                false, false, 0);
18305 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18306 DAG.getConstant(1, MVT::i32));
18307 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
                                MachinePointerInfo(TrmpAddr, 1),
                                false, false, 1);
18311 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
18312 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18313 DAG.getConstant(5, MVT::i32));
18314 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
                                MachinePointerInfo(TrmpAddr, 5),
                                false, false, 1);
18318 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18319 DAG.getConstant(6, MVT::i32));
18320 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
                                MachinePointerInfo(TrmpAddr, 6),
                                false, false, 1);
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
  }
}
18328 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
18329 SelectionDAG &DAG) const {
  /*
   The rounding mode is in bits 11:10 of FPSR, and has the following
   settings:
     00 Round to nearest
     01 Round to -inf
     10 Round to +inf
     11 Round to 0

  FLT_ROUNDS, on the other hand, expects the following:
    -1 Undefined
     0 Round to 0
     1 Round to nearest
     2 Round to +inf
     3 Round to -inf

  To perform the conversion, we do:
    (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
  */
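  // A quick check of that expression against all four FPSR settings:
  //   RC=00 (nearest): (0|0)+1 = 1          -> FLT_ROUNDS 1 (to nearest)
  //   RC=01 (-inf):    (0|2)+1 = 3          -> FLT_ROUNDS 3 (to -inf)
  //   RC=10 (+inf):    (1|0)+1 = 2          -> FLT_ROUNDS 2 (to +inf)
  //   RC=11 (to 0):    (1|2)+1 = 4, &3 = 0  -> FLT_ROUNDS 0 (to 0)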
18349 MachineFunction &MF = DAG.getMachineFunction();
18350 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
18351 unsigned StackAlignment = TFI.getStackAlignment();
  MVT VT = Op.getSimpleValueType();
  SDLoc DL(Op);
18355 // Save FP Control Word to stack slot
18356 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
18357 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
18359 MachineMemOperand *MMO =
18360 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
18361 MachineMemOperand::MOStore, 2, 2);
18363 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
18364 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
18365 DAG.getVTList(MVT::Other),
18366 Ops, MVT::i16, MMO);
18368 // Load FP Control Word from stack slot
18369 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
18370 MachinePointerInfo(), false, false, false, 0);
  // Transform as necessary
  SDValue CWD1 =
    DAG.getNode(ISD::SRL, DL, MVT::i16,
                DAG.getNode(ISD::AND, DL, MVT::i16,
                            CWD, DAG.getConstant(0x800, MVT::i16)),
                DAG.getConstant(11, MVT::i8));
  SDValue CWD2 =
    DAG.getNode(ISD::SRL, DL, MVT::i16,
                DAG.getNode(ISD::AND, DL, MVT::i16,
                            CWD, DAG.getConstant(0x400, MVT::i16)),
                DAG.getConstant(9, MVT::i8));

  SDValue RetVal =
    DAG.getNode(ISD::AND, DL, MVT::i16,
                DAG.getNode(ISD::ADD, DL, MVT::i16,
                            DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
                            DAG.getConstant(1, MVT::i16)),
                DAG.getConstant(3, MVT::i16));
18391 return DAG.getNode((VT.getSizeInBits() < 16 ?
18392 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
18395 static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
18396 MVT VT = Op.getSimpleValueType();
  EVT OpVT = VT;
  unsigned NumBits = VT.getSizeInBits();
  SDLoc dl(Op);
18401 Op = Op.getOperand(0);
18402 if (VT == MVT::i8) {
18403 // Zero extend to i32 since there is not an i8 bsr.
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
  }
18408 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
18409 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18410 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
  // If src is zero (i.e. bsr sets ZF), returns NumBits.
  SDValue Ops[] = {
    Op,
    DAG.getConstant(NumBits+NumBits-1, OpVT),
    DAG.getConstant(X86::COND_E, MVT::i8),
    Op.getValue(1)
  };
18419 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
18421 // Finally xor with NumBits-1.
18422 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
  return Op;
}
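// Worked example of the xor trick above: for the i32 input 0x00008000, BSR
// produces 15 (the index of the highest set bit) and 15 ^ 31 == 31 - 15 == 16,
// which is ctlz(0x00008000). The subtract can become an xor because a BSR
// result for a 32-bit operand always fits in the low 5 bits.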
18429 static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
18430 MVT VT = Op.getSimpleValueType();
  EVT OpVT = VT;
  unsigned NumBits = VT.getSizeInBits();
  SDLoc dl(Op);
18435 Op = Op.getOperand(0);
18436 if (VT == MVT::i8) {
18437 // Zero extend to i32 since there is not an i8 bsr.
    OpVT = MVT::i32;
    Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
  }
18442 // Issue a bsr (scan bits in reverse).
18443 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18444 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18446 // And xor with NumBits-1.
18447 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
  if (VT == MVT::i8)
    Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
  return Op;
}
18454 static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
18455 MVT VT = Op.getSimpleValueType();
18456 unsigned NumBits = VT.getSizeInBits();
  SDLoc dl(Op);
  Op = Op.getOperand(0);
18460 // Issue a bsf (scan bits forward) which also sets EFLAGS.
18461 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
18462 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);
18464 // If src is zero (i.e. bsf sets ZF), returns NumBits.
  SDValue Ops[] = {
    Op,
    DAG.getConstant(NumBits, VT),
    DAG.getConstant(X86::COND_E, MVT::i8),
    Op.getValue(1)
  };
18471 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
18474 // Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
18475 // ones, and then concatenate the result back.
18476 static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
18477 MVT VT = Op.getSimpleValueType();
18479 assert(VT.is256BitVector() && VT.isInteger() &&
18480 "Unsupported value type for operation");
  unsigned NumElems = VT.getVectorNumElements();
  SDLoc dl(Op);
18485 // Extract the LHS vectors
18486 SDValue LHS = Op.getOperand(0);
18487 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
18488 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
18490 // Extract the RHS vectors
18491 SDValue RHS = Op.getOperand(1);
18492 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
18493 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
18495 MVT EltVT = VT.getVectorElementType();
18496 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
18498 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
18499 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
                     DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
}
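// For instance, a v8i32 add reaching this point on a target without 256-bit
// integer ops becomes two v4i32 adds on the extracted halves followed by a
// CONCAT_VECTORS of the two partial results.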
18503 static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) {
18504 assert(Op.getSimpleValueType().is256BitVector() &&
18505 Op.getSimpleValueType().isInteger() &&
18506 "Only handle AVX 256-bit vector integer operation");
18507 return Lower256IntArith(Op, DAG);
18510 static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
18511 assert(Op.getSimpleValueType().is256BitVector() &&
18512 Op.getSimpleValueType().isInteger() &&
18513 "Only handle AVX 256-bit vector integer operation");
18514 return Lower256IntArith(Op, DAG);
18517 static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
18518 SelectionDAG &DAG) {
  SDLoc dl(Op);
  MVT VT = Op.getSimpleValueType();
18522 // Decompose 256-bit ops into smaller 128-bit ops.
18523 if (VT.is256BitVector() && !Subtarget->hasInt256())
18524 return Lower256IntArith(Op, DAG);
18526 SDValue A = Op.getOperand(0);
18527 SDValue B = Op.getOperand(1);
18529 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
18530 if (VT == MVT::v4i32) {
18531 assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() &&
18532 "Should not custom lower when pmuldq is available!");
18534 // Extract the odd parts.
18535 static const int UnpackMask[] = { 1, -1, 3, -1 };
18536 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
18537 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
18539 // Multiply the even parts.
18540 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B);
18541 // Now multiply odd parts.
18542 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds);
18544 Evens = DAG.getNode(ISD::BITCAST, dl, VT, Evens);
18545 Odds = DAG.getNode(ISD::BITCAST, dl, VT, Odds);
    // Merge the two vectors back together with a shuffle. This expands into 2
    // shuffles.
    static const int ShufMask[] = { 0, 4, 2, 6 };
    return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
  }
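    // Worked example: with A = <a0,a1,a2,a3> and B = <b0,b1,b2,b3>, Evens
    // viewed as v4i32 is <lo(a0*b0), hi(a0*b0), lo(a2*b2), hi(a2*b2)> and
    // Odds likewise holds a1*b1 and a3*b3, so picking lanes {0,4,2,6}
    // collects <lo(a0*b0), lo(a1*b1), lo(a2*b2), lo(a3*b3)>, the v4i32
    // product.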
18553 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
18554 "Only know how to lower V2I64/V4I64/V8I64 multiply");
18556 // Ahi = psrlqi(a, 32);
18557 // Bhi = psrlqi(b, 32);
18559 // AloBlo = pmuludq(a, b);
18560 // AloBhi = pmuludq(a, Bhi);
18561 // AhiBlo = pmuludq(Ahi, b);
18563 // AloBhi = psllqi(AloBhi, 32);
18564 // AhiBlo = psllqi(AhiBlo, 32);
18565 // return AloBlo + AloBhi + AhiBlo;
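  // Why this decomposition is valid: writing a = Alo + 2^32*Ahi and
  // b = Blo + 2^32*Bhi, the product mod 2^64 is
  //   Alo*Blo + 2^32*(Alo*Bhi + Ahi*Blo)
  // since the Ahi*Bhi term is shifted out of the low 64 bits entirely; three
  // pmuludq ops plus two shifts and two adds therefore reconstruct the full
  // 64-bit multiply per lane.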
18567 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
18568 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
18570 // Bit cast to 32-bit vectors for MULUDQ
18571 EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 :
18572 (VT == MVT::v4i64) ? MVT::v8i32 : MVT::v16i32;
18573 A = DAG.getNode(ISD::BITCAST, dl, MulVT, A);
18574 B = DAG.getNode(ISD::BITCAST, dl, MulVT, B);
18575 Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi);
18576 Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi);
18578 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
18579 SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
18580 SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
18582 AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG);
18583 AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG);
18585 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
18586 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
18589 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
18590 assert(Subtarget->isTargetWin64() && "Unexpected target");
18591 EVT VT = Op.getValueType();
18592 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
18593 "Unexpected return type for lowering");
  RTLIB::Libcall LC;
  bool isSigned;
  switch (Op->getOpcode()) {
18598 default: llvm_unreachable("Unexpected request for libcall!");
18599 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
18600 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
18601 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
18602 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
18603 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
18604 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
  }

  SDLoc dl(Op);
  SDValue InChain = DAG.getEntryNode();
18610 TargetLowering::ArgListTy Args;
18611 TargetLowering::ArgListEntry Entry;
18612 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
18613 EVT ArgVT = Op->getOperand(i).getValueType();
18614 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
18615 "Unexpected argument type for lowering");
18616 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
18617 Entry.Node = StackPtr;
    InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr,
                           MachinePointerInfo(), false, false, 16);
18620 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
18621 Entry.Ty = PointerType::get(ArgTy,0);
18622 Entry.isSExt = false;
18623 Entry.isZExt = false;
    Args.push_back(Entry);
  }

  SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
                                         getPointerTy());
18630 TargetLowering::CallLoweringInfo CLI(DAG);
18631 CLI.setDebugLoc(dl).setChain(InChain)
18632 .setCallee(getLibcallCallingConv(LC),
18633 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
18634 Callee, std::move(Args), 0)
18635 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
18637 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
18638 return DAG.getNode(ISD::BITCAST, dl, VT, CallInfo.first);
static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
                             SelectionDAG &DAG) {
  SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
  EVT VT = Op0.getValueType();
  SDLoc dl(Op);

  assert((VT == MVT::v4i32 && Subtarget->hasSSE2()) ||
         (VT == MVT::v8i32 && Subtarget->hasInt256()));

  // PMULxD operations multiply each even value (starting at 0) of LHS with
  // the related value of RHS and produce a widened result.
  // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
  // => <2 x i64> <ae|cg>
  //
  // In other words, to have all the results, we need to perform two PMULxD:
  // 1. one with the even values.
  // 2. one with the odd values.
  // To achieve #2, we need to place the odd values at an even position.
  //
  // Place the odd value at an even position (basically, shift all values 1
  // step to the left):
  const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
  // <a|b|c|d> => <b|undef|d|undef>
  SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0, Mask);
  // <e|f|g|h> => <f|undef|h|undef>
  SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1, Mask);

  // Emit two multiplies, one for the lower 2 ints and one for the higher 2
  // ints.
  MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
  bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
  unsigned Opcode =
      (!IsSigned || !Subtarget->hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
  // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
  // => <2 x i64> <ae|cg>
  SDValue Mul1 = DAG.getNode(ISD::BITCAST, dl, VT,
                             DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
  // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
  // => <2 x i64> <bf|dh>
  SDValue Mul2 = DAG.getNode(ISD::BITCAST, dl, VT,
                             DAG.getNode(Opcode, dl, MulVT, Odd0, Odd1));

  // Shuffle it back into the right order.
  SDValue Highs, Lows;
  if (VT == MVT::v8i32) {
    const int HighMask[] = {1, 9, 3, 11, 5, 13, 7, 15};
    Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
    const int LowMask[] = {0, 8, 2, 10, 4, 12, 6, 14};
    Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
  } else {
    const int HighMask[] = {1, 5, 3, 7};
    Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
    const int LowMask[] = {0, 4, 2, 6};
    Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
  }

  // If we have a signed multiply but no PMULDQ, fix up the high parts of an
  // unsigned multiply.
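  // For 32-bit lanes the standard correction applies:
  //   highS(a, b) = highU(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0)
  // The arithmetic shift by 31 turns each lane's sign bit into an all-ones
  // mask, so T1 and T2 below are exactly those two correction terms.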
  if (IsSigned && !Subtarget->hasSSE41()) {
    SDValue ShAmt =
        DAG.getConstant(31, DAG.getTargetLoweringInfo().getShiftAmountTy(VT));
    SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
                             DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
    SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
                             DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0);

    SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
    Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup);
  }

  // The first result of MUL_LOHI is actually the low value, followed by the
  // high value.
  SDValue Ops[] = {Lows, Highs};
  return DAG.getMergeValues(Ops, dl);
}
static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
                                         const X86Subtarget *Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);

  // Optimize shl/srl/sra with constant shift amount.
  if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
    if (auto *ShiftConst = BVAmt->getConstantSplatNode()) {
      uint64_t ShiftAmt = ShiftConst->getZExtValue();

      if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
          (Subtarget->hasInt256() &&
           (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16)) ||
          (Subtarget->hasAVX512() &&
           (VT == MVT::v8i64 || VT == MVT::v16i32))) {
        if (Op.getOpcode() == ISD::SHL)
          return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
                                            DAG);
        if (Op.getOpcode() == ISD::SRL)
          return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
                                            DAG);
        if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64)
          return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
                                            DAG);
      }

      if (VT == MVT::v16i8) {
        if (Op.getOpcode() == ISD::SHL) {
          // Make a large shift.
          SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
                                                   MVT::v8i16, R, ShiftAmt,
                                                   DAG);
          SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
          // Zero out the rightmost bits.
          SmallVector<SDValue, 16> V(16,
                                     DAG.getConstant(uint8_t(-1U << ShiftAmt),
                                                     MVT::i8));
          return DAG.getNode(ISD::AND, dl, VT, SHL,
                             DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
        }
        if (Op.getOpcode() == ISD::SRL) {
          // Make a large shift.
          SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
                                                   MVT::v8i16, R, ShiftAmt,
                                                   DAG);
          SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
          // Zero out the leftmost bits.
          SmallVector<SDValue, 16> V(16,
                                     DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
                                                     MVT::i8));
          return DAG.getNode(ISD::AND, dl, VT, SRL,
                             DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
        }
        if (Op.getOpcode() == ISD::SRA) {
          if (ShiftAmt == 7) {
            // R s>> 7 === R s< 0
            SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
            return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
          }

          // R s>> a === ((R u>> a) ^ m) - m
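          // E.g. for ShiftAmt == 2 and a lane holding 0x80 (-128):
          // u>> 2 gives 0x20, m == 128 >> 2 == 0x20, and
          // (0x20 ^ 0x20) - 0x20 == 0xE0 == -32, matching -128 s>> 2.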
          SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
          SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt,
                                                         MVT::i8));
          SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
          Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
          Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
          return Res;
        }
        llvm_unreachable("Unknown shift opcode.");
      }

      if (Subtarget->hasInt256() && VT == MVT::v32i8) {
        if (Op.getOpcode() == ISD::SHL) {
          // Make a large shift.
          SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
                                                   MVT::v16i16, R, ShiftAmt,
                                                   DAG);
          SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
          // Zero out the rightmost bits.
          SmallVector<SDValue, 32> V(32,
                                     DAG.getConstant(uint8_t(-1U << ShiftAmt),
                                                     MVT::i8));
          return DAG.getNode(ISD::AND, dl, VT, SHL,
                             DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
        }
        if (Op.getOpcode() == ISD::SRL) {
          // Make a large shift.
          SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
                                                   MVT::v16i16, R, ShiftAmt,
                                                   DAG);
          SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
          // Zero out the leftmost bits.
          SmallVector<SDValue, 32> V(32,
                                     DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
                                                     MVT::i8));
          return DAG.getNode(ISD::AND, dl, VT, SRL,
                             DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
        }
        if (Op.getOpcode() == ISD::SRA) {
          if (ShiftAmt == 7) {
            // R s>> 7 === R s< 0
            SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
            return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
          }

          // R s>> a === ((R u>> a) ^ m) - m
          SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
          SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt,
                                                         MVT::i8));
          SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
          Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
          Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
          return Res;
        }
        llvm_unreachable("Unknown shift opcode.");
      }
    }
  }

  // Special case in 32-bit mode, where i64 is expanded into high and low parts.
  if (!Subtarget->is64Bit() &&
      (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) &&
      Amt.getOpcode() == ISD::BITCAST &&
      Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
    Amt = Amt.getOperand(0);
    unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
                     VT.getVectorNumElements();
    unsigned RatioInLog2 = Log2_32_Ceil(Ratio);
    uint64_t ShiftAmt = 0;
    for (unsigned i = 0; i != Ratio; ++i) {
      ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i));
      if (!C)
        return SDValue();
      ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2)));
    }
    // Check remaining shift amounts.
    for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
      uint64_t ShAmt = 0;
      for (unsigned j = 0; j != Ratio; ++j) {
        ConstantSDNode *C =
            dyn_cast<ConstantSDNode>(Amt.getOperand(i + j));
        if (!C)
          return SDValue();
        ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2)));
      }
      if (ShAmt != ShiftAmt)
        return SDValue();
    }
    switch (Op.getOpcode()) {
    default:
      llvm_unreachable("Unknown shift opcode!");
    case ISD::SHL:
      return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
                                        DAG);
    case ISD::SRL:
      return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
                                        DAG);
    case ISD::SRA:
      return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
                                        DAG);
    }
  }

  return SDValue();
}
static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
                                        const X86Subtarget* Subtarget) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);

  if ((VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) ||
      VT == MVT::v4i32 || VT == MVT::v8i16 ||
      (Subtarget->hasInt256() &&
       ((VT == MVT::v4i64 && Op.getOpcode() != ISD::SRA) ||
        VT == MVT::v8i32 || VT == MVT::v16i16)) ||
      (Subtarget->hasAVX512() && (VT == MVT::v8i64 || VT == MVT::v16i32))) {
    SDValue BaseShAmt;
    EVT EltVT = VT.getVectorElementType();

    if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Amt)) {
      // Check if this build_vector node is doing a splat.
      // If so, then set BaseShAmt equal to the splat value.
      BaseShAmt = BV->getSplatValue();
      if (BaseShAmt && BaseShAmt.getOpcode() == ISD::UNDEF)
        BaseShAmt = SDValue();
    } else {
      if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
        Amt = Amt.getOperand(0);

      ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt);
      if (SVN && SVN->isSplat()) {
        unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
        SDValue InVec = Amt.getOperand(0);
        if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
          assert((SplatIdx < InVec.getValueType().getVectorNumElements()) &&
                 "Unexpected shuffle index found!");
          BaseShAmt = InVec.getOperand(SplatIdx);
        } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
          if (ConstantSDNode *C =
                  dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
            if (C->getZExtValue() == SplatIdx)
              BaseShAmt = InVec.getOperand(1);
          }
        }

        if (!BaseShAmt)
          // Avoid introducing an extract element from a shuffle.
          BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec,
                                  DAG.getIntPtrConstant(SplatIdx));
      }
    }

    if (BaseShAmt.getNode()) {
      assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
      if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
        BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
      else if (EltVT.bitsLT(MVT::i32))
        BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);

      switch (Op.getOpcode()) {
      default:
        llvm_unreachable("Unknown shift opcode!");
      case ISD::SHL:
        switch (VT.SimpleTy) {
        default: return SDValue();
        case MVT::v2i64:
        case MVT::v4i32:
        case MVT::v8i16:
        case MVT::v4i64:
        case MVT::v8i32:
        case MVT::v16i16:
        case MVT::v16i32:
        case MVT::v8i64:
          return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG);
        }
      case ISD::SRA:
        switch (VT.SimpleTy) {
        default: return SDValue();
        case MVT::v4i32:
        case MVT::v8i16:
        case MVT::v8i32:
        case MVT::v16i16:
        case MVT::v16i32:
        case MVT::v8i64:
          return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG);
        }
      case ISD::SRL:
        switch (VT.SimpleTy) {
        default: return SDValue();
        case MVT::v2i64:
        case MVT::v4i32:
        case MVT::v8i16:
        case MVT::v4i64:
        case MVT::v8i32:
        case MVT::v16i16:
        case MVT::v16i32:
        case MVT::v8i64:
          return getTargetVShiftNode(X86ISD::VSRLI, dl, VT, R, BaseShAmt, DAG);
        }
      }
    }
  }

  // Special case in 32-bit mode, where i64 is expanded into high and low parts.
  if (!Subtarget->is64Bit() &&
      (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64) ||
       (Subtarget->hasAVX512() && VT == MVT::v8i64)) &&
      Amt.getOpcode() == ISD::BITCAST &&
      Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
    Amt = Amt.getOperand(0);
    unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
                     VT.getVectorNumElements();
    std::vector<SDValue> Vals(Ratio);
    for (unsigned i = 0; i != Ratio; ++i)
      Vals[i] = Amt.getOperand(i);
    for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
      for (unsigned j = 0; j != Ratio; ++j)
        if (Vals[j] != Amt.getOperand(i + j))
          return SDValue();
    }
    switch (Op.getOpcode()) {
    default:
      llvm_unreachable("Unknown shift opcode!");
    case ISD::SHL:
      return DAG.getNode(X86ISD::VSHL, dl, VT, R, Op.getOperand(1));
    case ISD::SRL:
      return DAG.getNode(X86ISD::VSRL, dl, VT, R, Op.getOperand(1));
    case ISD::SRA:
      return DAG.getNode(X86ISD::VSRA, dl, VT, R, Op.getOperand(1));
    }
  }

  return SDValue();
}
static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
                          SelectionDAG &DAG) {
  MVT VT = Op.getSimpleValueType();
  SDLoc dl(Op);
  SDValue R = Op.getOperand(0);
  SDValue Amt = Op.getOperand(1);
  SDValue V;

  assert(VT.isVector() && "Custom lowering only for vector shifts!");
  assert(Subtarget->hasSSE2() && "Only custom lower when we have SSE2!");

  V = LowerScalarImmediateShift(Op, DAG, Subtarget);
  if (V.getNode())
    return V;

  V = LowerScalarVariableShift(Op, DAG, Subtarget);
  if (V.getNode())
    return V;

  if (Subtarget->hasAVX512() && (VT == MVT::v16i32 || VT == MVT::v8i64))
    return Op;

  // AVX2 has VPSLLV/VPSRAV/VPSRLV.
  if (Subtarget->hasInt256()) {
    if (Op.getOpcode() == ISD::SRL &&
        (VT == MVT::v2i64 || VT == MVT::v4i32 ||
         VT == MVT::v4i64 || VT == MVT::v8i32))
      return Op;
    if (Op.getOpcode() == ISD::SHL &&
        (VT == MVT::v2i64 || VT == MVT::v4i32 ||
         VT == MVT::v4i64 || VT == MVT::v8i32))
      return Op;
    if (Op.getOpcode() == ISD::SRA && (VT == MVT::v4i32 || VT == MVT::v8i32))
      return Op;
  }

  // If possible, lower this packed shift into a vector multiply instead of
  // expanding it into a sequence of scalar shifts.
  // Do this only if the vector shift count is a constant build_vector.
  if (Op.getOpcode() == ISD::SHL &&
      (VT == MVT::v8i16 || VT == MVT::v4i32 ||
       (Subtarget->hasInt256() && VT == MVT::v16i16)) &&
      ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
    SmallVector<SDValue, 8> Elts;
    EVT SVT = VT.getScalarType();
    unsigned SVTBits = SVT.getSizeInBits();
    const APInt &One = APInt(SVTBits, 1);
    unsigned NumElems = VT.getVectorNumElements();

    for (unsigned i = 0; i != NumElems; ++i) {
      SDValue Op = Amt->getOperand(i);
      if (Op->getOpcode() == ISD::UNDEF) {
        Elts.push_back(Op);
        continue;
      }

      ConstantSDNode *ND = cast<ConstantSDNode>(Op);
      const APInt &C = APInt(SVTBits, ND->getAPIntValue().getZExtValue());
      uint64_t ShAmt = C.getZExtValue();
      if (ShAmt >= SVTBits) {
        Elts.push_back(DAG.getUNDEF(SVT));
        continue;
      }
      Elts.push_back(DAG.getConstant(One.shl(ShAmt), SVT));
    }
    SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
    return DAG.getNode(ISD::MUL, dl, VT, R, BV);
  }

  // Lower SHL with variable shift amount.
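  // The trick below builds the power of two 2^amt in floating point: adding
  // (amt << 23) to 0x3f800000 (the bit pattern of 1.0f) places amt in the
  // f32 exponent field, and FP_TO_SINT turns the resulting 2^amt back into
  // an integer, which is then multiplied into R.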
  if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
    Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT));

    Op = DAG.getNode(ISD::ADD, dl, VT, Op, DAG.getConstant(0x3f800000U, VT));
    Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
    Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
    return DAG.getNode(ISD::MUL, dl, VT, Op, R);
  }

  // If possible, lower this shift as a sequence of two shifts by
  // constant plus a MOVSS/MOVSD instead of scalarizing it.
  // Example:
  //   (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
  //
  // Could be rewritten as:
  //   (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
  //
  // The advantage is that the two shifts from the example would be
  // lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
  // the vector shift into four scalar shifts plus four pairs of vector
  // insert/extract.
  if ((VT == MVT::v8i16 || VT == MVT::v4i32) &&
      ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
    unsigned TargetOpcode = X86ISD::MOVSS;
    bool CanBeSimplified;
    // The splat value for the first packed shift (the 'X' from the example).
    SDValue Amt1 = Amt->getOperand(0);
    // The splat value for the second packed shift (the 'Y' from the example).
    SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) :
                                        Amt->getOperand(2);

    // See if it is possible to replace this node with a sequence of
    // two shifts followed by a MOVSS/MOVSD.
    if (VT == MVT::v4i32) {
      // Check if it is legal to use a MOVSS.
      CanBeSimplified = Amt2 == Amt->getOperand(2) &&
                        Amt2 == Amt->getOperand(3);
      if (!CanBeSimplified) {
        // Otherwise, check if we can still simplify this node using a MOVSD.
        CanBeSimplified = Amt1 == Amt->getOperand(1) &&
                          Amt->getOperand(2) == Amt->getOperand(3);
        TargetOpcode = X86ISD::MOVSD;
        Amt2 = Amt->getOperand(2);
      }
    } else {
      // Do similar checks for the case where the machine value type
      // is MVT::v8i16.
      CanBeSimplified = Amt1 == Amt->getOperand(1);
      for (unsigned i = 3; i != 8 && CanBeSimplified; ++i)
        CanBeSimplified = Amt2 == Amt->getOperand(i);

      if (!CanBeSimplified) {
        TargetOpcode = X86ISD::MOVSD;
        CanBeSimplified = true;
        Amt2 = Amt->getOperand(4);
        for (unsigned i = 0; i != 4 && CanBeSimplified; ++i)
          CanBeSimplified = Amt1 == Amt->getOperand(i);
        for (unsigned j = 4; j != 8 && CanBeSimplified; ++j)
          CanBeSimplified = Amt2 == Amt->getOperand(j);
      }
    }

    if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
        isa<ConstantSDNode>(Amt2)) {
      // Replace this node with two shifts followed by a MOVSS/MOVSD.
      EVT CastVT = MVT::v4i32;
      SDValue Splat1 =
          DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), VT);
      SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
      SDValue Splat2 =
          DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), VT);
      SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
      if (TargetOpcode == X86ISD::MOVSD)
        CastVT = MVT::v2i64;
      SDValue BitCast1 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift1);
      SDValue BitCast2 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift2);
      SDValue Result = getTargetShuffleNode(TargetOpcode, dl, CastVT, BitCast2,
                                            BitCast1, DAG);
      return DAG.getNode(ISD::BITCAST, dl, VT, Result);
    }
  }

  if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
    assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq.");
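
    // Each byte shifts by at most 7, so only 3 bits of the amount matter.
    // Shifting the amount left by 5 moves bit 2 into each byte's sign bit;
    // PCMPEQ against 0x80 then builds a per-byte mask that decides whether
    // to apply a shift by 4, then (after re-shifting the amount) by 2, and
    // finally by 1.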
    // a = a << 5;
    Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, VT));
    Op = DAG.getNode(ISD::BITCAST, dl, VT, Op);

    // Turn 'a' into a mask suitable for VSELECT.
    SDValue VSelM = DAG.getConstant(0x80, VT);
    SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
    OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);

    SDValue CM1 = DAG.getConstant(0x0f, VT);
    SDValue CM2 = DAG.getConstant(0x3f, VT);

    // r = VSELECT(r, psllw(r & (char16)15, 4), a);
    SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1);
    M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 4, DAG);
    M = DAG.getNode(ISD::BITCAST, dl, VT, M);
    R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);

    // a += a
    Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
    OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
    OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);

    // r = VSELECT(r, psllw(r & (char16)63, 2), a);
    M = DAG.getNode(ISD::AND, dl, VT, R, CM2);
    M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 2, DAG);
    M = DAG.getNode(ISD::BITCAST, dl, VT, M);
    R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);

    // a += a
    Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
    OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
    OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);

    // return VSELECT(r, r+r, a);
    R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel,
                    DAG.getNode(ISD::ADD, dl, VT, R, R), R);
    return R;
  }

  // It's worth extending once and using the v8i32 shifts for 16-bit types, but
  // the extra overheads to get from v16i8 to v8i32 make the existing SSE
  // solution better.
  if (Subtarget->hasInt256() && VT == MVT::v8i16) {
    MVT NewVT = VT == MVT::v8i16 ? MVT::v8i32 : MVT::v16i16;
    unsigned ExtOpc =
        Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
    R = DAG.getNode(ExtOpc, dl, NewVT, R);
    Amt = DAG.getNode(ISD::ANY_EXTEND, dl, NewVT, Amt);
    return DAG.getNode(ISD::TRUNCATE, dl, VT,
                       DAG.getNode(Op.getOpcode(), dl, NewVT, R, Amt));
  }

  // Decompose 256-bit shifts into smaller 128-bit shifts.
  if (VT.is256BitVector()) {
    unsigned NumElems = VT.getVectorNumElements();
    MVT EltVT = VT.getVectorElementType();
    EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);

    // Extract the two vectors.
    SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
    SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);

    // Recreate the shift amount vectors.
    SDValue Amt1, Amt2;
    if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
      // Constant shift amount.
      SmallVector<SDValue, 4> Amt1Csts;
      SmallVector<SDValue, 4> Amt2Csts;
      for (unsigned i = 0; i != NumElems/2; ++i)
        Amt1Csts.push_back(Amt->getOperand(i));
      for (unsigned i = NumElems/2; i != NumElems; ++i)
        Amt2Csts.push_back(Amt->getOperand(i));

      Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
      Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt2Csts);
    } else {
      // Variable shift amount.
      Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
      Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
    }

    // Issue new vector shifts for the smaller types.
    V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
    V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);

    // Concatenate the result back.
    return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
  }

  return SDValue();
}
static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
  // Lower the "add/sub/mul with overflow" instruction into a regular
  // instruction plus a "setcc" instruction that checks the overflow flag.
  // The "brcond" lowering looks for this combo and may remove the "setcc"
  // instruction if the "setcc" has only one use.
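  // E.g. (uaddo a, b) becomes an X86ISD::ADD that also defines EFLAGS,
  // followed by a SETCC on X86::COND_B (the carry flag).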
  SDNode *N = Op.getNode();
  SDValue LHS = N->getOperand(0);
  SDValue RHS = N->getOperand(1);
  unsigned BaseOp = 0;
  unsigned Cond = 0;
  SDLoc DL(Op);
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Unknown ovf instruction!");
  case ISD::SADDO:
    // An add of one will be selected as an INC. Note that INC doesn't
    // set CF, so we can't do this for UADDO.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
      if (C->isOne()) {
        BaseOp = X86ISD::INC;
        Cond = X86::COND_O;
        break;
      }
    BaseOp = X86ISD::ADD;
    Cond = X86::COND_O;
    break;
  case ISD::UADDO:
    BaseOp = X86ISD::ADD;
    Cond = X86::COND_B;
    break;
  case ISD::SSUBO:
    // A subtract of one will be selected as a DEC. Note that DEC doesn't
    // set CF, so we can't do this for USUBO.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
      if (C->isOne()) {
        BaseOp = X86ISD::DEC;
        Cond = X86::COND_O;
        break;
      }
    BaseOp = X86ISD::SUB;
    Cond = X86::COND_O;
    break;
  case ISD::USUBO:
    BaseOp = X86ISD::SUB;
    Cond = X86::COND_B;
    break;
  case ISD::SMULO:
    BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL;
    Cond = X86::COND_O;
    break;
  case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
    if (N->getValueType(0) == MVT::i8) {
      BaseOp = X86ISD::UMUL8;
      Cond = X86::COND_O;
      break;
    }
    SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
                                 MVT::i32);
    SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);

    SDValue SetCC =
        DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
                    DAG.getConstant(X86::COND_O, MVT::i32),
                    SDValue(Sum.getNode(), 2));

    return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
  }
  }

  // Also sets EFLAGS.
  SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
  SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);

  SDValue SetCC =
      DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
                  DAG.getConstant(Cond, MVT::i32),
                  SDValue(Sum.getNode(), 1));

  return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
}
// Sign extension of the low part of vector elements. This may be used either
// when sign extend instructions are not available or if the vector element
// sizes already match the sign-extended size. If the vector elements are in
// their pre-extended size and sign extend instructions are available, that will
// be handled by LowerSIGN_EXTEND.
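// For example, sign-extending the low 8 bits of each v4i32 element is
// emitted below as a vector shift left by 24 followed by an arithmetic
// shift right by 24 (BitsDiff == 24).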
SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDLoc dl(Op);
  EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
  MVT VT = Op.getSimpleValueType();

  if (!Subtarget->hasSSE2() || !VT.isVector())
    return SDValue();

  unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
                      ExtraVT.getScalarType().getSizeInBits();

  switch (VT.SimpleTy) {
  default: return SDValue();
  case MVT::v8i32:
  case MVT::v16i16:
    if (!Subtarget->hasFp256())
      return SDValue();
    if (!Subtarget->hasInt256()) {
      // needs to be split
      unsigned NumElems = VT.getVectorNumElements();

      // Extract the LHS vectors.
      SDValue LHS = Op.getOperand(0);
      SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
      SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);

      MVT EltVT = VT.getVectorElementType();
      EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);

      EVT ExtraEltVT = ExtraVT.getVectorElementType();
      unsigned ExtraNumElems = ExtraVT.getVectorNumElements();
      ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT,
                                 ExtraNumElems/2);
      SDValue Extra = DAG.getValueType(ExtraVT);

      LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra);
      LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra);

      return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);
    }
    // fall through
  case MVT::v4i32:
  case MVT::v8i16: {
    SDValue Op0 = Op.getOperand(0);

    // This is a sign extension of some low part of vector elements without
    // changing the size of the vector elements themselves:
    // Shift-Left + Shift-Right-Algebraic.
    SDValue Shl = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Op0,
                                             BitsDiff, DAG);
    return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Shl, BitsDiff,
                                      DAG);
  }
  }
}
/// Returns true if the operand type is exactly twice the native width, and
/// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
/// Used to know whether to use cmpxchg8/16b when expanding atomic operations
/// (otherwise we leave them alone to become __sync_fetch_and_... calls).
bool X86TargetLowering::needsCmpXchgNb(const Type *MemType) const {
  unsigned OpWidth = MemType->getPrimitiveSizeInBits();

  if (OpWidth == 64)
    return !Subtarget->is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
  else if (OpWidth == 128)
    return Subtarget->hasCmpxchg16b();
  else
    return false;
}

bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
  return needsCmpXchgNb(SI->getValueOperand()->getType());
}

// Note: this turns large loads into lock cmpxchg8b/16b.
// FIXME: On 32-bit x86, fild/movq might be faster than lock cmpxchg8b.
bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
  auto PTy = cast<PointerType>(LI->getPointerOperand()->getType());
  return needsCmpXchgNb(PTy->getElementType());
}

bool X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
  const Type *MemType = AI->getType();

  // If the operand is too big, we must see if cmpxchg8/16b is available
  // and default to library calls otherwise.
  if (MemType->getPrimitiveSizeInBits() > NativeWidth)
    return needsCmpXchgNb(MemType);

  AtomicRMWInst::BinOp Op = AI->getOperation();
  switch (Op) {
  default:
    llvm_unreachable("Unknown atomic operation");
  case AtomicRMWInst::Xchg:
  case AtomicRMWInst::Add:
  case AtomicRMWInst::Sub:
    // It's better to use xadd, xsub or xchg for these in all cases.
    return false;
  case AtomicRMWInst::Or:
  case AtomicRMWInst::And:
  case AtomicRMWInst::Xor:
    // If the atomicrmw's result isn't actually used, we can just add a "lock"
    // prefix to a normal instruction for these operations.
    return !AI->use_empty();
  case AtomicRMWInst::Nand:
  case AtomicRMWInst::Max:
  case AtomicRMWInst::Min:
  case AtomicRMWInst::UMax:
  case AtomicRMWInst::UMin:
    // These always require a non-trivial set of data operations on x86. We
    // must use a cmpxchg loop.
    return true;
  }
}
static bool hasMFENCE(const X86Subtarget& Subtarget) {
  // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
  // no-sse2). There isn't any reason to disable it if the target processor
  // supports it.
  return Subtarget.hasSSE2() || Subtarget.is64Bit();
}

LoadInst *
X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
  unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
  const Type *MemType = AI->getType();
  // Accesses larger than the native width are turned into cmpxchg/libcalls, so
  // there is no benefit in turning such RMWs into loads, and it is actually
  // harmful as it introduces an mfence.
  if (MemType->getPrimitiveSizeInBits() > NativeWidth)
    return nullptr;

  auto Builder = IRBuilder<>(AI);
  Module *M = Builder.GetInsertBlock()->getParent()->getParent();
  auto SynchScope = AI->getSynchScope();
  // We must restrict the ordering to avoid generating loads with Release or
  // ReleaseAcquire orderings.
  auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
  auto Ptr = AI->getPointerOperand();

  // Before the load we need a fence. Here is an example lifted from
  // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
  // is required:
  // Thread 0:
  //  x.store(1, relaxed);
  //  r1 = y.fetch_add(0, release);
  // Thread 1:
  //  y.fetch_add(42, acquire);
  //  r2 = x.load(relaxed);
  // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
  // lowered to just a load without a fence. An mfence flushes the store
  // buffer, making the optimization clearly correct.
  // FIXME: it is required if isAtLeastRelease(Order) but it is not clear
  // otherwise; we might be able to be more aggressive on relaxed idempotent
  // rmw. In practice, they do not look useful, so we don't try to be
  // especially clever.
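  //
  // For example, an idempotent RMW such as "atomicrmw or i32* %p, i32 0
  // acquire" can be rewritten here as an mfence followed by an acquire load
  // of %p.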
  if (SynchScope == SingleThread) {
    // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
    // the IR level, so we must wrap it in an intrinsic.
    return nullptr;
  } else if (hasMFENCE(*Subtarget)) {
    Function *MFence = llvm::Intrinsic::getDeclaration(M,
        Intrinsic::x86_sse2_mfence);
    Builder.CreateCall(MFence);
  } else {
    // FIXME: it might make sense to use a locked operation here but on a
    // different cache-line to prevent cache-line bouncing. In practice it
    // is probably a small win, and x86 processors without mfence are rare
    // enough that we do not bother.
    return nullptr;
  }

  // Finally we can emit the atomic load.
  LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
      AI->getType()->getPrimitiveSizeInBits());
  Loaded->setAtomic(Order, SynchScope);
  AI->replaceAllUsesWith(Loaded);
  AI->eraseFromParent();
  return Loaded;
}
static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
                                 SelectionDAG &DAG) {
  SDLoc dl(Op);
  AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
      cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
  SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
      cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());

  // The only fence that needs an instruction is a sequentially-consistent
  // cross-thread fence.
  if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
    if (hasMFENCE(*Subtarget))
      return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
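
    // Without mfence, fall back to a locked no-op read-modify-write:
    // "lock or dword ptr [esp], 0" leaves memory unchanged, but as a locked
    // operation it drains the store buffer and orders all surrounding memory
    // accesses, so it serves as a full fence.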
    SDValue Chain = Op.getOperand(0);
    SDValue Zero = DAG.getConstant(0, MVT::i32);
    SDValue Ops[] = {
      DAG.getRegister(X86::ESP, MVT::i32), // Base
      DAG.getTargetConstant(1, MVT::i8),   // Scale
      DAG.getRegister(0, MVT::i32),        // Index
      DAG.getTargetConstant(0, MVT::i32),  // Disp
      DAG.getRegister(0, MVT::i32),        // Segment.
      Zero,
      Chain
    };
    SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
    return SDValue(Res, 0);
  }

  // MEMBARRIER is a compiler barrier; it codegens to a no-op.
  return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
}
static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
                             SelectionDAG &DAG) {
  MVT T = Op.getSimpleValueType();
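  // CMPXCHG implicitly uses the accumulator register for both the expected
  // value and the result, so the register (AL/AX/EAX/RAX) and operand size
  // are selected from the value type below.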
  SDLoc DL(Op);
  unsigned Reg = 0;
  unsigned size = 0;
  switch(T.SimpleTy) {
  default: llvm_unreachable("Invalid value type!");
  case MVT::i8:  Reg = X86::AL;  size = 1; break;
  case MVT::i16: Reg = X86::AX;  size = 2; break;
  case MVT::i32: Reg = X86::EAX; size = 4; break;
  case MVT::i64:
    assert(Subtarget->is64Bit() && "Node not type legal!");
    Reg = X86::RAX; size = 8;
    break;
  }
  SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
                                  Op.getOperand(2), SDValue());
  SDValue Ops[] = { cpIn.getValue(0),
                    Op.getOperand(1),
                    Op.getOperand(3),
                    DAG.getTargetConstant(size, MVT::i8),
                    cpIn.getValue(1) };
  SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
  MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
  SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
                                           Ops, T, MMO);
  SDValue cpOut =
      DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
  SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
                                      MVT::i32, cpOut.getValue(2));
  SDValue Success = DAG.getNode(X86ISD::SETCC, DL, Op->getValueType(1),
                                DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);

  DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
  DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
  return SDValue();
}
static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
                            SelectionDAG &DAG) {
  MVT SrcVT = Op.getOperand(0).getSimpleValueType();
  MVT DstVT = Op.getSimpleValueType();

  if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) {
    assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
    if (DstVT != MVT::f64)
      // This conversion needs to be expanded.
      return SDValue();

    SDValue InVec = Op->getOperand(0);
    SDLoc dl(Op);
    unsigned NumElts = SrcVT.getVectorNumElements();
    EVT SVT = SrcVT.getVectorElementType();

    // Widen the input vector in the case of MVT::v2i32.
    // Example: from MVT::v2i32 to MVT::v4i32.
    SmallVector<SDValue, 16> Elts;
    for (unsigned i = 0, e = NumElts; i != e; ++i)
      Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
                                 DAG.getIntPtrConstant(i)));

    // Explicitly mark the extra elements as Undef.
    Elts.append(NumElts, DAG.getUNDEF(SVT));

    EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
    SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
    SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
    return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
                       DAG.getIntPtrConstant(0));
  }

  assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
         Subtarget->hasMMX() && "Unexpected custom BITCAST");
  assert((DstVT == MVT::i64 ||
          (DstVT.isVector() && DstVT.getSizeInBits() == 64)) &&
         "Unexpected custom BITCAST");
  // i64 <=> MMX conversions are Legal.
  if (SrcVT == MVT::i64 && DstVT.isVector())
    return Op;
  if (DstVT == MVT::i64 && SrcVT.isVector())
    return Op;
  // MMX <=> MMX conversions are Legal.
  if (SrcVT.isVector() && DstVT.isVector())
    return Op;
  // All other conversions need to be expanded.
  return SDValue();
}
static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
                          SelectionDAG &DAG) {
  SDNode *Node = Op.getNode();
  SDLoc dl(Node);

  Op = Op.getOperand(0);
  EVT VT = Op.getValueType();
  assert((VT.is128BitVector() || VT.is256BitVector()) &&
         "CTPOP lowering only implemented for 128/256-bit wide vector types");

  unsigned NumElts = VT.getVectorNumElements();
  EVT EltVT = VT.getVectorElementType();
  unsigned Len = EltVT.getSizeInBits();

  // This is the vectorized version of the "best" algorithm from
  // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
  // with a minor tweak to use a series of adds + shifts instead of vector
  // multiplications. Implemented for the v2i64, v4i64, v4i32, v8i32 types:
  //
  // v2i64, v4i64, v4i32 => Only profitable w/ popcnt disabled
  // v8i32 => Always profitable
  //
  // FIXME: There are a couple of possible improvements:
  //
  // 1) Support for i8 and i16 vectors (needs measurements if popcnt enabled).
  // 2) Use strategies from http://wm.ite.pl/articles/sse-popcount.html
  //
  assert(EltVT.isInteger() && (Len == 32 || Len == 64) && Len % 8 == 0 &&
         "CTPOP not implemented for this vector element type.");

  // X86 canonicalizes ANDs to vXi64; generate the appropriate bitcasts to
  // avoid extra legalization.
  bool NeedsBitcast = EltVT == MVT::i32;
  MVT BitcastVT = VT.is256BitVector() ? MVT::v4i64 : MVT::v2i64;

  SDValue Cst55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), EltVT);
  SDValue Cst33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), EltVT);
  SDValue Cst0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), EltVT);

  // v = v - ((v >> 1) & 0x55555555...)
  SmallVector<SDValue, 8> Ones(NumElts, DAG.getConstant(1, EltVT));
  SDValue OnesV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ones);
  SDValue Srl = DAG.getNode(ISD::SRL, dl, VT, Op, OnesV);
  if (NeedsBitcast)
    Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);

  SmallVector<SDValue, 8> Mask55(NumElts, Cst55);
  SDValue M55 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask55);
  if (NeedsBitcast)
    M55 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M55);

  SDValue And = DAG.getNode(ISD::AND, dl, Srl.getValueType(), Srl, M55);
  if (VT != And.getValueType())
    And = DAG.getNode(ISD::BITCAST, dl, VT, And);
  SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Op, And);

  // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
  SmallVector<SDValue, 8> Mask33(NumElts, Cst33);
  SDValue M33 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask33);
  SmallVector<SDValue, 8> Twos(NumElts, DAG.getConstant(2, EltVT));
  SDValue TwosV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Twos);

  Srl = DAG.getNode(ISD::SRL, dl, VT, Sub, TwosV);
  if (NeedsBitcast) {
    Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
    M33 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M33);
    Sub = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Sub);
  }

  SDValue AndRHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Srl, M33);
  SDValue AndLHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Sub, M33);
  if (VT != AndRHS.getValueType()) {
    AndRHS = DAG.getNode(ISD::BITCAST, dl, VT, AndRHS);
    AndLHS = DAG.getNode(ISD::BITCAST, dl, VT, AndLHS);
  }
  SDValue Add = DAG.getNode(ISD::ADD, dl, VT, AndLHS, AndRHS);

  // v = (v + (v >> 4)) & 0x0F0F0F0F...
  SmallVector<SDValue, 8> Fours(NumElts, DAG.getConstant(4, EltVT));
  SDValue FoursV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Fours);
  Srl = DAG.getNode(ISD::SRL, dl, VT, Add, FoursV);
  Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);

  SmallVector<SDValue, 8> Mask0F(NumElts, Cst0F);
  SDValue M0F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask0F);
  if (NeedsBitcast) {
    Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
    M0F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M0F);
  }
  And = DAG.getNode(ISD::AND, dl, M0F.getValueType(), Add, M0F);
  if (VT != And.getValueType())
    And = DAG.getNode(ISD::BITCAST, dl, VT, And);

  // The algorithm mentioned above uses:
  //    v = (v * 0x01010101...) >> (Len - 8)
  //
  // Change it to use vector adds + vector shifts which yield faster results on
  // Haswell than using vector integer multiplication.
  //
  // For i32 elements:
  //    v = v + (v >> 8)
  //    v = v + (v >> 16)
  //
  // For i64 elements:
  //    v = v + (v >> 8)
  //    v = v + (v >> 16)
  //    v = v + (v >> 32)
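  //
  // At this point each byte already holds its own population count (at most
  // eight), so the add/shift ladder below is just a horizontal byte sum:
  // every step folds the upper half of the remaining bytes into the lower
  // half.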
  SDValue Add = And;
  SmallVector<SDValue, 8> Csts;
  for (unsigned i = 8; i <= Len/2; i *= 2) {
    Csts.assign(NumElts, DAG.getConstant(i, EltVT));
    SDValue CstsV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Csts);
    Srl = DAG.getNode(ISD::SRL, dl, VT, Add, CstsV);
    Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
    Csts.clear();
  }

  // The result is in the least significant 6 bits for i32 and 7 bits for i64.
  SDValue Cst3F = DAG.getConstant(APInt(Len, Len == 32 ? 0x3F : 0x7F), EltVT);
  SmallVector<SDValue, 8> Cst3FV(NumElts, Cst3F);
  SDValue M3F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Cst3FV);
  if (NeedsBitcast) {
    Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
    M3F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M3F);
  }
  And = DAG.getNode(ISD::AND, dl, M3F.getValueType(), Add, M3F);
  if (VT != And.getValueType())
    And = DAG.getNode(ISD::BITCAST, dl, VT, And);

  return And;
}
static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
  SDNode *Node = Op.getNode();
  SDLoc dl(Node);
  EVT T = Node->getValueType(0);
  SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
                              DAG.getConstant(0, T), Node->getOperand(2));
  return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
                       cast<AtomicSDNode>(Node)->getMemoryVT(),
                       Node->getOperand(0),
                       Node->getOperand(1), negOp,
                       cast<AtomicSDNode>(Node)->getMemOperand(),
                       cast<AtomicSDNode>(Node)->getOrdering(),
                       cast<AtomicSDNode>(Node)->getSynchScope());
}
static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
  SDNode *Node = Op.getNode();
  SDLoc dl(Node);
  EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();

  // Convert seq_cst store -> xchg
  // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
  // FIXME: On 32-bit, store -> fist or movq would be more efficient
  //        (The only way to get a 16-byte store is cmpxchg16b)
  // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
  if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
      !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
    SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
                                 cast<AtomicSDNode>(Node)->getMemoryVT(),
                                 Node->getOperand(0),
                                 Node->getOperand(1), Node->getOperand(2),
                                 cast<AtomicSDNode>(Node)->getMemOperand(),
                                 cast<AtomicSDNode>(Node)->getOrdering(),
                                 cast<AtomicSDNode>(Node)->getSynchScope());
    return Swap.getValue(1);
  }
  // Other atomic stores have a simple pattern.
  return Op;
}
static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
  EVT VT = Op.getNode()->getSimpleValueType(0);

  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  SDVTList VTs = DAG.getVTList(VT, MVT::i32);

  unsigned Opc;
  bool ExtraOp = false;
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Invalid code");
  case ISD::ADDC: Opc = X86ISD::ADD; break;
  case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
  case ISD::SUBC: Opc = X86ISD::SUB; break;
  case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
  }

  if (!ExtraOp)
    return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
                       Op.getOperand(1));
  return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
                     Op.getOperand(1), Op.getOperand(2));
}
static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
                            SelectionDAG &DAG) {
  assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit());

  // For MacOSX, we want to call an alternative entry point: __sincos_stret,
  // which returns the values as { float, float } (in XMM0) or
  // { double, double } (which is returned in XMM0, XMM1).
  SDLoc dl(Op);
  SDValue Arg = Op.getOperand(0);
  EVT ArgVT = Arg.getValueType();
  Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());

  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Node = Arg;
  Entry.Ty = ArgTy;
  Entry.isSExt = false;
  Entry.isZExt = false;
  Args.push_back(Entry);

  bool isF64 = ArgVT == MVT::f64;
  // Only optimize x86_64 for now. i386 is a bit messy. For f32,
  // the small struct {f32, f32} is returned in (eax, edx). For f64,
  // the results are returned via SRet in memory.
  const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret";
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Callee = DAG.getExternalSymbol(LibcallName, TLI.getPointerTy());

  Type *RetTy = isF64
      ? (Type*)StructType::get(ArgTy, ArgTy, nullptr)
      : (Type*)VectorType::get(ArgTy, 4);

  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
    .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);

  std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);

  if (isF64)
    // Returned in xmm0 and xmm1.
    return CallResult.first;

  // Returned in bits 0:31 and 32:63 of xmm0.
  SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
                               CallResult.first, DAG.getIntPtrConstant(0));
  SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
                               CallResult.first, DAG.getIntPtrConstant(1));
  SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
  return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
}
/// LowerOperation - Provide custom lowering hooks for some operations.
///
SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Should not custom lower this!");
  case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
  case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
    return LowerCMP_SWAP(Op, Subtarget, DAG);
  case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
  case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op, DAG);
  case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op, DAG);
  case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
  case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
  case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
  case ISD::VSELECT: return LowerVSELECT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
  case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
  case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
  case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
  case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
  case ISD::SHL_PARTS:
  case ISD::SRA_PARTS:
  case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
  case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
  case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
  case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
  case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
  case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
  case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
  case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
  case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
  case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
  case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG);
  case ISD::FABS:
  case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
  case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
  case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
  case ISD::SETCC: return LowerSETCC(Op, DAG);
  case ISD::SELECT: return LowerSELECT(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::JumpTable: return LowerJumpTable(Op, DAG);
  case ISD::VASTART: return LowerVASTART(Op, DAG);
  case ISD::VAARG: return LowerVAARG(Op, DAG);
  case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, Subtarget, DAG);
  case ISD::INTRINSIC_VOID:
  case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
  case ISD::FRAME_TO_ARGS_OFFSET:
    return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
  case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
  case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
  case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
  case ISD::CTLZ: return LowerCTLZ(Op, DAG);
  case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG);
  case ISD::CTTZ: return LowerCTTZ(Op, DAG);
  case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
  case ISD::UMUL_LOHI:
  case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
  case ISD::SRA:
  case ISD::SRL:
  case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
  case ISD::SADDO:
  case ISD::UADDO:
  case ISD::SSUBO:
  case ISD::USUBO:
  case ISD::SMULO:
  case ISD::UMULO: return LowerXALUO(Op, DAG);
  case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
  case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
  case ISD::ADD: return LowerADD(Op, DAG);
  case ISD::SUB: return LowerSUB(Op, DAG);
  case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
  }
}
/// ReplaceNodeResults - Replace a node with an illegal result type
/// with a new node built out of custom code.
void X86TargetLowering::ReplaceNodeResults(SDNode *N,
                                           SmallVectorImpl<SDValue>&Results,
                                           SelectionDAG &DAG) const {
  SDLoc dl(N);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Do not know how to custom type legalize this operation!");
  // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
  case X86ISD::FMINC:
  case X86ISD::FMIN:
  case X86ISD::FMAXC:
  case X86ISD::FMAX: {
    EVT VT = N->getValueType(0);
    if (VT != MVT::v2f32)
      llvm_unreachable("Unexpected type (!= v2f32) on FMIN/FMAX.");
    SDValue UNDEF = DAG.getUNDEF(VT);
    SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
                              N->getOperand(0), UNDEF);
    SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
                              N->getOperand(1), UNDEF);
    Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
    return;
  }
  case ISD::SIGN_EXTEND_INREG:
  case ISD::ADDC:
  case ISD::ADDE:
  case ISD::SUBC:
  case ISD::SUBE:
    // We don't want to expand or promote these.
    return;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::SREM:
  case ISD::UREM:
  case ISD::SDIVREM:
  case ISD::UDIVREM: {
    SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
    Results.push_back(V);
    return;
  }
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT: {
    bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;

    if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType()))
      return;

    std::pair<SDValue,SDValue> Vals =
        FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
    SDValue FIST = Vals.first, StackSlot = Vals.second;
    if (FIST.getNode()) {
      EVT VT = N->getValueType(0);
      // Return a load from the stack slot.
      if (StackSlot.getNode())
        Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
                                      MachinePointerInfo(),
                                      false, false, false, 0));
      else
        Results.push_back(FIST);
    }
    return;
  }
  case ISD::UINT_TO_FP: {
    assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
    if (N->getOperand(0).getValueType() != MVT::v2i32 ||
        N->getValueType(0) != MVT::v2f32)
      return;
    SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
                                 N->getOperand(0));
    SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
                                     MVT::f64);
    SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias);
    SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
                             DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias));
    Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or);
    SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
    Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
    return;
  }
  case ISD::FP_ROUND: {
    if (!TLI.isTypeLegal(N->getOperand(0).getValueType()))
      return;
    SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
    Results.push_back(V);
    return;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
    switch (IntNo) {
    default : llvm_unreachable("Do not know how to custom type "
                               "legalize this intrinsic operation!");
    case Intrinsic::x86_rdtsc:
      return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
                                     Results);
    case Intrinsic::x86_rdtscp:
      return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
                                     Results);
    case Intrinsic::x86_rdpmc:
      return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
    }
  }
  case ISD::READCYCLECOUNTER: {
    return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
                                   Results);
  }
  case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
    EVT T = N->getValueType(0);
    assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
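    // CMPXCHG8B/CMPXCHG16B take their operands in fixed register pairs: the
    // expected value in EDX:EAX (RDX:RAX) and the replacement in ECX:EBX
    // (RCX:RBX), so the wide value is split and marshalled into those pairs
    // explicitly below.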
    bool Regs64bit = T == MVT::i128;
    EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
    SDValue cpInL, cpInH;
    cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
                        DAG.getConstant(0, HalfT));
    cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
                        DAG.getConstant(1, HalfT));
    cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
                             Regs64bit ? X86::RAX : X86::EAX,
                             cpInL, SDValue());
    cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
                             Regs64bit ? X86::RDX : X86::EDX,
                             cpInH, cpInL.getValue(1));
    SDValue swapInL, swapInH;
    swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
                          DAG.getConstant(0, HalfT));
    swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
                          DAG.getConstant(1, HalfT));
    swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl,
                               Regs64bit ? X86::RBX : X86::EBX,
                               swapInL, cpInH.getValue(1));
    swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl,
                               Regs64bit ? X86::RCX : X86::ECX,
                               swapInH, swapInL.getValue(1));
    SDValue Ops[] = { swapInH.getValue(0),
                      N->getOperand(1),
                      swapInH.getValue(1) };
    SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
    MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
    unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG :
                                  X86ISD::LCMPXCHG8_DAG;
    SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
    SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
                                        Regs64bit ? X86::RAX : X86::EAX,
                                        HalfT, Result.getValue(1));
    SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
                                        Regs64bit ? X86::RDX : X86::EDX,
                                        HalfT, cpOutL.getValue(2));
    SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};

    SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
                                        MVT::i32, cpOutH.getValue(2));
    SDValue Success =
        DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                    DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
    Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));

    Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
    Results.push_back(Success);
    Results.push_back(EFLAGS.getValue(1));
    return;
  }
  case ISD::ATOMIC_SWAP:
  case ISD::ATOMIC_LOAD_ADD:
  case ISD::ATOMIC_LOAD_SUB:
  case ISD::ATOMIC_LOAD_AND:
  case ISD::ATOMIC_LOAD_OR:
  case ISD::ATOMIC_LOAD_XOR:
  case ISD::ATOMIC_LOAD_NAND:
  case ISD::ATOMIC_LOAD_MIN:
  case ISD::ATOMIC_LOAD_MAX:
  case ISD::ATOMIC_LOAD_UMIN:
  case ISD::ATOMIC_LOAD_UMAX:
  case ISD::ATOMIC_LOAD: {
    // Delegate to generic TypeLegalization. Situations we can really handle
    // should have already been dealt with by AtomicExpandPass.cpp.
    return;
  }
  case ISD::BITCAST: {
    assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
    EVT DstVT = N->getValueType(0);
    EVT SrcVT = N->getOperand(0)->getValueType(0);

    if (SrcVT != MVT::f64 ||
        (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
      return;

    unsigned NumElts = DstVT.getVectorNumElements();
    EVT SVT = DstVT.getVectorElementType();
    EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
    SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
                                   MVT::v2f64, N->getOperand(0));
    SDValue ToVecInt = DAG.getNode(ISD::BITCAST, dl, WiderVT, Expanded);

    if (ExperimentalVectorWideningLegalization) {
      // If we are legalizing vectors by widening, we already have the desired
      // legal vector type, just return it.
      Results.push_back(ToVecInt);
      return;
    }

    SmallVector<SDValue, 8> Elts;
    for (unsigned i = 0, e = NumElts; i != e; ++i)
      Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
                                 ToVecInt, DAG.getIntPtrConstant(i)));

    Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
  }
  }
}

const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return nullptr;
  case X86ISD::BSF: return "X86ISD::BSF";
  case X86ISD::BSR: return "X86ISD::BSR";
  case X86ISD::SHLD: return "X86ISD::SHLD";
  case X86ISD::SHRD: return "X86ISD::SHRD";
  case X86ISD::FAND: return "X86ISD::FAND";
  case X86ISD::FANDN: return "X86ISD::FANDN";
  case X86ISD::FOR: return "X86ISD::FOR";
  case X86ISD::FXOR: return "X86ISD::FXOR";
  case X86ISD::FSRL: return "X86ISD::FSRL";
  case X86ISD::FILD: return "X86ISD::FILD";
  case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
  case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
  case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
  case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
  case X86ISD::FLD: return "X86ISD::FLD";
  case X86ISD::FST: return "X86ISD::FST";
  case X86ISD::CALL: return "X86ISD::CALL";
  case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
  case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
  case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
  case X86ISD::BT: return "X86ISD::BT";
  case X86ISD::CMP: return "X86ISD::CMP";
  case X86ISD::COMI: return "X86ISD::COMI";
  case X86ISD::UCOMI: return "X86ISD::UCOMI";
  case X86ISD::CMPM: return "X86ISD::CMPM";
  case X86ISD::CMPMU: return "X86ISD::CMPMU";
  case X86ISD::SETCC: return "X86ISD::SETCC";
  case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
  case X86ISD::FSETCC: return "X86ISD::FSETCC";
  case X86ISD::CMOV: return "X86ISD::CMOV";
  case X86ISD::BRCOND: return "X86ISD::BRCOND";
  case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
  case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
  case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
  case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
  case X86ISD::Wrapper: return "X86ISD::Wrapper";
  case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
  case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
  case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
  case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
  case X86ISD::PINSRB: return "X86ISD::PINSRB";
  case X86ISD::PINSRW: return "X86ISD::PINSRW";
  case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
  case X86ISD::ANDNP: return "X86ISD::ANDNP";
  case X86ISD::PSIGN: return "X86ISD::PSIGN";
  case X86ISD::BLENDI: return "X86ISD::BLENDI";
  case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
  case X86ISD::SUBUS: return "X86ISD::SUBUS";
  case X86ISD::HADD: return "X86ISD::HADD";
  case X86ISD::HSUB: return "X86ISD::HSUB";
  case X86ISD::FHADD: return "X86ISD::FHADD";
  case X86ISD::FHSUB: return "X86ISD::FHSUB";
  case X86ISD::UMAX: return "X86ISD::UMAX";
  case X86ISD::UMIN: return "X86ISD::UMIN";
  case X86ISD::SMAX: return "X86ISD::SMAX";
  case X86ISD::SMIN: return "X86ISD::SMIN";
  case X86ISD::FMAX: return "X86ISD::FMAX";
  case X86ISD::FMIN: return "X86ISD::FMIN";
  case X86ISD::FMAXC: return "X86ISD::FMAXC";
  case X86ISD::FMINC: return "X86ISD::FMINC";
  case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
  case X86ISD::FRCP: return "X86ISD::FRCP";
  case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
  case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
  case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
  case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
  case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
  case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
  case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
  case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
  case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
  case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
  case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
  case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
  case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
  case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
  case X86ISD::VZEXT: return "X86ISD::VZEXT";
  case X86ISD::VSEXT: return "X86ISD::VSEXT";
  case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
  case X86ISD::VTRUNCM: return "X86ISD::VTRUNCM";
  case X86ISD::VINSERT: return "X86ISD::VINSERT";
  case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
  case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
  case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
  case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
  case X86ISD::VSHL: return "X86ISD::VSHL";
  case X86ISD::VSRL: return "X86ISD::VSRL";
  case X86ISD::VSRA: return "X86ISD::VSRA";
  case X86ISD::VSHLI: return "X86ISD::VSHLI";
  case X86ISD::VSRLI: return "X86ISD::VSRLI";
  case X86ISD::VSRAI: return "X86ISD::VSRAI";
  case X86ISD::CMPP: return "X86ISD::CMPP";
  case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
  case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
  case X86ISD::PCMPEQM: return "X86ISD::PCMPEQM";
  case X86ISD::PCMPGTM: return "X86ISD::PCMPGTM";
  case X86ISD::ADD: return "X86ISD::ADD";
  case X86ISD::SUB: return "X86ISD::SUB";
  case X86ISD::ADC: return "X86ISD::ADC";
  case X86ISD::SBB: return "X86ISD::SBB";
  case X86ISD::SMUL: return "X86ISD::SMUL";
  case X86ISD::UMUL: return "X86ISD::UMUL";
  case X86ISD::SMUL8: return "X86ISD::SMUL8";
  case X86ISD::UMUL8: return "X86ISD::UMUL8";
  case X86ISD::SDIVREM8_SEXT_HREG: return "X86ISD::SDIVREM8_SEXT_HREG";
  case X86ISD::UDIVREM8_ZEXT_HREG: return "X86ISD::UDIVREM8_ZEXT_HREG";
  case X86ISD::INC: return "X86ISD::INC";
  case X86ISD::DEC: return "X86ISD::DEC";
  case X86ISD::OR: return "X86ISD::OR";
  case X86ISD::XOR: return "X86ISD::XOR";
  case X86ISD::AND: return "X86ISD::AND";
  case X86ISD::BEXTR: return "X86ISD::BEXTR";
  case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
  case X86ISD::PTEST: return "X86ISD::PTEST";
  case X86ISD::TESTP: return "X86ISD::TESTP";
  case X86ISD::TESTM: return "X86ISD::TESTM";
  case X86ISD::TESTNM: return "X86ISD::TESTNM";
  case X86ISD::KORTEST: return "X86ISD::KORTEST";
  case X86ISD::PACKSS: return "X86ISD::PACKSS";
  case X86ISD::PACKUS: return "X86ISD::PACKUS";
  case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
  case X86ISD::VALIGN: return "X86ISD::VALIGN";
  case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
  case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
  case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
  case X86ISD::SHUFP: return "X86ISD::SHUFP";
  case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
  case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
  case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
  case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
  case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
  case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
  case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
  case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
  case X86ISD::MOVSD: return "X86ISD::MOVSD";
  case X86ISD::MOVSS: return "X86ISD::MOVSS";
  case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
  case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
  case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
  case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
  case X86ISD::VEXTRACT: return "X86ISD::VEXTRACT";
  case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
  case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
  case X86ISD::VPERMV: return "X86ISD::VPERMV";
  case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
  case X86ISD::VPERMIV3: return "X86ISD::VPERMIV3";
  case X86ISD::VPERMI: return "X86ISD::VPERMI";
  case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
  case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
  case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
  case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
  case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
  case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
  case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
  case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL";
  case X86ISD::SAHF: return "X86ISD::SAHF";
  case X86ISD::RDRAND: return "X86ISD::RDRAND";
  case X86ISD::RDSEED: return "X86ISD::RDSEED";
  case X86ISD::FMADD: return "X86ISD::FMADD";
  case X86ISD::FMSUB: return "X86ISD::FMSUB";
  case X86ISD::FNMADD: return "X86ISD::FNMADD";
  case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
  case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
  case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
  case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI";
  case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI";
  case X86ISD::XTEST: return "X86ISD::XTEST";
  case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
  case X86ISD::EXPAND: return "X86ISD::EXPAND";
  case X86ISD::SELECT: return "X86ISD::SELECT";
  case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
  case X86ISD::RCP28: return "X86ISD::RCP28";
  case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
  case X86ISD::FADD_RND: return "X86ISD::FADD_RND";
  case X86ISD::FSUB_RND: return "X86ISD::FSUB_RND";
  case X86ISD::FMUL_RND: return "X86ISD::FMUL_RND";
  case X86ISD::FDIV_RND: return "X86ISD::FDIV_RND";
  }
}

// isLegalAddressingMode - Return true if the addressing mode represented
// by AM is legal for this target, for a load/store of the specified type.
bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                              Type *Ty) const {
  // X86 supports extremely general addressing modes.
  CodeModel::Model M = getTargetMachine().getCodeModel();
  Reloc::Model R = getTargetMachine().getRelocationModel();

  // X86 allows a sign-extended 32-bit immediate field as a displacement.
  if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
    return false;

  if (AM.BaseGV) {
    unsigned GVFlags =
      Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());

    // If a reference to this global requires an extra load, we can't fold it.
    if (isGlobalStubReference(GVFlags))
      return false;

    // If BaseGV requires a register for the PIC base, we cannot also have a
    // BaseReg specified.
    if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
      return false;

    // If lower 4G is not available, then we must use rip-relative addressing.
    if ((M != CodeModel::Small || R != Reloc::Static) &&
        Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
      return false;
  }

  switch (AM.Scale) {
  case 0:
  case 1:
  case 2:
  case 4:
  case 8:
    // These scales always work.
    break;
  case 3:
  case 5:
  case 9:
    // These scales are formed with basereg+scalereg. Only accept if there is
    // no basereg yet.
    if (AM.HasBaseReg)
      return false;
    break;
  default: // Other stuff never works.
    return false;
  }

  return true;
}
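
// Illustrative examples (sketch, not exhaustive) of what the rules above
// accept:
//   movl 16(%rdi,%rcx,4), %eax   // BaseReg + Index*4 + Disp: always legal
//   leaq (%rax,%rax,2), %rcx     // Scale 3 is really base+index*2, so it is
//                                // only legal when no other BaseReg is set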

bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
  unsigned Bits = Ty->getScalarSizeInBits();

  // 8-bit shifts are always expensive, but versions with a scalar amount
  // aren't particularly cheaper than those without.
  if (Bits == 8)
    return false;

  // On AVX2 there are new vpsllv[dq] instructions (and other shifts), that
  // make variable shifts just as cheap as scalar ones.
  if (Subtarget->hasInt256() && (Bits == 32 || Bits == 64))
    return false;

  // Otherwise, it's significantly cheaper to shift by a scalar amount than by
  // a fully general vector.
  return true;
}
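
// Illustrative example (sketch): shifting <4 x i32> by one scalar amount is a
// single PSLLD on SSE2, while a fully per-element variable shift of the same
// vector needs AVX2's VPSLLVD; hence the hasInt256() distinction above.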

bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;
  unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
  unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
  return NumBits1 > NumBits2;
}

bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;

  if (!isTypeLegal(EVT::getEVT(Ty1)))
    return false;

  assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");

  // Assuming the caller doesn't have a zeroext or signext return parameter,
  // truncation all the way down to i1 is valid.
  return true;
}

bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<32>(Imm);
}

bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
  // Can also use sub to handle negated immediates.
  return isInt<32>(Imm);
}

bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isInteger() || !VT2.isInteger())
    return false;
  unsigned NumBits1 = VT1.getSizeInBits();
  unsigned NumBits2 = VT2.getSizeInBits();
  return NumBits1 > NumBits2;
}

bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
  return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
}

bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
  // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
  return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
}

bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  EVT VT1 = Val.getValueType();
  if (isZExtFree(VT1, VT2))
    return true;

  if (Val.getOpcode() != ISD::LOAD)
    return false;

  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
    // X86 has 8, 16, and 32-bit zero-extending loads.
    return true;
  }

  return false;
}
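
// Illustrative example: a 32-bit operation such as "movl (%rdi), %eax"
// already clears bits 63:32 of RAX, so the i32 -> i64 zero-extensions above
// cost no extra instructions; MOVZBL/MOVZWL similarly fold the extension
// into the load itself.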

bool X86TargetLowering::isVectorLoadExtDesirable(SDValue) const { return true; }

bool
X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  if (!(Subtarget->hasFMA() || Subtarget->hasFMA4()))
    return false;

  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
  case MVT::f64:
    return true;
  default:
    break;
  }

  return false;
}

bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
  // i16 instructions are longer (0x66 prefix) and potentially slower.
  return !(VT1 == MVT::i32 && VT2 == MVT::i16);
}

/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool
X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
                                      EVT VT) const {
  if (!VT.isSimple())
    return false;

  MVT SVT = VT.getSimpleVT();

  // Very little shuffling can be done for 64-bit vectors right now.
  if (VT.getSizeInBits() == 64)
    return false;

  // This is an experimental legality test that is tailored to match the
  // legality test of the experimental lowering more closely. They are gated
  // separately to ease testing of performance differences.
  if (ExperimentalVectorShuffleLegality)
    // We only care that the types being shuffled are legal. The lowering can
    // handle any possible shuffle mask that results.
    return isTypeLegal(SVT);

  // If this is a single-input shuffle with no 128 bit lane crossings we can
  // lower it into pshufb.
  if ((SVT.is128BitVector() && Subtarget->hasSSSE3()) ||
      (SVT.is256BitVector() && Subtarget->hasInt256())) {
    bool isLegal = true;
    for (unsigned I = 0, E = M.size(); I != E; ++I) {
      if (M[I] >= (int)SVT.getVectorNumElements() ||
          ShuffleCrosses128bitLane(SVT, I, M[I])) {
        isLegal = false;
        break;
      }
    }
    if (isLegal)
      return true;
  }

  // FIXME: blends, shifts.
  return (SVT.getVectorNumElements() == 2 ||
          ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
          isMOVLMask(M, SVT) ||
          isCommutedMOVLMask(M, SVT) ||
          isMOVHLPSMask(M, SVT) ||
          isSHUFPMask(M, SVT) ||
          isSHUFPMask(M, SVT, /* Commuted */ true) ||
          isPSHUFDMask(M, SVT) ||
          isPSHUFDMask(M, SVT, /* SecondOperand */ true) ||
          isPSHUFHWMask(M, SVT, Subtarget->hasInt256()) ||
          isPSHUFLWMask(M, SVT, Subtarget->hasInt256()) ||
          isPALIGNRMask(M, SVT, Subtarget) ||
          isUNPCKLMask(M, SVT, Subtarget->hasInt256()) ||
          isUNPCKHMask(M, SVT, Subtarget->hasInt256()) ||
          isUNPCKL_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
          isUNPCKH_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
          isBlendMask(M, SVT, Subtarget->hasSSE41(), Subtarget->hasInt256()) ||
          (Subtarget->hasSSE41() && isINSERTPSMask(M, SVT)));
}
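
// Illustrative example (sketch): a single-input v16i8 mask such as
//   <0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14>
// indexes only operand 0 and never crosses a 128-bit lane, so the loop above
// accepts it for lowering to one PSHUFB with a constant control vector.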

bool
X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
                                          EVT VT) const {
  if (!VT.isSimple())
    return false;

  MVT SVT = VT.getSimpleVT();

  // This is an experimental legality test that is tailored to match the
  // legality test of the experimental lowering more closely. They are gated
  // separately to ease testing of performance differences.
  if (ExperimentalVectorShuffleLegality)
    // The new vector shuffle lowering is very good at managing zero-inputs.
    return isShuffleMaskLegal(Mask, VT);

  unsigned NumElts = SVT.getVectorNumElements();
  // FIXME: This collection of masks seems suspect.
  if (NumElts == 2)
    return true;
  if (NumElts == 4 && SVT.is128BitVector()) {
    return (isMOVLMask(Mask, SVT) ||
            isCommutedMOVLMask(Mask, SVT, true) ||
            isSHUFPMask(Mask, SVT) ||
            isSHUFPMask(Mask, SVT, /* Commuted */ true) ||
            isBlendMask(Mask, SVT, Subtarget->hasSSE41(),
                        Subtarget->hasInt256()));
  }
  return false;
}

//===----------------------------------------------------------------------===//
//                           X86 Scheduler Hooks
//===----------------------------------------------------------------------===//

/// Utility function to emit xbegin specifying the start of an RTM region.
static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
                                     const TargetInstrInfo *TII) {
  DebugLoc DL = MI->getDebugLoc();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = MBB;
  ++I;

  // For the v = xbegin(), we generate
  //
  // thisMBB:
  //  xbegin sinkMBB
  //
  // mainMBB:
  //  eax = -1
  //
  // sinkMBB:
  //  v = eax

  MachineBasicBlock *thisMBB = MBB;
  MachineFunction *MF = MBB->getParent();
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, mainMBB);
  MF->insert(I, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // thisMBB:
  //  xbegin sinkMBB
  //  # fallthrough to mainMBB
  //  # abortion to sinkMBB
  BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB);
  thisMBB->addSuccessor(mainMBB);
  thisMBB->addSuccessor(sinkMBB);

  // mainMBB:
  //  EAX = -1
  BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1);
  mainMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  // EAX is live into the sinkMBB
  sinkMBB->addLiveIn(X86::EAX);
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
    .addReg(X86::EAX);

  MI->eraseFromParent();
  return sinkMBB;
}
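
// Illustrative example (sketch, assuming <immintrin.h> and an RTM-capable
// CPU) of the source pattern served by the blocks built above:
//
//   unsigned Status = _xbegin();        // EAX == -1 (_XBEGIN_STARTED)
//   if (Status == _XBEGIN_STARTED) {
//     // ... transactional region ...
//     _xend();
//   } else {
//     // Aborted: Status holds the abort code that arrives in EAX at sinkMBB.
//   }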

// FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8
// or XMM0_V32I8 in AVX all of this code can be replaced with that
// in the .td file.
static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
                                       const TargetInstrInfo *TII) {
  unsigned Opc;
  switch (MI->getOpcode()) {
  default: llvm_unreachable("illegal opcode!");
  case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break;
  case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
  case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break;
  case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break;
  case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break;
  case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break;
  case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break;
  case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
  }

  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));

  unsigned NumArgs = MI->getNumOperands();
  for (unsigned i = 1; i < NumArgs; ++i) {
    MachineOperand &Op = MI->getOperand(i);
    if (!(Op.isReg() && Op.isImplicit()))
      MIB.addOperand(Op);
  }
  if (MI->hasOneMemOperand())
    MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

  BuildMI(*BB, MI, dl,
          TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
    .addReg(X86::XMM0);

  MI->eraseFromParent();
  return BB;
}

// FIXME: Custom handling because TableGen doesn't support multiple implicit
// defs in an instruction pattern.
static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
                                       const TargetInstrInfo *TII) {
  unsigned Opc;
  switch (MI->getOpcode()) {
  default: llvm_unreachable("illegal opcode!");
  case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break;
  case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
  case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break;
  case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break;
  case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break;
  case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break;
  case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break;
  case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
  }

  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));

  unsigned NumArgs = MI->getNumOperands(); // remove the results
  for (unsigned i = 1; i < NumArgs; ++i) {
    MachineOperand &Op = MI->getOperand(i);
    if (!(Op.isReg() && Op.isImplicit()))
      MIB.addOperand(Op);
  }
  if (MI->hasOneMemOperand())
    MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

  BuildMI(*BB, MI, dl,
          TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
    .addReg(X86::ECX);

  MI->eraseFromParent();
  return BB;
}

static MachineBasicBlock *EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
                                      const X86Subtarget *Subtarget) {
  DebugLoc dl = MI->getDebugLoc();
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  // Address into RAX/EAX, other two args into ECX, EDX.
  unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
  unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
  MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
  for (int i = 0; i < X86::AddrNumOperands; ++i)
    MIB.addOperand(MI->getOperand(i));

  unsigned ValOps = X86::AddrNumOperands;
  BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
    .addReg(MI->getOperand(ValOps).getReg());
  BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
    .addReg(MI->getOperand(ValOps+1).getReg());

  // The instruction doesn't actually take any operands though.
  BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));

  MI->eraseFromParent(); // The pseudo is gone now.
  return BB;
}
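
// Illustrative note: the matching SSE3 intrinsic is
//   void _mm_monitor(void const *P, unsigned Extensions, unsigned Hints);
// whose three arguments land in RAX/EAX, ECX and EDX exactly as copied above.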

MachineBasicBlock *
X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
                                                 MachineBasicBlock *MBB) const {
  // Emit va_arg instruction on X86-64.

  // Operands to this pseudo-instruction:
  // 0  ) Output        : destination address (reg)
  // 1-5) Input         : va_list address (addr, i64mem)
  // 6  ) ArgSize       : Size (in bytes) of vararg type
  // 7  ) ArgMode       : 0=overflow only, 1=use gp_offset, 2=use fp_offset
  // 8  ) Align         : Alignment of type
  // 9  ) EFLAGS (implicit-def)

  assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
  assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands");

  unsigned DestReg = MI->getOperand(0).getReg();
  MachineOperand &Base = MI->getOperand(1);
  MachineOperand &Scale = MI->getOperand(2);
  MachineOperand &Index = MI->getOperand(3);
  MachineOperand &Disp = MI->getOperand(4);
  MachineOperand &Segment = MI->getOperand(5);
  unsigned ArgSize = MI->getOperand(6).getImm();
  unsigned ArgMode = MI->getOperand(7).getImm();
  unsigned Align = MI->getOperand(8).getImm();

  // Memory Reference
  assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
  MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();

  // Machine Information
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
  const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
  const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
  DebugLoc DL = MI->getDebugLoc();

  // struct va_list {
  //   i32   gp_offset
  //   i32   fp_offset
  //   i64   overflow_area (address)
  //   i64   reg_save_area (address)
  // }
  // sizeof(va_list) = 24
  // alignment(va_list) = 8
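
  // For reference, this matches the System V x86-64 ABI definition, which in
  // C reads roughly (sketch):
  //
  //   typedef struct {
  //     unsigned gp_offset;       // byte 0:  next GPR slot, 0..48
  //     unsigned fp_offset;       // byte 4:  next XMM slot, 48..176
  //     void *overflow_arg_area;  // byte 8:  next stack-passed argument
  //     void *reg_save_area;      // byte 16: spilled GPRs, then XMMs
  //   } va_list[1];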

  unsigned TotalNumIntRegs = 6;
  unsigned TotalNumXMMRegs = 8;
  bool UseGPOffset = (ArgMode == 1);
  bool UseFPOffset = (ArgMode == 2);
  unsigned MaxOffset = TotalNumIntRegs * 8 +
                       (UseFPOffset ? TotalNumXMMRegs * 16 : 0);

  /* Align ArgSize to a multiple of 8 */
  unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
  bool NeedsAlign = (Align > 8);

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *overflowMBB;
  MachineBasicBlock *offsetMBB;
  MachineBasicBlock *endMBB;

  unsigned OffsetDestReg = 0;   // Argument address computed by offsetMBB
  unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
  unsigned OffsetReg = 0;

  if (!UseGPOffset && !UseFPOffset) {
    // If we only pull from the overflow region, we don't create a branch.
    // We don't need to alter control flow.
    OffsetDestReg = 0; // unused
    OverflowDestReg = DestReg;

    offsetMBB = nullptr;
    overflowMBB = thisMBB;
    endMBB = thisMBB;
  } else {
    // First emit code to check if gp_offset (or fp_offset) is below the bound.
    // If so, pull the argument from reg_save_area. (branch to offsetMBB)
    // If not, pull from overflow_area. (branch to overflowMBB)
    //
    // thisMBB
    //   |     .
    //   |        .
    //   offsetMBB   overflowMBB
    //   |        .
    //   |     .
    //   endMBB

    // Registers for the PHI in endMBB
    OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
    OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);

    const BasicBlock *LLVM_BB = MBB->getBasicBlock();
    MachineFunction *MF = MBB->getParent();
    overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
    offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
    endMBB = MF->CreateMachineBasicBlock(LLVM_BB);

    MachineFunction::iterator MBBIter = MBB;
    ++MBBIter;

    // Insert the new basic blocks
    MF->insert(MBBIter, offsetMBB);
    MF->insert(MBBIter, overflowMBB);
    MF->insert(MBBIter, endMBB);

    // Transfer the remainder of MBB and its successor edges to endMBB.
    endMBB->splice(endMBB->begin(), thisMBB,
                   std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
    endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);

    // Make offsetMBB and overflowMBB successors of thisMBB
    thisMBB->addSuccessor(offsetMBB);
    thisMBB->addSuccessor(overflowMBB);

    // endMBB is a successor of both offsetMBB and overflowMBB
    offsetMBB->addSuccessor(endMBB);
    overflowMBB->addSuccessor(endMBB);

    // Load the offset value into a register
    OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
    BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
      .addOperand(Base)
      .addOperand(Scale)
      .addOperand(Index)
      .addDisp(Disp, UseFPOffset ? 4 : 0)
      .addOperand(Segment)
      .setMemRefs(MMOBegin, MMOEnd);

    // Check if there is enough room left to pull this argument.
    BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
      .addReg(OffsetReg)
      .addImm(MaxOffset + 8 - ArgSizeA8);

    // Branch to "overflowMBB" if offset >= max
    // Fall through to "offsetMBB" otherwise
    BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
      .addMBB(overflowMBB);
  }

  // In offsetMBB, emit code to use the reg_save_area.
  if (offsetMBB) {
    assert(OffsetReg != 0);

    // Read the reg_save_area address.
    unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
      .addOperand(Base)
      .addOperand(Scale)
      .addOperand(Index)
      .addDisp(Disp, 16)
      .addOperand(Segment)
      .setMemRefs(MMOBegin, MMOEnd);

    // Zero-extend the offset
    unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
      .addImm(0)
      .addReg(OffsetReg)
      .addImm(X86::sub_32bit);

    // Add the offset to the reg_save_area to get the final address.
    BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
      .addReg(OffsetReg64)
      .addReg(RegSaveReg);

    // Compute the offset for the next argument
    unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
    BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
      .addReg(OffsetReg)
      .addImm(UseFPOffset ? 16 : 8);

    // Store it back into the va_list.
    BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
      .addOperand(Base)
      .addOperand(Scale)
      .addOperand(Index)
      .addDisp(Disp, UseFPOffset ? 4 : 0)
      .addOperand(Segment)
      .addReg(NextOffsetReg)
      .setMemRefs(MMOBegin, MMOEnd);

    // Jump to endMBB
    BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
      .addMBB(endMBB);
  }

  //
  // Emit code to use overflow area
  //

  // Load the overflow_area address into a register.
  unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
  BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
    .addOperand(Base)
    .addOperand(Scale)
    .addOperand(Index)
    .addDisp(Disp, 8)
    .addOperand(Segment)
    .setMemRefs(MMOBegin, MMOEnd);

  // If we need to align it, do so. Otherwise, just copy the address
  // to OverflowDestReg.
  if (NeedsAlign) {
    // Align the overflow address
    assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
    unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);

    // aligned_addr = (addr + (align-1)) & ~(align-1)
    BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
      .addReg(OverflowAddrReg)
      .addImm(Align-1);

    BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
      .addReg(TmpReg)
      .addImm(~(uint64_t)(Align-1));
  } else {
    BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
      .addReg(OverflowAddrReg);
  }

  // Compute the next overflow address after this argument.
  // (the overflow address should be kept 8-byte aligned)
  unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
  BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
    .addReg(OverflowDestReg)
    .addImm(ArgSizeA8);

  // Store the new overflow address.
  BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
    .addOperand(Base)
    .addOperand(Scale)
    .addOperand(Index)
    .addDisp(Disp, 8)
    .addOperand(Segment)
    .addReg(NextAddrReg)
    .setMemRefs(MMOBegin, MMOEnd);

  // If we branched, emit the PHI to the front of endMBB.
  if (offsetMBB) {
    BuildMI(*endMBB, endMBB->begin(), DL,
            TII->get(X86::PHI), DestReg)
      .addReg(OffsetDestReg).addMBB(offsetMBB)
      .addReg(OverflowDestReg).addMBB(overflowMBB);
  }

  // Erase the pseudo instruction
  MI->eraseFromParent();

  return endMBB;
}

MachineBasicBlock *
X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
                                                 MachineInstr *MI,
                                                 MachineBasicBlock *MBB) const {
  // Emit code to save XMM registers to the stack. The ABI says that the
  // number of registers to save is given in %al, so it's theoretically
  // possible to do an indirect jump trick to avoid saving all of them,
  // however this code takes a simpler approach and just executes all
  // of the stores if %al is non-zero. It's less code, and it's probably
  // easier on the hardware branch predictor, and stores aren't all that
  // expensive anyway.

  // Create the new basic blocks. One block contains all the XMM stores,
  // and one block is the final destination regardless of whether any
  // stores were performed.
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  MachineFunction *F = MBB->getParent();
  MachineFunction::iterator MBBIter = MBB;
  ++MBBIter;
  MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(MBBIter, XMMSaveMBB);
  F->insert(MBBIter, EndMBB);

  // Transfer the remainder of MBB and its successor edges to EndMBB.
  EndMBB->splice(EndMBB->begin(), MBB,
                 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  EndMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // The original block will now fall through to the XMM save block.
  MBB->addSuccessor(XMMSaveMBB);
  // The XMMSaveMBB will fall through to the end block.
  XMMSaveMBB->addSuccessor(EndMBB);

  // Now add the instructions.
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  unsigned CountReg = MI->getOperand(0).getReg();
  int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
  int64_t VarArgsFPOffset = MI->getOperand(2).getImm();

  if (!Subtarget->isTargetWin64()) {
    // If %al is 0, branch around the XMM save block.
    BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
    BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
    MBB->addSuccessor(EndMBB);
  }

  // Make sure the last operand is EFLAGS, which gets clobbered by the branch
  // that was just emitted, but clearly shouldn't be "saved".
  assert((MI->getNumOperands() <= 3 ||
          !MI->getOperand(MI->getNumOperands() - 1).isReg() ||
          MI->getOperand(MI->getNumOperands() - 1).getReg() == X86::EFLAGS)
         && "Expected last argument to be EFLAGS");
  unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
  // In the XMM save block, save all the XMM argument registers.
  for (int i = 3, e = MI->getNumOperands() - 1; i != e; ++i) {
    int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
    MachineMemOperand *MMO =
      F->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset),
        MachineMemOperand::MOStore,
        /*Size=*/16, /*Align=*/16);
    BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
      .addFrameIndex(RegSaveFrameIndex)
      .addImm(/*Scale=*/1)
      .addReg(/*IndexReg=*/0)
      .addImm(/*Disp=*/Offset)
      .addReg(/*Segment=*/0)
      .addReg(MI->getOperand(i).getReg())
      .addMemOperand(MMO);
  }

  MI->eraseFromParent(); // The pseudo instruction is gone now.

  return EndMBB;
}

// The EFLAGS operand of SelectItr might be missing a kill marker
// because there were multiple uses of EFLAGS, and ISel didn't know
// which to mark. Figure out whether SelectItr should have had a
// kill marker, and set it if it should. Returns the correct kill
// marker value.
static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
                                     MachineBasicBlock* BB,
                                     const TargetRegisterInfo* TRI) {
  // Scan forward through BB for a use/def of EFLAGS.
  MachineBasicBlock::iterator miI(std::next(SelectItr));
  for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
    const MachineInstr& mi = *miI;
    if (mi.readsRegister(X86::EFLAGS))
      return false;
    if (mi.definesRegister(X86::EFLAGS))
      break; // Should have kill-flag - update below.
  }

  // If we hit the end of the block, check whether EFLAGS is live into a
  // successor.
  if (miI == BB->end()) {
    for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
                                          sEnd = BB->succ_end();
         sItr != sEnd; ++sItr) {
      MachineBasicBlock* succ = *sItr;
      if (succ->isLiveIn(X86::EFLAGS))
        return false;
    }
  }

  // We found a def, or hit the end of the basic block and EFLAGS wasn't live
  // out. SelectMI should have a kill flag on EFLAGS.
  SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
  return true;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
                                     MachineBasicBlock *BB) const {
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  // To "insert" a SELECT_CC instruction, we actually have to insert the
  // diamond control-flow pattern. The incoming instruction knows the
  // destination vreg to set, the condition code register to branch on, the
  // true/false values to select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // If the EFLAGS register isn't dead in the terminator, then claim that it's
  // live into the sink and copy blocks.
  const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
  if (!MI->killsRegister(X86::EFLAGS) &&
      !checkAndUpdateEFLAGSKill(MI, BB, TRI)) {
    copy0MBB->addLiveIn(X86::EFLAGS);
    sinkMBB->addLiveIn(X86::EFLAGS);
  }

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  std::next(MachineBasicBlock::iterator(MI)), BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  // Create the conditional branch instruction.
  unsigned Opc =
    X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
  BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  copy0MBB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(X86::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return sinkMBB;
}
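
// Illustrative example (sketch): a select on a type with no native CMOV,
// e.g.
//   float Pick(bool C, float A, float B) { return C ? A : B; }
// can reach here as a CMOV_FR32 pseudo and is expanded into the branch
// diamond plus PHI constructed above.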

MachineBasicBlock *
X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
                                        MachineBasicBlock *BB) const {
  MachineFunction *MF = BB->getParent();
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();
  const BasicBlock *LLVM_BB = BB->getBasicBlock();

  assert(MF->shouldSplitStack());

  const bool Is64Bit = Subtarget->is64Bit();
  const bool IsLP64 = Subtarget->isTarget64BitLP64();

  const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
  const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;

  // BB:
  //  ... [Till the alloca]
  // If stacklet is not large enough, jump to mallocMBB
  //
  // bumpMBB:
  //  Allocate by subtracting from RSP
  //  Jump to continueMBB
  //
  // mallocMBB:
  //  Allocate by call to runtime
  //
  // continueMBB:
  //  ...
  //  [rest of original BB]
  //

  MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);

  MachineRegisterInfo &MRI = MF->getRegInfo();
  const TargetRegisterClass *AddrRegClass =
    getRegClassFor(getPointerTy());

  unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
           bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
           tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
           SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
           sizeVReg = MI->getOperand(1).getReg(),
           physSPReg = IsLP64 || Subtarget->isTargetNaCl64() ? X86::RSP : X86::ESP;

  MachineFunction::iterator MBBIter = BB;
  ++MBBIter;

  MF->insert(MBBIter, bumpMBB);
  MF->insert(MBBIter, mallocMBB);
  MF->insert(MBBIter, continueMBB);

  continueMBB->splice(continueMBB->begin(), BB,
                      std::next(MachineBasicBlock::iterator(MI)), BB->end());
  continueMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Add code to the main basic block to check if the stack limit has been hit,
  // and if so, jump to mallocMBB otherwise to bumpMBB.
  BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
  BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
    .addReg(tmpSPVReg).addReg(sizeVReg);
  BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
    .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
    .addReg(SPLimitVReg);
  BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);

  // bumpMBB simply decreases the stack pointer, since we know the current
  // stacklet has enough space.
  BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
    .addReg(SPLimitVReg);
  BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
    .addReg(SPLimitVReg);
  BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);

  // Calls into a routine in libgcc to allocate more space from the heap.
  const uint32_t *RegMask =
    Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
  if (IsLP64) {
    BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
      .addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
      .addExternalSymbol("__morestack_allocate_stack_space")
      .addRegMask(RegMask)
      .addReg(X86::RDI, RegState::Implicit)
      .addReg(X86::RAX, RegState::ImplicitDefine);
  } else if (Is64Bit) {
    BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
      .addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
      .addExternalSymbol("__morestack_allocate_stack_space")
      .addRegMask(RegMask)
      .addReg(X86::EDI, RegState::Implicit)
      .addReg(X86::EAX, RegState::ImplicitDefine);
  } else {
    BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
      .addImm(12);
    BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
    BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
      .addExternalSymbol("__morestack_allocate_stack_space")
      .addRegMask(RegMask)
      .addReg(X86::EAX, RegState::ImplicitDefine);
  }

  if (!Is64Bit)
    BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
      .addImm(16);

  BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
    .addReg(IsLP64 ? X86::RAX : X86::EAX);
  BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);

  // Set up the CFG correctly.
  BB->addSuccessor(bumpMBB);
  BB->addSuccessor(mallocMBB);
  mallocMBB->addSuccessor(continueMBB);
  bumpMBB->addSuccessor(continueMBB);

  // Take care of the PHI nodes.
  BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
          MI->getOperand(0).getReg())
    .addReg(mallocPtrVReg).addMBB(mallocMBB)
    .addReg(bumpSPPtrVReg).addMBB(bumpMBB);

  // Delete the original pseudo instruction.
  MI->eraseFromParent();

  // And we're done.
  return continueMBB;
}
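
// Illustrative note: this path is exercised by split-stack code, e.g. a
// dynamic alloca in a function compiled with -fsplit-stack, which must first
// compare against the stacklet limit word held in TLS at TlsOffset above
// before simply bumping RSP/ESP.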

MachineBasicBlock *
X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
                                        MachineBasicBlock *BB) const {
  DebugLoc DL = MI->getDebugLoc();

  assert(!Subtarget->isTargetMachO());

  X86FrameLowering::emitStackProbeCall(*BB->getParent(), *BB, MI, DL);

  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

MachineBasicBlock *
X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
                                      MachineBasicBlock *BB) const {
  // This is pretty easy. We're taking the value that we received from
  // our load from the relocation, sticking it in either RDI (x86-64)
  // or EAX and doing an indirect call. The return value will then
  // be in the normal return register.
  MachineFunction *F = BB->getParent();
  const X86InstrInfo *TII = Subtarget->getInstrInfo();
  DebugLoc DL = MI->getDebugLoc();

  assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
  assert(MI->getOperand(3).isGlobal() && "This should be a global");

  // Get a register mask for the lowered call.
  // FIXME: The 32-bit calls have non-standard calling conventions. Use a
  // proper register mask.
  const uint32_t *RegMask =
    Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
  if (Subtarget->is64Bit()) {
    MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
                                      TII->get(X86::MOV64rm), X86::RDI)
      .addReg(X86::RIP)
      .addImm(0).addReg(0)
      .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                        MI->getOperand(3).getTargetFlags())
      .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
    addDirectMem(MIB, X86::RDI);
    MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
  } else if (F->getTarget().getRelocationModel() != Reloc::PIC_) {
    MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
                                      TII->get(X86::MOV32rm), X86::EAX)
      .addReg(0)
      .addImm(0).addReg(0)
      .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                        MI->getOperand(3).getTargetFlags())
      .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
    addDirectMem(MIB, X86::EAX);
    MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
  } else {
    MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
                                      TII->get(X86::MOV32rm), X86::EAX)
      .addReg(TII->getGlobalBaseReg(F))
      .addImm(0).addReg(0)
      .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
                        MI->getOperand(3).getTargetFlags())
      .addReg(0);
    MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
    addDirectMem(MIB, X86::EAX);
    MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
  }

  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

MachineBasicBlock *
X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
                                    MachineBasicBlock *MBB) const {
  DebugLoc DL = MI->getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = MBB;
  ++I;

  // Memory Reference
  MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();

  unsigned DstReg;
  unsigned MemOpndSlot = 0;

  unsigned CurOp = 0;

  DstReg = MI->getOperand(CurOp++).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
  assert(RC->hasType(MVT::i32) && "Invalid destination!");
  unsigned mainDstReg = MRI.createVirtualRegister(RC);
  unsigned restoreDstReg = MRI.createVirtualRegister(RC);

  MemOpndSlot = CurOp;

  MVT PVT = getPointerTy();
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");

  // For v = setjmp(buf), we generate
  //
  // thisMBB:
  //  buf[LabelOffset] = restoreMBB
  //  SjLjSetup restoreMBB
  //
  // mainMBB:
  //  v_main = 0
  //
  // sinkMBB:
  //  v = phi(main, restore)
  //
  // restoreMBB:
  //  if base pointer being used, load it from frame
  //  v_restore = 1

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, mainMBB);
  MF->insert(I, sinkMBB);
  MF->push_back(restoreMBB);

  MachineInstrBuilder MIB;

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  std::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // thisMBB:
  unsigned PtrStoreOpc = 0;
  unsigned LabelReg = 0;
  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  Reloc::Model RM = MF->getTarget().getRelocationModel();
  bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
                     (RM == Reloc::Static || RM == Reloc::DynamicNoPIC);

  // Prepare IP either in reg or imm.
  if (!UseImmLabel) {
    PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
    const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
    LabelReg = MRI.createVirtualRegister(PtrRC);
    if (Subtarget->is64Bit()) {
      MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
              .addReg(X86::RIP)
              .addImm(0)
              .addReg(0)
              .addMBB(restoreMBB)
              .addReg(0);
    } else {
      const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
      MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
              .addReg(XII->getGlobalBaseReg(MF))
              .addImm(0)
              .addReg(0)
              .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference())
              .addReg(0);
    }
  } else
    PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
  // Store IP
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    if (i == X86::AddrDisp)
      MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset);
    else
      MIB.addOperand(MI->getOperand(MemOpndSlot + i));
  }
  if (!UseImmLabel)
    MIB.addReg(LabelReg);
  else
    MIB.addMBB(restoreMBB);
  MIB.setMemRefs(MMOBegin, MMOEnd);
  // Setup
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
          .addMBB(restoreMBB);

  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  MIB.addRegMask(RegInfo->getNoPreservedMask());
  thisMBB->addSuccessor(mainMBB);
  thisMBB->addSuccessor(restoreMBB);

  // mainMBB:
  //  EAX = 0
  BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
  mainMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(X86::PHI), DstReg)
    .addReg(mainDstReg).addMBB(mainMBB)
    .addReg(restoreDstReg).addMBB(restoreMBB);

  // restoreMBB:
  if (RegInfo->hasBasePointer(*MF)) {
    const bool Uses64BitFramePtr =
        Subtarget->isTarget64BitLP64() || Subtarget->isTargetNaCl64();
    X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
    X86FI->setRestoreBasePointer(MF);
    unsigned FramePtr = RegInfo->getFrameRegister(*MF);
    unsigned BasePtr = RegInfo->getBaseRegister();
    unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
    addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
                 FramePtr, true, X86FI->getRestoreBasePointerOffset())
      .setMIFlag(MachineInstr::FrameSetup);
  }
  BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
  BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
  restoreMBB->addSuccessor(sinkMBB);

  MI->eraseFromParent();
  return sinkMBB;
}
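
// Illustrative note: for __builtin_setjmp/__builtin_longjmp the jump buffer
// uses pointer-sized slots as addressed above and in emitEHSjLjLongJmp:
//   buf[0] = frame pointer, buf[1] = resume IP (restoreMBB), buf[2] = SP
// e.g. (sketch):
//   void *Buf[5];
//   if (__builtin_setjmp(Buf) == 0)
//     __builtin_longjmp(Buf, 1); // resumes at the setjmp with result 1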

MachineBasicBlock *
X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
                                     MachineBasicBlock *MBB) const {
  DebugLoc DL = MI->getDebugLoc();
  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  // Memory Reference
  MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();

  MVT PVT = getPointerTy();
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");

  const TargetRegisterClass *RC =
    (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
  unsigned Tmp = MRI.createVirtualRegister(RC);
  // Since FP is only updated here but NOT referenced, it's treated as GPR.
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
  unsigned SP = RegInfo->getStackRegister();

  MachineInstrBuilder MIB;

  const int64_t LabelOffset = 1 * PVT.getStoreSize();
  const int64_t SPOffset = 2 * PVT.getStoreSize();

  unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
  unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;

  // Reload FP
  MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
    MIB.addOperand(MI->getOperand(i));
  MIB.setMemRefs(MMOBegin, MMOEnd);
  // Reload IP
  MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    if (i == X86::AddrDisp)
      MIB.addDisp(MI->getOperand(i), LabelOffset);
    else
      MIB.addOperand(MI->getOperand(i));
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);
  // Reload SP
  MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    if (i == X86::AddrDisp)
      MIB.addDisp(MI->getOperand(i), SPOffset);
    else
      MIB.addOperand(MI->getOperand(i));
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);
  // Jump
  BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);

  MI->eraseFromParent();
  return MBB;
}

// Replace 213-type (isel default) FMA3 instructions with 231-type for
// accumulator loops. Writing back to the accumulator allows the coalescer
// to remove extra copies in the loop.
MachineBasicBlock *
X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
                                 MachineBasicBlock *MBB) const {
  MachineOperand &AddendOp = MI->getOperand(3);

  // Bail out early if the addend isn't a register - we can't switch these.
  if (!AddendOp.isReg())
    return MBB;

  MachineFunction &MF = *MBB->getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  // Check whether the addend is defined by a PHI:
  assert(MRI.hasOneDef(AddendOp.getReg()) && "Multiple defs in SSA?");
  MachineInstr &AddendDef = *MRI.def_instr_begin(AddendOp.getReg());
  if (!AddendDef.isPHI())
    return MBB;

  // Look for the following pattern:
  // loop:
  //   %addend = phi [%entry, 0], [%loop, %result]
  //   ...
  //   %result<tied1> = FMA213 %m2<tied0>, %m1, %addend
  //
  // Replace with:
  // loop:
  //   %addend = phi [%entry, 0], [%loop, %result]
  //   ...
  //   %result<tied1> = FMA231 %addend<tied0>, %m1, %m2

  for (unsigned i = 1, e = AddendDef.getNumOperands(); i < e; i += 2) {
    assert(AddendDef.getOperand(i).isReg());
    MachineOperand PHISrcOp = AddendDef.getOperand(i);
    MachineInstr &PHISrcInst = *MRI.def_instr_begin(PHISrcOp.getReg());
    if (&PHISrcInst == MI) {
      // Found a matching instruction.
      unsigned NewFMAOpc = 0;
      switch (MI->getOpcode()) {
      case X86::VFMADDPDr213r: NewFMAOpc = X86::VFMADDPDr231r; break;
      case X86::VFMADDPSr213r: NewFMAOpc = X86::VFMADDPSr231r; break;
      case X86::VFMADDSDr213r: NewFMAOpc = X86::VFMADDSDr231r; break;
      case X86::VFMADDSSr213r: NewFMAOpc = X86::VFMADDSSr231r; break;
      case X86::VFMSUBPDr213r: NewFMAOpc = X86::VFMSUBPDr231r; break;
      case X86::VFMSUBPSr213r: NewFMAOpc = X86::VFMSUBPSr231r; break;
      case X86::VFMSUBSDr213r: NewFMAOpc = X86::VFMSUBSDr231r; break;
      case X86::VFMSUBSSr213r: NewFMAOpc = X86::VFMSUBSSr231r; break;
      case X86::VFNMADDPDr213r: NewFMAOpc = X86::VFNMADDPDr231r; break;
      case X86::VFNMADDPSr213r: NewFMAOpc = X86::VFNMADDPSr231r; break;
      case X86::VFNMADDSDr213r: NewFMAOpc = X86::VFNMADDSDr231r; break;
      case X86::VFNMADDSSr213r: NewFMAOpc = X86::VFNMADDSSr231r; break;
      case X86::VFNMSUBPDr213r: NewFMAOpc = X86::VFNMSUBPDr231r; break;
      case X86::VFNMSUBPSr213r: NewFMAOpc = X86::VFNMSUBPSr231r; break;
      case X86::VFNMSUBSDr213r: NewFMAOpc = X86::VFNMSUBSDr231r; break;
      case X86::VFNMSUBSSr213r: NewFMAOpc = X86::VFNMSUBSSr231r; break;
      case X86::VFMADDSUBPDr213r: NewFMAOpc = X86::VFMADDSUBPDr231r; break;
      case X86::VFMADDSUBPSr213r: NewFMAOpc = X86::VFMADDSUBPSr231r; break;
      case X86::VFMSUBADDPDr213r: NewFMAOpc = X86::VFMSUBADDPDr231r; break;
      case X86::VFMSUBADDPSr213r: NewFMAOpc = X86::VFMSUBADDPSr231r; break;

      case X86::VFMADDPDr213rY: NewFMAOpc = X86::VFMADDPDr231rY; break;
      case X86::VFMADDPSr213rY: NewFMAOpc = X86::VFMADDPSr231rY; break;
      case X86::VFMSUBPDr213rY: NewFMAOpc = X86::VFMSUBPDr231rY; break;
      case X86::VFMSUBPSr213rY: NewFMAOpc = X86::VFMSUBPSr231rY; break;
      case X86::VFNMADDPDr213rY: NewFMAOpc = X86::VFNMADDPDr231rY; break;
      case X86::VFNMADDPSr213rY: NewFMAOpc = X86::VFNMADDPSr231rY; break;
      case X86::VFNMSUBPDr213rY: NewFMAOpc = X86::VFNMSUBPDr231rY; break;
      case X86::VFNMSUBPSr213rY: NewFMAOpc = X86::VFNMSUBPSr231rY; break;
      case X86::VFMADDSUBPDr213rY: NewFMAOpc = X86::VFMADDSUBPDr231rY; break;
      case X86::VFMADDSUBPSr213rY: NewFMAOpc = X86::VFMADDSUBPSr231rY; break;
      case X86::VFMSUBADDPDr213rY: NewFMAOpc = X86::VFMSUBADDPDr231rY; break;
      case X86::VFMSUBADDPSr213rY: NewFMAOpc = X86::VFMSUBADDPSr231rY; break;
      default: llvm_unreachable("Unrecognized FMA variant.");
      }

      const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
      MachineInstrBuilder MIB =
        BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
          .addOperand(MI->getOperand(0))
          .addOperand(MI->getOperand(3))
          .addOperand(MI->getOperand(2))
          .addOperand(MI->getOperand(1));
      MBB->insert(MachineBasicBlock::iterator(MI), MIB);
      MI->eraseFromParent();
    }
  }

  return MBB;
}
MachineBasicBlock *
X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                               MachineBasicBlock *BB) const {
  switch (MI->getOpcode()) {
  default: llvm_unreachable("Unexpected instr type to insert");
  case X86::TAILJMPd64:
  case X86::TAILJMPr64:
  case X86::TAILJMPm64:
  case X86::TAILJMPd64_REX:
  case X86::TAILJMPr64_REX:
  case X86::TAILJMPm64_REX:
    llvm_unreachable("TAILJMP64 would not be touched here.");
  case X86::TCRETURNdi64:
  case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
    return BB;
  case X86::WIN_ALLOCA:
    return EmitLoweredWinAlloca(MI, BB);
  case X86::SEG_ALLOCA_32:
  case X86::SEG_ALLOCA_64:
    return EmitLoweredSegAlloca(MI, BB);
  case X86::TLSCall_32:
  case X86::TLSCall_64:
    return EmitLoweredTLSCall(MI, BB);
  case X86::CMOV_GR8:
  case X86::CMOV_FR32:
  case X86::CMOV_FR64:
  case X86::CMOV_V4F32:
  case X86::CMOV_V2F64:
  case X86::CMOV_V2I64:
  case X86::CMOV_V8F32:
  case X86::CMOV_V4F64:
  case X86::CMOV_V4I64:
  case X86::CMOV_V16F32:
  case X86::CMOV_V8F64:
  case X86::CMOV_V8I64:
  case X86::CMOV_GR16:
  case X86::CMOV_GR32:
  case X86::CMOV_RFP32:
  case X86::CMOV_RFP64:
  case X86::CMOV_RFP80:
    return EmitLoweredSelect(MI, BB);

  case X86::FP32_TO_INT16_IN_MEM:
  case X86::FP32_TO_INT32_IN_MEM:
  case X86::FP32_TO_INT64_IN_MEM:
  case X86::FP64_TO_INT16_IN_MEM:
  case X86::FP64_TO_INT32_IN_MEM:
  case X86::FP64_TO_INT64_IN_MEM:
  case X86::FP80_TO_INT16_IN_MEM:
  case X86::FP80_TO_INT32_IN_MEM:
  case X86::FP80_TO_INT64_IN_MEM: {
    MachineFunction *F = BB->getParent();
    const TargetInstrInfo *TII = Subtarget->getInstrInfo();
    DebugLoc DL = MI->getDebugLoc();

    // Change the floating point control register to use "round towards zero"
    // mode when truncating to an integer value.
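    // Added note: in the x87 control word, the rounding-control field is bits
    // 11:10 (0b00 = round to nearest, 0b11 = round toward zero, i.e.
    // truncate) and bits 5:0 are the exception masks. The immediate 0xC7F
    // stored below is assumed to follow that layout: RC = 0b11 with all
    // exceptions masked, giving C-style truncating float-to-int semantics.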
    int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
    addFrameReference(BuildMI(*BB, MI, DL,
                              TII->get(X86::FNSTCW16m)), CWFrameIdx);

    // Load the old value of the high byte of the control word...
    unsigned OldCW =
      F->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
                      CWFrameIdx);

    // Set the high part to be round to zero...
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
      .addImm(0xC7F);

    // Reload the modified control word now...
    addFrameReference(BuildMI(*BB, MI, DL,
                              TII->get(X86::FLDCW16m)), CWFrameIdx);

    // Restore the memory image of the control word to its original value.
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
      .addReg(OldCW);

    // Get the X86 opcode to use.
    unsigned Opc;
    switch (MI->getOpcode()) {
    default: llvm_unreachable("illegal opcode!");
    case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
    case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
    case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
    case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
    case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
    case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
    case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
    case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
    case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
    }

    X86AddressMode AM;
    MachineOperand &Op = MI->getOperand(0);
    if (Op.isReg()) {
      AM.BaseType = X86AddressMode::RegBase;
      AM.Base.Reg = Op.getReg();
    } else {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = Op.getIndex();
    }
    Op = MI->getOperand(1);
    if (Op.isImm())
      AM.Scale = Op.getImm();
    Op = MI->getOperand(2);
    if (Op.isImm())
      AM.IndexReg = Op.getImm();
    Op = MI->getOperand(3);
    if (Op.isGlobal()) {
      AM.GV = Op.getGlobal();
    } else {
      AM.Disp = Op.getImm();
    }
    addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
      .addReg(MI->getOperand(X86::AddrNumOperands).getReg());

    // Reload the original control word now.
    addFrameReference(BuildMI(*BB, MI, DL,
                              TII->get(X86::FLDCW16m)), CWFrameIdx);

    MI->eraseFromParent(); // The pseudo instruction is gone now.
    return BB;
  }
    // String/text processing lowering.
  case X86::PCMPISTRM128REG:
  case X86::VPCMPISTRM128REG:
  case X86::PCMPISTRM128MEM:
  case X86::VPCMPISTRM128MEM:
  case X86::PCMPESTRM128REG:
  case X86::VPCMPESTRM128REG:
  case X86::PCMPESTRM128MEM:
  case X86::VPCMPESTRM128MEM:
    assert(Subtarget->hasSSE42() &&
           "Target must have SSE4.2 or AVX features enabled");
    return EmitPCMPSTRM(MI, BB, Subtarget->getInstrInfo());

    // String/text processing lowering.
  case X86::PCMPISTRIREG:
  case X86::VPCMPISTRIREG:
  case X86::PCMPISTRIMEM:
  case X86::VPCMPISTRIMEM:
  case X86::PCMPESTRIREG:
  case X86::VPCMPESTRIREG:
  case X86::PCMPESTRIMEM:
  case X86::VPCMPESTRIMEM:
    assert(Subtarget->hasSSE42() &&
           "Target must have SSE4.2 or AVX features enabled");
    return EmitPCMPSTRI(MI, BB, Subtarget->getInstrInfo());
    // Thread synchronization.
  case X86::MONITOR:
    return EmitMonitor(MI, BB, Subtarget);

    // xbegin
  case X86::XBEGIN:
    return EmitXBegin(MI, BB, Subtarget->getInstrInfo());

  case X86::VASTART_SAVE_XMM_REGS:
    return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);

  case X86::VAARG_64:
    return EmitVAARG64WithCustomInserter(MI, BB);

  case X86::EH_SjLj_SetJmp32:
  case X86::EH_SjLj_SetJmp64:
    return emitEHSjLjSetJmp(MI, BB);

  case X86::EH_SjLj_LongJmp32:
  case X86::EH_SjLj_LongJmp64:
    return emitEHSjLjLongJmp(MI, BB);

  case TargetOpcode::STATEPOINT:
    // As an implementation detail, STATEPOINT shares the STACKMAP format at
    // this point in the process. We diverge later.
    return emitPatchPoint(MI, BB);

  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:
    return emitPatchPoint(MI, BB);
  case X86::VFMADDPDr213r:
  case X86::VFMADDPSr213r:
  case X86::VFMADDSDr213r:
  case X86::VFMADDSSr213r:
  case X86::VFMSUBPDr213r:
  case X86::VFMSUBPSr213r:
  case X86::VFMSUBSDr213r:
  case X86::VFMSUBSSr213r:
  case X86::VFNMADDPDr213r:
  case X86::VFNMADDPSr213r:
  case X86::VFNMADDSDr213r:
  case X86::VFNMADDSSr213r:
  case X86::VFNMSUBPDr213r:
  case X86::VFNMSUBPSr213r:
  case X86::VFNMSUBSDr213r:
  case X86::VFNMSUBSSr213r:
  case X86::VFMADDSUBPDr213r:
  case X86::VFMADDSUBPSr213r:
  case X86::VFMSUBADDPDr213r:
  case X86::VFMSUBADDPSr213r:
  case X86::VFMADDPDr213rY:
  case X86::VFMADDPSr213rY:
  case X86::VFMSUBPDr213rY:
  case X86::VFMSUBPSr213rY:
  case X86::VFNMADDPDr213rY:
  case X86::VFNMADDPSr213rY:
  case X86::VFNMSUBPDr213rY:
  case X86::VFNMSUBPSr213rY:
  case X86::VFMADDSUBPDr213rY:
  case X86::VFMADDSUBPSr213rY:
  case X86::VFMSUBADDPDr213rY:
  case X86::VFMSUBADDPSr213rY:
    return emitFMA3Instr(MI, BB);
  }
}
//===----------------------------------------------------------------------===//
//                           X86 Optimization Hooks
//===----------------------------------------------------------------------===//
void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      APInt &KnownZero,
                                                      APInt &KnownOne,
                                                      const SelectionDAG &DAG,
                                                      unsigned Depth) const {
  unsigned BitWidth = KnownZero.getBitWidth();
  unsigned Opc = Op.getOpcode();
  assert((Opc >= ISD::BUILTIN_OP_END ||
          Opc == ISD::INTRINSIC_WO_CHAIN ||
          Opc == ISD::INTRINSIC_W_CHAIN ||
          Opc == ISD::INTRINSIC_VOID) &&
         "Should use MaskedValueIsZero if you don't know whether Op"
         " is a target node!");

  KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
  switch (Opc) {
  default: break;
  case X86ISD::ADD:
  case X86ISD::SUB:
  case X86ISD::ADC:
  case X86ISD::SBB:
  case X86ISD::SMUL:
  case X86ISD::UMUL:
  case X86ISD::INC:
  case X86ISD::DEC:
  case X86ISD::OR:
  case X86ISD::XOR:
  case X86ISD::AND:
    // These nodes' second result is a boolean.
    if (Op.getResNo() == 0)
      break;
    // Fallthrough
  case X86ISD::SETCC:
    KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    break;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    unsigned NumLoBits = 0;
    switch (IntId) {
    default: break;
    case Intrinsic::x86_sse_movmsk_ps:
    case Intrinsic::x86_avx_movmsk_ps_256:
    case Intrinsic::x86_sse2_movmsk_pd:
    case Intrinsic::x86_avx_movmsk_pd_256:
    case Intrinsic::x86_mmx_pmovmskb:
    case Intrinsic::x86_sse2_pmovmskb_128:
    case Intrinsic::x86_avx2_pmovmskb: {
      // High bits of movmskp{s|d}, pmovmskb are known zero.
      switch (IntId) {
      default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
      case Intrinsic::x86_sse_movmsk_ps:     NumLoBits = 4;  break;
      case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8;  break;
      case Intrinsic::x86_sse2_movmsk_pd:    NumLoBits = 2;  break;
      case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4;  break;
      case Intrinsic::x86_mmx_pmovmskb:      NumLoBits = 8;  break;
      case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
      case Intrinsic::x86_avx2_pmovmskb:     NumLoBits = 32; break;
      }
      KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
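      // Worked example (added note): for x86_sse2_pmovmskb_128 the result
      // packs the 16 byte-lane sign bits into bits 15..0, so with
      // BitWidth == 32 and NumLoBits == 16 the line above records
      // KnownZero = 0xFFFF0000 - the upper 16 bits are always zero.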
      break;
    }
    }
    break;
  }
  }
}

unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
  SDValue Op,
  const SelectionDAG &,
  unsigned Depth) const {
  // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
  if (Op.getOpcode() == X86ISD::SETCC_CARRY)
    return Op.getValueType().getScalarType().getSizeInBits();

  // Fallback case.
  return 1;
}
/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
/// node is a GlobalAddress + offset.
bool X86TargetLowering::isGAPlusOffset(SDNode *N,
                                       const GlobalValue* &GA,
                                       int64_t &Offset) const {
  if (N->getOpcode() == X86ISD::Wrapper) {
    if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
      GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
      Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
      return true;
    }
  }
  return TargetLowering::isGAPlusOffset(N, GA, Offset);
}
/// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the
/// same as extracting the high 128-bit part of a 256-bit vector and then
/// inserting the result into the low part of a new 256-bit vector.
static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) {
  EVT VT = SVOp->getValueType(0);
  unsigned NumElems = VT.getVectorNumElements();

  // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
  for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j)
    if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
        SVOp->getMaskElt(j) >= 0)
      return false;

  return true;
}
/// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the
/// same as extracting the low 128-bit part of a 256-bit vector and then
/// inserting the result into the high part of a new 256-bit vector.
static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) {
  EVT VT = SVOp->getValueType(0);
  unsigned NumElems = VT.getVectorNumElements();

  // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
  for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j)
    if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
        SVOp->getMaskElt(j) >= 0)
      return false;

  return true;
}
/// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
                                        TargetLowering::DAGCombinerInfo &DCI,
                                        const X86Subtarget* Subtarget) {
  SDLoc dl(N);
  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
  SDValue V1 = SVOp->getOperand(0);
  SDValue V2 = SVOp->getOperand(1);
  EVT VT = SVOp->getValueType(0);
  unsigned NumElems = VT.getVectorNumElements();

  if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
      V2.getOpcode() == ISD::CONCAT_VECTORS) {
    //
    //                   0,0,0,...
    //                      |
    //    V      UNDEF    BUILD_VECTOR  UNDEF
    //     \      /           \           /
    //  CONCAT_VECTOR         CONCAT_VECTOR
    //         \                  /
    //          \                /
    //          RESULT: V + zero extended
    //
    if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
        V2.getOperand(1).getOpcode() != ISD::UNDEF ||
        V1.getOperand(1).getOpcode() != ISD::UNDEF)
      return SDValue();

    if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
      return SDValue();

    // To match the shuffle mask, the first half of the mask should
    // be exactly the first vector, and all the rest a splat with the
    // first element of the second one.
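    // Illustrative example (added note): for v8i32 with
    //   V1 = concat(V, undef) and V2 = concat(zeros, undef),
    // the mask we accept is <0, 1, 2, 3, 8, 8, 8, 8> - lanes 0..3 pass V
    // through unchanged and lanes 4..7 all read element NumElems, the first
    // (all-zero) element of V2.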
    for (unsigned i = 0; i != NumElems/2; ++i)
      if (!isUndefOrEqual(SVOp->getMaskElt(i), i) ||
          !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
        return SDValue();

    // If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
    if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) {
      if (Ld->hasNUsesOfValue(1, 0)) {
        SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
        SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
        SDValue ResNode =
          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
                                  Ld->getMemoryVT(),
                                  Ld->getPointerInfo(),
                                  Ld->getAlignment(),
                                  false/*isVolatile*/, true/*ReadMem*/,
                                  false/*WriteMem*/);

        // Make sure the newly-created LOAD is in the same position as Ld in
        // terms of dependency. We create a TokenFactor for Ld and ResNode,
        // and update uses of Ld's output chain to use the TokenFactor.
        if (Ld->hasAnyUseOfValue(1)) {
          SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                             SDValue(Ld, 1), SDValue(ResNode.getNode(), 1));
          DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
          DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1),
                                 SDValue(ResNode.getNode(), 1));
        }

        return DAG.getNode(ISD::BITCAST, dl, VT, ResNode);
      }
    }

    // Emit a zeroed vector and insert the desired subvector on its
    // first half.
    SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
    SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
    return DCI.CombineTo(N, InsV);
  }
  //===--------------------------------------------------------------------===//
  // Combine some shuffles into subvector extracts and inserts:
  //

  // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
  if (isShuffleHigh128VectorInsertLow(SVOp)) {
    SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl);
    SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl);
    return DCI.CombineTo(N, InsV);
  }

  // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
  if (isShuffleLow128VectorInsertHigh(SVOp)) {
    SDValue V = Extract128BitVector(V1, 0, DAG, dl);
    SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl);
    return DCI.CombineTo(N, InsV);
  }

  return SDValue();
}
/// \brief Combine an arbitrary chain of shuffles into a single instruction if
/// possible.
///
/// This is the leaf of the recursive combining below. When we have found some
/// chain of single-use x86 shuffle instructions and accumulated the combined
/// shuffle mask represented by them, this will try to pattern match that mask
/// into either a single instruction if there is a special purpose instruction
/// for this operation, or into a PSHUFB instruction which is a fully general
/// instruction but should only be used to replace chains over a certain depth.
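///
/// For example (added note), an accumulated v2f64 mask of <0, 0> collapses to
/// a single MOVDDUP (or MOVLHPS before SSE3), a v4f32 mask of <1, 1, 3, 3>
/// collapses to MOVSHDUP on SSE3 targets, and deeper chains fall back to one
/// PSHUFB byte shuffle when SSSE3 is available.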
static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
                                   int Depth, bool HasPSHUFB, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
  assert(!Mask.empty() && "Cannot combine an empty shuffle mask!");

  // Find the operand that enters the chain. Note that multiple uses are OK
  // here, we're not going to remove the operand we find.
  SDValue Input = Op.getOperand(0);
  while (Input.getOpcode() == ISD::BITCAST)
    Input = Input.getOperand(0);

  MVT VT = Input.getSimpleValueType();
  MVT RootVT = Root.getSimpleValueType();
  SDLoc DL(Root);

  // Just remove no-op shuffle masks.
  if (Mask.size() == 1) {
    DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Input),
                  /*AddTo*/ true);
    return true;
  }

  // Use the float domain if the operand type is a floating point type.
  bool FloatDomain = VT.isFloatingPoint();
  // For floating point shuffles, we don't have free copies in the shuffle
  // instructions or the ability to load as part of the instruction, so
  // canonicalize their shuffles to UNPCK or MOV variants.
  //
  // Note that even with AVX we prefer the PSHUFD form of shuffle for integer
  // vectors because it can have a load folded into it that UNPCK cannot. This
  // doesn't preclude something switching to the shorter encoding post-RA.
  if (FloatDomain) {
    if (Mask.equals(0, 0) || Mask.equals(1, 1)) {
      bool Lo = Mask.equals(0, 0);
      unsigned Shuffle;
      MVT ShuffleVT;
      // Check if we have SSE3 which will let us use MOVDDUP. That instruction
      // is no slower than UNPCKLPD but has the option to fold the input
      // operand into even an unaligned memory load.
      if (Lo && Subtarget->hasSSE3()) {
        Shuffle = X86ISD::MOVDDUP;
        ShuffleVT = MVT::v2f64;
      } else {
        // We have MOVLHPS and MOVHLPS throughout SSE and they encode smaller
        // than the UNPCK variants.
        Shuffle = Lo ? X86ISD::MOVLHPS : X86ISD::MOVHLPS;
        ShuffleVT = MVT::v4f32;
      }
      if (Depth == 1 && Root->getOpcode() == Shuffle)
        return false; // Nothing to do!
      Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
      DCI.AddToWorklist(Op.getNode());
      if (Shuffle == X86ISD::MOVDDUP)
        Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
      else
        Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
      DCI.AddToWorklist(Op.getNode());
      DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
                    /*AddTo*/ true);
      return true;
    }
    if (Subtarget->hasSSE3() &&
        (Mask.equals(0, 0, 2, 2) || Mask.equals(1, 1, 3, 3))) {
      bool Lo = Mask.equals(0, 0, 2, 2);
      unsigned Shuffle = Lo ? X86ISD::MOVSLDUP : X86ISD::MOVSHDUP;
      MVT ShuffleVT = MVT::v4f32;
      if (Depth == 1 && Root->getOpcode() == Shuffle)
        return false; // Nothing to do!
      Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
      DCI.AddToWorklist(Op.getNode());
      Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
      DCI.AddToWorklist(Op.getNode());
      DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
                    /*AddTo*/ true);
      return true;
    }
    if (Mask.equals(0, 0, 1, 1) || Mask.equals(2, 2, 3, 3)) {
      bool Lo = Mask.equals(0, 0, 1, 1);
      unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
      MVT ShuffleVT = MVT::v4f32;
      if (Depth == 1 && Root->getOpcode() == Shuffle)
        return false; // Nothing to do!
      Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
      DCI.AddToWorklist(Op.getNode());
      Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
      DCI.AddToWorklist(Op.getNode());
      DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
                    /*AddTo*/ true);
      return true;
    }
  }
  // We always canonicalize the 8 x i16 and 16 x i8 shuffles into their UNPCK
  // variants as none of these have single-instruction variants that are
  // superior to the UNPCK formulation.
  if (!FloatDomain &&
      (Mask.equals(0, 0, 1, 1, 2, 2, 3, 3) ||
       Mask.equals(4, 4, 5, 5, 6, 6, 7, 7) ||
       Mask.equals(0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7) ||
       Mask.equals(8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
                   15))) {
    bool Lo = Mask[0] == 0;
    unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
    if (Depth == 1 && Root->getOpcode() == Shuffle)
      return false; // Nothing to do!
    MVT ShuffleVT;
    switch (Mask.size()) {
    case 8:
      ShuffleVT = MVT::v8i16;
      break;
    case 16:
      ShuffleVT = MVT::v16i8;
      break;
    default:
      llvm_unreachable("Impossible mask size!");
    }
    Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
    DCI.AddToWorklist(Op.getNode());
    Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
    DCI.AddToWorklist(Op.getNode());
    DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
                  /*AddTo*/ true);
    return true;
  }
  // Don't try to re-form single instruction chains under any circumstances now
  // that we've done encoding canonicalization for them.
  if (Depth < 3)
    return false;

  // If we have 3 or more shuffle instructions or a chain involving PSHUFB, we
  // can replace them with a single PSHUFB instruction profitably. Intel's
  // manuals suggest only using PSHUFB if doing so replaces 5 instructions, but
  // in practice PSHUFB tends to be *very* fast so we're more aggressive.
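  //
  // Worked example (added note): a 4-element (dword) mask <2, 0, 1, 3> uses
  // Ratio = 16 / 4 = 4, so each mask entry expands into four consecutive byte
  // selectors and the PSHUFB mask becomes
  //   <8, 9, 10, 11, 0, 1, 2, 3, 4, 5, 6, 7, 12, 13, 14, 15>.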
  if ((Depth >= 3 || HasPSHUFB) && Subtarget->hasSSSE3()) {
    SmallVector<SDValue, 16> PSHUFBMask;
    assert(Mask.size() <= 16 && "Can't shuffle elements smaller than bytes!");
    int Ratio = 16 / Mask.size();
    for (unsigned i = 0; i < 16; ++i) {
      if (Mask[i / Ratio] == SM_SentinelUndef) {
        PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
        continue;
      }
      int M = Mask[i / Ratio] != SM_SentinelZero
                  ? Ratio * Mask[i / Ratio] + i % Ratio
                  : 255;
      PSHUFBMask.push_back(DAG.getConstant(M, MVT::i8));
    }
    Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Input);
    DCI.AddToWorklist(Op.getNode());
    SDValue PSHUFBMaskOp =
        DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, PSHUFBMask);
    DCI.AddToWorklist(PSHUFBMaskOp.getNode());
    Op = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, Op, PSHUFBMaskOp);
    DCI.AddToWorklist(Op.getNode());
    DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
                  /*AddTo*/ true);
    return true;
  }

  // Failed to find any combines.
  return false;
}
/// \brief Fully generic combining of x86 shuffle instructions.
///
/// This should be the last combine run over the x86 shuffle instructions. Once
/// they have been fully optimized, this will recursively consider all chains
/// of single-use shuffle instructions, build a generic model of the cumulative
/// shuffle operation, and check for simpler instructions which implement this
/// operation. We use this primarily for two purposes:
///
/// 1) Collapse generic shuffles to specialized single instructions when
///    equivalent. In most cases, this is just an encoding size win, but
///    sometimes we will collapse multiple generic shuffles into a single
///    special-purpose shuffle.
/// 2) Look for sequences of shuffle instructions with 3 or more total
///    instructions, and replace them with the slightly more expensive SSSE3
///    PSHUFB instruction if available. We do this as the last combining step
///    to ensure we avoid using PSHUFB if we can implement the shuffle with
///    a suitable short sequence of other instructions. The PSHUFB will either
///    use a register or have to read from memory and so is slightly (but only
///    slightly) more expensive than the other shuffle instructions.
///
/// Because this is inherently a quadratic operation (for each shuffle in
/// a chain, we recurse up the chain), the depth is limited to 8 instructions.
/// This should never be an issue in practice as the shuffle lowering doesn't
/// produce sequences of more than 8 instructions.
///
/// FIXME: We will currently miss some cases where the redundant shuffling
/// would simplify under the threshold for PSHUFB formation because of
/// combine-ordering. To fix this, we should do the redundant instruction
/// combining in this recursive walk.
static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
                                          ArrayRef<int> RootMask,
                                          int Depth, bool HasPSHUFB,
                                          SelectionDAG &DAG,
                                          TargetLowering::DAGCombinerInfo &DCI,
                                          const X86Subtarget *Subtarget) {
  // Bound the depth of our recursive combine because this is ultimately
  // quadratic in nature.
  if (Depth > 8)
    return false;

  // Directly rip through bitcasts to find the underlying operand.
  while (Op.getOpcode() == ISD::BITCAST && Op.getOperand(0).hasOneUse())
    Op = Op.getOperand(0);

  MVT VT = Op.getSimpleValueType();
  if (!VT.isVector())
    return false; // Bail if we hit a non-vector.
  // FIXME: This routine should be taught about 256-bit shuffles, or a 256-bit
  // version should be added.
  if (VT.getSizeInBits() != 128)
    return false;

  assert(Root.getSimpleValueType().isVector() &&
         "Shuffles operate on vector types!");
  assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
         "Can only combine shuffles of the same vector register size.");

  if (!isTargetShuffle(Op.getOpcode()))
    return false;
  SmallVector<int, 16> OpMask;
  bool IsUnary;
  bool HaveMask = getTargetShuffleMask(Op.getNode(), VT, OpMask, IsUnary);
  // We only can combine unary shuffles which we can decode the mask for.
  if (!HaveMask || !IsUnary)
    return false;

  assert(VT.getVectorNumElements() == OpMask.size() &&
         "Different mask size from vector size!");
  assert(((RootMask.size() > OpMask.size() &&
           RootMask.size() % OpMask.size() == 0) ||
          (OpMask.size() > RootMask.size() &&
           OpMask.size() % RootMask.size() == 0) ||
          OpMask.size() == RootMask.size()) &&
         "The smaller number of elements must divide the larger.");
  int RootRatio = std::max<int>(1, OpMask.size() / RootMask.size());
  int OpRatio = std::max<int>(1, RootMask.size() / OpMask.size());
  assert(((RootRatio == 1 && OpRatio == 1) ||
          (RootRatio == 1) != (OpRatio == 1)) &&
         "Must not have a ratio for both incoming and op masks!");

  SmallVector<int, 16> Mask;
  Mask.reserve(std::max(OpMask.size(), RootMask.size()));

  // Merge this shuffle operation's mask into our accumulated mask. Note that
  // this shuffle's mask will be the first applied to the input, followed by
  // the root mask to get us all the way to the root value arrangement. The
  // reason for this order is that we are recursing up the operation chain.
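  //
  // Worked example (added note): merging a v4i32 root mask <1, 0, 3, 2> with
  // a v8i16 op mask <1, 0, 2, 3, 4, 5, 6, 7> gives RootRatio == 2 and
  // OpRatio == 1, so Mask[i] = OpMask[RootMask[i / 2] * 2 + i % 2], producing
  // the combined 8-element mask <2, 3, 1, 0, 6, 7, 4, 5>.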
  for (int i = 0, e = std::max(OpMask.size(), RootMask.size()); i < e; ++i) {
    int RootIdx = i / RootRatio;
    if (RootMask[RootIdx] < 0) {
      // This is a zero or undef lane, we're done.
      Mask.push_back(RootMask[RootIdx]);
      continue;
    }

    int RootMaskedIdx = RootMask[RootIdx] * RootRatio + i % RootRatio;
    int OpIdx = RootMaskedIdx / OpRatio;
    if (OpMask[OpIdx] < 0) {
      // The incoming lanes are zero or undef, it doesn't matter which ones we
      // are using.
      Mask.push_back(OpMask[OpIdx]);
      continue;
    }

    // Ok, we have non-zero lanes, map them through.
    Mask.push_back(OpMask[OpIdx] * OpRatio +
                   RootMaskedIdx % OpRatio);
  }

  // See if we can recurse into the operand to combine more things.
  switch (Op.getOpcode()) {
  case X86ISD::PSHUFB:
    HasPSHUFB = true;
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFHW:
  case X86ISD::PSHUFLW:
    if (Op.getOperand(0).hasOneUse() &&
        combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
                                      HasPSHUFB, DAG, DCI, Subtarget))
      return true;
    break;

  case X86ISD::UNPCKL:
  case X86ISD::UNPCKH:
    assert(Op.getOperand(0) == Op.getOperand(1) &&
           "We only combine unary shuffles!");
    // We can't check for single use, we have to check that this shuffle is the
    // only user.
    if (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
        combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
                                      HasPSHUFB, DAG, DCI, Subtarget))
      return true;
    break;
  }

  // Minor canonicalization of the accumulated shuffle mask to make it easier
  // to match below. All this does is detect masks with sequential pairs of
  // elements, and shrink them to the half-width mask. It does this in a loop
  // so it will reduce the size of the mask to the minimal width mask which
  // performs an equivalent shuffle.
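  //
  // For instance (added note), the 8-element mask <2, 3, 0, 1, 6, 7, 4, 5>
  // consists of sequential pairs and widens to the 4-element mask
  // <1, 0, 3, 2>, whose pairs are no longer sequential, so widening stops
  // there.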
  SmallVector<int, 16> WidenedMask;
  while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
    Mask = std::move(WidenedMask);
    WidenedMask.clear();
  }

  return combineX86ShuffleChain(Op, Root, Mask, Depth, HasPSHUFB, DAG, DCI,
                                Subtarget);
}
/// \brief Get the PSHUF-style mask from PSHUF node.
///
/// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
/// PSHUF-style masks that can be reused with such instructions.
static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
  SmallVector<int, 4> Mask;
  bool IsUnary;
  bool HaveMask =
      getTargetShuffleMask(N.getNode(), N.getSimpleValueType(), Mask, IsUnary);
  (void)HaveMask;
  assert(HaveMask);

  switch (N.getOpcode()) {
  case X86ISD::PSHUFD:
    return Mask;
  case X86ISD::PSHUFLW:
    Mask.resize(4);
    return Mask;
  case X86ISD::PSHUFHW:
    Mask.erase(Mask.begin(), Mask.begin() + 4);
    for (int &M : Mask)
      M -= 4;
    return Mask;
  default:
    llvm_unreachable("No valid shuffle instruction found!");
  }
}
/// \brief Search for a combinable shuffle across a chain ending in pshufd.
///
/// We walk up the chain and look for a combinable shuffle, skipping over
/// shuffles that we could hoist this shuffle's transformation past without
/// altering anything.
static SDValue
combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
                             SelectionDAG &DAG,
                             TargetLowering::DAGCombinerInfo &DCI) {
  assert(N.getOpcode() == X86ISD::PSHUFD &&
         "Called with something other than an x86 128-bit half shuffle!");
  SDLoc DL(N);

  // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
  // of the shuffles in the chain so that we can form a fresh chain to replace
  // this one.
  SmallVector<SDValue, 8> Chain;
  SDValue V = N.getOperand(0);
  for (; V.hasOneUse(); V = V.getOperand(0)) {
    switch (V.getOpcode()) {
    default:
      return SDValue(); // Nothing combined!

    case ISD::BITCAST:
      // Skip bitcasts as we always know the type for the target specific
      // instructions.
      continue;

    case X86ISD::PSHUFD:
      // Found another dword shuffle.
      break;

    case X86ISD::PSHUFLW:
      // Check that the low words (being shuffled) are the identity in the
      // dword shuffle, and the high words are self-contained.
      if (Mask[0] != 0 || Mask[1] != 1 ||
          !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
        return SDValue();

      Chain.push_back(V);
      continue;

    case X86ISD::PSHUFHW:
      // Check that the high words (being shuffled) are the identity in the
      // dword shuffle, and the low words are self-contained.
      if (Mask[2] != 2 || Mask[3] != 3 ||
          !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
        return SDValue();

      Chain.push_back(V);
      continue;

    case X86ISD::UNPCKL:
    case X86ISD::UNPCKH:
      // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
      // shuffle into a preceding word shuffle.
      if (V.getValueType() != MVT::v16i8 && V.getValueType() != MVT::v8i16)
        return SDValue();

      // Search for a half-shuffle which we can combine with.
      unsigned CombineOp =
          V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
      if (V.getOperand(0) != V.getOperand(1) ||
          !V->isOnlyUserOf(V.getOperand(0).getNode()))
        return SDValue();

      Chain.push_back(V);
      V = V.getOperand(0);
      do {
        switch (V.getOpcode()) {
        default:
          return SDValue(); // Nothing to combine.

        case X86ISD::PSHUFLW:
        case X86ISD::PSHUFHW:
          if (V.getOpcode() == CombineOp)
            break;

          Chain.push_back(V);

          // Fallthrough!
        case ISD::BITCAST:
          V = V.getOperand(0);
          continue;
        }
        break;
      } while (V.hasOneUse());
      break;
    }
    // Break out of the loop if we break out of the switch.
    break;
  }

  if (!V.hasOneUse())
    // We fell out of the loop without finding a viable combining instruction.
    return SDValue();

  // Merge this node's mask and our incoming mask.
  SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
  for (int &M : Mask)
    M = VMask[M];
  V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
                  getV4X86ShuffleImm8ForMask(Mask, DAG));

  // Rebuild the chain around this new shuffle.
  while (!Chain.empty()) {
    SDValue W = Chain.pop_back_val();

    if (V.getValueType() != W.getOperand(0).getValueType())
      V = DAG.getNode(ISD::BITCAST, DL, W.getOperand(0).getValueType(), V);

    switch (W.getOpcode()) {
    default:
      llvm_unreachable("Only PSHUF and UNPCK instructions get here!");

    case X86ISD::UNPCKL:
    case X86ISD::UNPCKH:
      V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
      break;

    case X86ISD::PSHUFD:
    case X86ISD::PSHUFLW:
    case X86ISD::PSHUFHW:
      V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
      break;
    }
  }
  if (V.getValueType() != N.getValueType())
    V = DAG.getNode(ISD::BITCAST, DL, N.getValueType(), V);

  // Return the new chain to replace N.
  return V;
}
/// \brief Search for a combinable shuffle across a chain ending in pshuflw
/// or pshufhw.
///
/// We walk up the chain, skipping shuffles of the other half and looking
/// through shuffles which switch halves trying to find a shuffle of the same
/// pair of dwords.
static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
                                        SelectionDAG &DAG,
                                        TargetLowering::DAGCombinerInfo &DCI) {
  assert(
      (N.getOpcode() == X86ISD::PSHUFLW || N.getOpcode() == X86ISD::PSHUFHW) &&
      "Called with something other than an x86 128-bit half shuffle!");
  SDLoc DL(N);
  unsigned CombineOpcode = N.getOpcode();

  // Walk up a single-use chain looking for a combinable shuffle.
  SDValue V = N.getOperand(0);
  for (; V.hasOneUse(); V = V.getOperand(0)) {
    switch (V.getOpcode()) {
    default:
      return false; // Nothing combined!

    case ISD::BITCAST:
      // Skip bitcasts as we always know the type for the target specific
      // instructions.
      continue;

    case X86ISD::PSHUFLW:
    case X86ISD::PSHUFHW:
      if (V.getOpcode() == CombineOpcode)
        break;

      // Other-half shuffles are no-ops.
      continue;
    }
    // Break out of the loop if we break out of the switch.
    break;
  }

  if (!V.hasOneUse())
    // We fell out of the loop without finding a viable combining instruction.
    return false;

  // Combine away the bottom node as its shuffle will be accumulated into
  // a preceding shuffle.
  DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);

  // Record the old value.
  SDValue Old = V;

  // Merge this node's mask and our incoming mask (adjusted to account for all
  // the pshufd instructions encountered).
  SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
  for (int &M : Mask)
    M = VMask[M];
  V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
                  getV4X86ShuffleImm8ForMask(Mask, DAG));

  // Check that the shuffles didn't cancel each other out. If not, we need to
  // combine to the new one.
  if (Old != V)
    // Replace the combinable shuffle with the combined one, updating all users
    // so that we re-evaluate the chain here.
    DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);

  return true;
}
/// \brief Try to combine x86 target specific shuffles.
static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
                                           TargetLowering::DAGCombinerInfo &DCI,
                                           const X86Subtarget *Subtarget) {
  SDLoc DL(N);
  MVT VT = N.getSimpleValueType();
  SmallVector<int, 4> Mask;

  switch (N.getOpcode()) {
  case X86ISD::PSHUFD:
  case X86ISD::PSHUFLW:
  case X86ISD::PSHUFHW:
    Mask = getPSHUFShuffleMask(N);
    assert(Mask.size() == 4);
    break;
  default:
    return SDValue();
  }

  // Nuke no-op shuffles that show up after combining.
  if (isNoopShuffleMask(Mask))
    return DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);

  // Look for simplifications involving one or two shuffle instructions.
  SDValue V = N.getOperand(0);
  switch (N.getOpcode()) {
  default:
    break;
  case X86ISD::PSHUFLW:
  case X86ISD::PSHUFHW:
    assert(VT == MVT::v8i16);
    (void)VT;

    if (combineRedundantHalfShuffle(N, Mask, DAG, DCI))
      return SDValue(); // We combined away this shuffle, so we're done.

    // See if this reduces to a PSHUFD which is no more expensive and can
    // combine with more operations. Note that it has to at least flip the
    // dwords as otherwise it would have been removed as a no-op.
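    //
    // Example (added note): a PSHUFLW mask of <2, 3, 0, 1> swaps the two low
    // dwords while leaving the high half untouched, which is exactly PSHUFD
    // with dword mask <1, 0, 2, 3>; for PSHUFHW the same trick adjusts
    // DMask[2] and DMask[3] instead.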
    if (Mask[0] == 2 && Mask[1] == 3 && Mask[2] == 0 && Mask[3] == 1) {
      int DMask[] = {0, 1, 2, 3};
      int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
      DMask[DOffset + 0] = DOffset + 1;
      DMask[DOffset + 1] = DOffset + 0;
      V = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V);
      DCI.AddToWorklist(V.getNode());
      V = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V,
                      getV4X86ShuffleImm8ForMask(DMask, DAG));
      DCI.AddToWorklist(V.getNode());
      return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
    }
    // Look for shuffle patterns which can be implemented as a single unpack.
    // FIXME: This doesn't handle the location of the PSHUFD generically, and
    // only works when we have a PSHUFD followed by two half-shuffles.
    if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
        (V.getOpcode() == X86ISD::PSHUFLW ||
         V.getOpcode() == X86ISD::PSHUFHW) &&
        V.getOpcode() != N.getOpcode() &&
        V.hasOneUse()) {
      SDValue D = V.getOperand(0);
      while (D.getOpcode() == ISD::BITCAST && D.hasOneUse())
        D = D.getOperand(0);
      if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
        SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
        SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
        int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
        int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
        int WordMask[8];
        for (int i = 0; i < 4; ++i) {
          WordMask[i + NOffset] = Mask[i] + NOffset;
          WordMask[i + VOffset] = VMask[i] + VOffset;
        }
        // Map the word mask through the DWord mask.
        int MappedMask[8];
        for (int i = 0; i < 8; ++i)
          MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
        const int UnpackLoMask[] = {0, 0, 1, 1, 2, 2, 3, 3};
        const int UnpackHiMask[] = {4, 4, 5, 5, 6, 6, 7, 7};
        if (std::equal(std::begin(MappedMask), std::end(MappedMask),
                       std::begin(UnpackLoMask)) ||
            std::equal(std::begin(MappedMask), std::end(MappedMask),
                       std::begin(UnpackHiMask))) {
          // We can replace all three shuffles with an unpack.
          V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, D.getOperand(0));
          DCI.AddToWorklist(V.getNode());
          return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
                                                : X86ISD::UNPCKH,
                             DL, MVT::v8i16, V, V);
        }
      }
    }

    break;

  case X86ISD::PSHUFD:
    if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG, DCI))
      return NewN;

    break;
  }

  return SDValue();
}
/// \brief Try to combine a shuffle into a target-specific add-sub node.
///
/// We combine this directly on the abstract vector shuffle nodes so it is
/// easier to generically match. We also insert dummy vector shuffle nodes for
/// the operands which explicitly discard the lanes which are unused by this
/// operation, so that their unused status flows through the rest of the
/// combiner.
static SDValue combineShuffleToAddSub(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  // We only handle target-independent shuffles.
  // FIXME: It would be easy and harmless to use the target shuffle mask
  // extraction tool to support more.
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return SDValue();

  auto *SVN = cast<ShuffleVectorSDNode>(N);
  ArrayRef<int> Mask = SVN->getMask();
  SDValue V1 = N->getOperand(0);
  SDValue V2 = N->getOperand(1);

  // We require the first shuffle operand to be the SUB node, and the second to
  // be the ADD node.
  // FIXME: We should support the commuted patterns.
  if (V1->getOpcode() != ISD::FSUB || V2->getOpcode() != ISD::FADD)
    return SDValue();

  // If there are other uses of these operations we can't fold them.
  if (!V1->hasOneUse() || !V2->hasOneUse())
    return SDValue();

  // Ensure that both operations have the same operands. Note that we can
  // commute the FADD operands.
  SDValue LHS = V1->getOperand(0), RHS = V1->getOperand(1);
  if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
      (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
    return SDValue();

  // We're looking for blends between FADD and FSUB nodes. We insist on these
  // nodes being lined up in a specific expected pattern.
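  //
  // Illustrative example (added note): for v4f32 the accepted mask
  // <0, 5, 2, 7> takes lanes 0 and 2 from the FSUB node and lanes 1 and 3
  // from the FADD node, i.e. {sub0, add1, sub2, add3} - exactly the lane
  // pattern computed by ADDSUBPS.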
  if (!(isShuffleEquivalent(V1, V2, Mask, 0, 3) ||
        isShuffleEquivalent(V1, V2, Mask, 0, 5, 2, 7) ||
        isShuffleEquivalent(V1, V2, Mask, 0, 9, 2, 11, 4, 13, 6, 15)))
    return SDValue();

  // Only specific types are legal at this point, assert so we notice if and
  // when these change.
  assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v8f32 ||
          VT == MVT::v4f64) &&
         "Unknown vector type encountered!");

  return DAG.getNode(X86ISD::ADDSUB, DL, VT, LHS, RHS);
}
/// PerformShuffleCombine - Performs several different shuffle combines.
static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     const X86Subtarget *Subtarget) {
  SDLoc dl(N);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);

  // Don't create instructions with illegal types after legalize types has run.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
    return SDValue();

  // If we have legalized the vector types, look for blends of FADD and FSUB
  // nodes that we can fuse into an ADDSUB node.
  if (TLI.isTypeLegal(VT) && Subtarget->hasSSE3())
    if (SDValue AddSub = combineShuffleToAddSub(N, DAG))
      return AddSub;

  // Combine 256-bit vector shuffles. This is only profitable when in AVX mode.
  if (Subtarget->hasFp256() && VT.is256BitVector() &&
      N->getOpcode() == ISD::VECTOR_SHUFFLE)
    return PerformShuffleCombine256(N, DAG, DCI, Subtarget);
  // During Type Legalization, when promoting illegal vector types,
  // the backend might introduce new shuffle dag nodes and bitcasts.
  //
  // This code performs the following transformation:
  // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
  //       (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
  //
  // We do this only if both the bitcast and the BINOP dag nodes have
  // one use. Also, perform this transformation only if the new binary
  // operation is legal. This is to avoid introducing dag nodes that
  // potentially need to be further expanded (or custom lowered) into a
  // less optimal sequence of dag nodes.
  if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
      N1.getOpcode() == ISD::UNDEF && N0.hasOneUse() &&
      N0.getOpcode() == ISD::BITCAST) {
    SDValue BC0 = N0.getOperand(0);
    EVT SVT = BC0.getValueType();
    unsigned Opcode = BC0.getOpcode();
    unsigned NumElts = VT.getVectorNumElements();

    if (BC0.hasOneUse() && SVT.isVector() &&
        SVT.getVectorNumElements() * 2 == NumElts &&
        TLI.isOperationLegal(Opcode, VT)) {
      bool CanFold = false;
      switch (Opcode) {
      default : break;
      case ISD::ADD :
      case ISD::FADD :
      case ISD::SUB :
      case ISD::FSUB :
      case ISD::MUL :
      case ISD::FMUL :
        CanFold = true;
      }

      unsigned SVTNumElts = SVT.getVectorNumElements();
      ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
      for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
        CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
      for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
        CanFold = SVOp->getMaskElt(i) < 0;

      if (CanFold) {
        SDValue BC00 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(0));
        SDValue BC01 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(1));
        SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
        return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, &SVOp->getMask()[0]);
      }
    }
  }
  // Only handle 128-bit wide vectors from here on.
  if (!VT.is128BitVector())
    return SDValue();

  // Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
  // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
  // consecutive, non-overlapping, and in the right order.
  SmallVector<SDValue, 16> Elts;
  for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
    Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));

  SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true);
  if (LD.getNode())
    return LD;

  if (isTargetShuffle(N->getOpcode())) {
    SDValue Shuffle =
        PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget);
    if (Shuffle.getNode())
      return Shuffle;

    // Try recursively combining arbitrary sequences of x86 shuffle
    // instructions into higher-order shuffles. We do this after combining
    // specific PSHUF instruction sequences into their minimal form so that we
    // can evaluate how many specialized shuffle instructions are involved in
    // a particular chain.
    SmallVector<int, 1> NonceMask; // Just a placeholder.
    NonceMask.push_back(0);
    if (combineX86ShufflesRecursively(SDValue(N, 0), SDValue(N, 0), NonceMask,
                                      /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
                                      DCI, Subtarget))
      return SDValue(); // This routine will use CombineTo to replace N.
  }

  return SDValue();
}
/// PerformTruncateCombine - Converts a truncate operation to
/// a sequence of vector shuffle operations.
/// It is possible when we truncate a 256-bit vector to a 128-bit vector.
static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
                                      TargetLowering::DAGCombinerInfo &DCI,
                                      const X86Subtarget *Subtarget) {
  return SDValue();
}
/// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
/// specific shuffle of a load can be folded into a single element load.
/// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
/// shuffles have been custom lowered so we need to handle those here.
static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
                                         TargetLowering::DAGCombinerInfo &DCI) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue InVec = N->getOperand(0);
  SDValue EltNo = N->getOperand(1);

  if (!isa<ConstantSDNode>(EltNo))
    return SDValue();

  EVT OriginalVT = InVec.getValueType();

  if (InVec.getOpcode() == ISD::BITCAST) {
    // Don't duplicate a load with other uses.
    if (!InVec.hasOneUse())
      return SDValue();
    EVT BCVT = InVec.getOperand(0).getValueType();
    if (BCVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
      return SDValue();
    InVec = InVec.getOperand(0);
  }

  EVT CurrentVT = InVec.getValueType();

  if (!isTargetShuffle(InVec.getOpcode()))
    return SDValue();

  // Don't duplicate a load with other uses.
  if (!InVec.hasOneUse())
    return SDValue();

  SmallVector<int, 16> ShuffleMask;
  bool UnaryShuffle;
  if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(),
                            ShuffleMask, UnaryShuffle))
    return SDValue();

  // Select the input vector, guarding against an out-of-range extract index.
  unsigned NumElems = CurrentVT.getVectorNumElements();
  int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
  int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt];
  SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
                                         : InVec.getOperand(1);

  // If inputs to shuffle are the same for both ops, then allow 2 uses
  unsigned AllowedUses = InVec.getNumOperands() > 1 &&
                         InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;

  if (LdNode.getOpcode() == ISD::BITCAST) {
    // Don't duplicate a load with other uses.
    if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
      return SDValue();

    AllowedUses = 1; // only allow 1 load use if we have a bitcast
    LdNode = LdNode.getOperand(0);
  }

  if (!ISD::isNormalLoad(LdNode.getNode()))
    return SDValue();

  LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);

  if (!LN0 || !LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
    return SDValue();

  EVT EltVT = N->getValueType(0);
  // If there's a bitcast before the shuffle, check if the load type and
  // alignment are valid.
  unsigned Align = LN0->getAlignment();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
      EltVT.getTypeForEVT(*DAG.getContext()));

  if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
    return SDValue();

  // All checks match so transform back to vector_shuffle so that DAG combiner
  // can finish the job.
  SDLoc dl(N);

  // Create a shuffle node taking into account the case that it's a unary
  // shuffle.
  SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT)
                                   : InVec.getOperand(1);
  Shuffle = DAG.getVectorShuffle(CurrentVT, dl,
                                 InVec.getOperand(0), Shuffle,
                                 &ShuffleMask[0]);
  Shuffle = DAG.getNode(ISD::BITCAST, dl, OriginalVT, Shuffle);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
                     EltNo);
}
/// \brief Detect bitcasts between i32 to x86mmx low word. Since MMX types are
/// special and don't usually play with other vector types, it's better to
/// handle them early to be sure we emit efficient code by avoiding
/// store-load conversions.
static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG) {
  if (N->getValueType(0) != MVT::x86mmx ||
      N->getOperand(0)->getOpcode() != ISD::BUILD_VECTOR ||
      N->getOperand(0)->getValueType(0) != MVT::v2i32)
    return SDValue();

  SDValue V = N->getOperand(0);
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(V.getOperand(1));
  if (C && C->getZExtValue() == 0 && V.getOperand(0).getValueType() == MVT::i32)
    return DAG.getNode(X86ISD::MMX_MOVW2D, SDLoc(V.getOperand(0)),
                       N->getValueType(0), V.getOperand(0));

  return SDValue();
}
/// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
/// generation and convert it from being a bunch of shuffles and extracts
/// into a somewhat faster sequence. For i686, the best sequence is apparently
/// storing the value and loading scalars back, while for x64 we should
/// use 64-bit extracts and shifts.
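///
/// For example (added note), extracting all four i32 lanes of a v4i32 on
/// x86-64 becomes two 64-bit element extracts plus shifts and truncates:
///   lane0 = trunc(lo64), lane1 = trunc(lo64 >> 32),
///   lane2 = trunc(hi64), lane3 = trunc(hi64 >> 32).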
static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
                                         TargetLowering::DAGCombinerInfo &DCI) {
  SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI);
  if (NewOp.getNode())
    return NewOp;

  SDValue InputVector = N->getOperand(0);

  // Detect mmx to i32 conversion through a v2i32 elt extract.
  if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
      N->getValueType(0) == MVT::i32 &&
      InputVector.getValueType() == MVT::v2i32) {

    // The bitcast source is a direct mmx result.
    SDValue MMXSrc = InputVector.getNode()->getOperand(0);
    if (MMXSrc.getValueType() == MVT::x86mmx)
      return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
                         N->getValueType(0),
                         InputVector.getNode()->getOperand(0));

    // The mmx is indirect: (i64 extract_elt (v1i64 bitcast (x86mmx ...))).
    SDValue MMXSrcOp = MMXSrc.getOperand(0);
    if (MMXSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT && MMXSrc.hasOneUse() &&
        MMXSrc.getValueType() == MVT::i64 && MMXSrcOp.hasOneUse() &&
        MMXSrcOp.getOpcode() == ISD::BITCAST &&
        MMXSrcOp.getValueType() == MVT::v1i64 &&
        MMXSrcOp.getOperand(0).getValueType() == MVT::x86mmx)
      return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
                         N->getValueType(0),
                         MMXSrcOp.getOperand(0));
  }
  // Only operate on vectors of 4 elements, where the alternative shuffling
  // gets to be more expensive.
  if (InputVector.getValueType() != MVT::v4i32)
    return SDValue();

  // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
  // single use which is a sign-extend or zero-extend, and all elements are
  // used.
  SmallVector<SDNode *, 4> Uses;
  unsigned ExtractedElements = 0;
  for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
       UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
    if (UI.getUse().getResNo() != InputVector.getResNo())
      return SDValue();

    SDNode *Extract = *UI;
    if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();

    if (Extract->getValueType(0) != MVT::i32)
      return SDValue();
    if (!Extract->hasOneUse())
      return SDValue();
    if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
        Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
      return SDValue();
    if (!isa<ConstantSDNode>(Extract->getOperand(1)))
      return SDValue();

    // Record which element was extracted.
    ExtractedElements |=
      1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();

    Uses.push_back(Extract);
  }

  // If not all the elements were used, this may not be worthwhile.
  if (ExtractedElements != 15)
    return SDValue();
  // Ok, we've now decided to do the transformation.
  // If 64-bit shifts are legal, use the extract-shift sequence,
  // otherwise bounce the vector off the cache.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Vals[4];
  SDLoc dl(InputVector);

  if (TLI.isOperationLegal(ISD::SRA, MVT::i64)) {
    SDValue Cst = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, InputVector);
    EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy();
    SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
      DAG.getConstant(0, VecIdxTy));
    SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
      DAG.getConstant(1, VecIdxTy));

    SDValue ShAmt = DAG.getConstant(32,
      DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64));
    Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
    Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
      DAG.getNode(ISD::SRA, dl, MVT::i64, BottomHalf, ShAmt));
    Vals[2] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, TopHalf);
    Vals[3] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
      DAG.getNode(ISD::SRA, dl, MVT::i64, TopHalf, ShAmt));
  } else {
    // Store the value to a temporary stack slot.
    SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
    SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
      MachinePointerInfo(), false, false, 0);

    EVT ElementType = InputVector.getValueType().getVectorElementType();
    unsigned EltSize = ElementType.getSizeInBits() / 8;

    // Replace each use (extract) with a load of the appropriate element.
    for (unsigned i = 0; i < 4; ++i) {
      uint64_t Offset = EltSize * i;
      SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());

      SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
                                       StackPtr, OffsetVal);

      // Load the scalar.
      Vals[i] = DAG.getLoad(ElementType, dl, Ch,
                            ScalarAddr, MachinePointerInfo(),
                            false, false, false, 0);
    }
  }

  // Replace the extracts
  for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
       UE = Uses.end(); UI != UE; ++UI) {
    SDNode *Extract = *UI;

    SDValue Idx = Extract->getOperand(1);
    uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
    DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), Vals[IdxVal]);
  }

  // The replacement was made in place; don't return anything.
  return SDValue();
}
/// \brief Matches a VSELECT onto min/max or returns 0 if the node doesn't match.
static std::pair<unsigned, bool>
matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, SDValue RHS,
                   SelectionDAG &DAG, const X86Subtarget *Subtarget) {
  if (!VT.isVector())
    return std::make_pair(0, false);

  bool NeedSplit = false;
  switch (VT.getSimpleVT().SimpleTy) {
  default: return std::make_pair(0, false);
  case MVT::v4i64:
  case MVT::v2i64:
    if (!Subtarget->hasVLX())
      return std::make_pair(0, false);
    break;
  case MVT::v64i8:
  case MVT::v32i16:
    if (!Subtarget->hasBWI())
      return std::make_pair(0, false);
    break;
  case MVT::v16i32:
  case MVT::v8i64:
    if (!Subtarget->hasAVX512())
      return std::make_pair(0, false);
    break;
  case MVT::v32i8:
  case MVT::v16i16:
  case MVT::v8i32:
    if (!Subtarget->hasAVX2())
      NeedSplit = true;
    if (!Subtarget->hasAVX())
      return std::make_pair(0, false);
    break;
  case MVT::v16i8:
  case MVT::v8i16:
  case MVT::v4i32:
    if (!Subtarget->hasSSE2())
      return std::make_pair(0, false);
  }

  // SSE2 has only a small subset of the operations.
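  // (Added note: SSE2 itself provides only PMINUB/PMAXUB for unsigned v16i8
  // and PMINSW/PMAXSW for signed v8i16; SSE4.1 supplies the remaining
  // signed/unsigned element-width combinations, which is what the two flags
  // below encode.)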
  bool hasUnsigned = Subtarget->hasSSE41() ||
                     (Subtarget->hasSSE2() && VT == MVT::v16i8);
  bool hasSigned = Subtarget->hasSSE41() ||
                   (Subtarget->hasSSE2() && VT == MVT::v8i16);

  ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
  unsigned Opc = 0;

  // Check for x CC y ? x : y.
  if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
      DAG.isEqualTo(RHS, Cond.getOperand(1))) {
    switch (CC) {
    default: break;
    case ISD::SETULT:
    case ISD::SETULE:
      Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
    case ISD::SETUGT:
    case ISD::SETUGE:
      Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
    case ISD::SETLT:
    case ISD::SETLE:
      Opc = hasSigned ? X86ISD::SMIN : 0; break;
    case ISD::SETGT:
    case ISD::SETGE:
      Opc = hasSigned ? X86ISD::SMAX : 0; break;
    }
  // Check for x CC y ? y : x -- a min/max with reversed arms.
  } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
             DAG.isEqualTo(RHS, Cond.getOperand(0))) {
    switch (CC) {
    default: break;
    case ISD::SETULT:
    case ISD::SETULE:
      Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
    case ISD::SETUGT:
    case ISD::SETUGE:
      Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
    case ISD::SETLT:
    case ISD::SETLE:
      Opc = hasSigned ? X86ISD::SMAX : 0; break;
    case ISD::SETGT:
    case ISD::SETGE:
      Opc = hasSigned ? X86ISD::SMIN : 0; break;
    }
  }

  return std::make_pair(Opc, NeedSplit);
}
static SDValue
transformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget *Subtarget) {
  SDLoc dl(N);
  SDValue Cond = N->getOperand(0);
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);

  if (Cond.getOpcode() == ISD::SIGN_EXTEND) {
    SDValue CondSrc = Cond->getOperand(0);
    if (CondSrc->getOpcode() == ISD::SIGN_EXTEND_INREG)
      Cond = CondSrc->getOperand(0);
  }

  if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
    return SDValue();

  // A vselect where all conditions and data are constants can be optimized into
  // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
  if (ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
      ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
    return SDValue();

  unsigned MaskValue = 0;
  if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
    return SDValue();

  MVT VT = N->getSimpleValueType(0);
  unsigned NumElems = VT.getVectorNumElements();
  SmallVector<int, 8> ShuffleMask(NumElems, -1);
  for (unsigned i = 0; i < NumElems; ++i) {
    // Be sure we emit undef where we can.
    if (Cond.getOperand(i)->getOpcode() == ISD::UNDEF)
      ShuffleMask[i] = -1;
    else
      ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
  }

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!TLI.isShuffleMaskLegal(ShuffleMask, VT))
    return SDValue();
  return DAG.getVectorShuffle(VT, dl, LHS, RHS, &ShuffleMask[0]);
}
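// Example for transformVSELECTtoBlendVECTOR_SHUFFLE (illustrative): for a
// v4i32 vselect whose constant condition yields MaskValue = 0b0101 (bit i
// set selects from RHS), the loop above builds the shuffle mask
// <4, 1, 6, 3>, i.e. elements 0 and 2 come from RHS and elements 1 and 3
// come from LHS.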
/// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT
/// nodes.
static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    const X86Subtarget *Subtarget) {
  SDLoc DL(N);
  SDValue Cond = N->getOperand(0);
  // Get the LHS/RHS of the select.
  SDValue LHS = N->getOperand(1);
  SDValue RHS = N->getOperand(2);
  EVT VT = LHS.getValueType();
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // If we have SSE[12] support, try to form min/max nodes. SSE min/max
  // instructions match the semantics of the common C idiom x<y?x:y but not
  // x<=y?x:y, because of how they handle negative zero (which can be
  // ignored in unsafe-math mode).
  // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
  if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
      VT != MVT::f80 && (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
      (Subtarget->hasSSE2() ||
       (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

    unsigned Opcode = 0;
    // Check for x CC y ? x : y.
    if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
        DAG.isEqualTo(RHS, Cond.getOperand(1))) {
      switch (CC) {
      default: break;
      case ISD::SETULT:
        // Converting this to a min would handle NaNs incorrectly, and swapping
        // the operands would cause it to handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
          if (!DAG.getTarget().Options.UnsafeFPMath &&
              !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETOLE:
        // Converting this to a min would handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.getTarget().Options.UnsafeFPMath &&
            !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
          break;
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETULE:
        // Converting this to a min would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
      case ISD::SETOLT:
      case ISD::SETLT:
      case ISD::SETLE:
        Opcode = X86ISD::FMIN;
        break;

      case ISD::SETOGE:
        // Converting this to a max would handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.getTarget().Options.UnsafeFPMath &&
            !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
          break;
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETUGT:
        // Converting this to a max would handle NaNs incorrectly, and swapping
        // the operands would cause it to handle comparisons between positive
        // and negative zero incorrectly.
        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
          if (!DAG.getTarget().Options.UnsafeFPMath &&
              !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETUGE:
        // Converting this to a max would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
      case ISD::SETOGT:
      case ISD::SETGT:
      case ISD::SETGE:
        Opcode = X86ISD::FMAX;
        break;
      }
    // Check for x CC y ? y : x -- a min/max with reversed arms.
    } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
               DAG.isEqualTo(RHS, Cond.getOperand(0))) {
      switch (CC) {
      default: break;
      case ISD::SETOGE:
        // Converting this to a min would handle comparisons between positive
        // and negative zero incorrectly, and swapping the operands would
        // cause it to handle NaNs incorrectly.
        if (!DAG.getTarget().Options.UnsafeFPMath &&
            !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
          if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETUGT:
        // Converting this to a min would handle NaNs incorrectly.
        if (!DAG.getTarget().Options.UnsafeFPMath &&
            (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
          break;
        Opcode = X86ISD::FMIN;
        break;
      case ISD::SETUGE:
        // Converting this to a min would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
      case ISD::SETOGT:
      case ISD::SETGT:
      case ISD::SETGE:
        Opcode = X86ISD::FMIN;
        break;

      case ISD::SETULT:
        // Converting this to a max would handle NaNs incorrectly.
        if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
          break;
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETOLE:
        // Converting this to a max would handle comparisons between positive
        // and negative zero incorrectly, and swapping the operands would
        // cause it to handle NaNs incorrectly.
        if (!DAG.getTarget().Options.UnsafeFPMath &&
            !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
          if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
            break;
          std::swap(LHS, RHS);
        }
        Opcode = X86ISD::FMAX;
        break;
      case ISD::SETULE:
        // Converting this to a max would handle both negative zeros and NaNs
        // incorrectly, but we can swap the operands to fix both.
        std::swap(LHS, RHS);
      case ISD::SETOLT:
      case ISD::SETLT:
      case ISD::SETLE:
        Opcode = X86ISD::FMAX;
        break;
      }
    }

    if (Opcode)
      return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
  }
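  // Illustrative example: for 'x < y ? x : y' on f32 with operands in source
  // order, SETLT maps directly to X86ISD::FMIN (a single MINSS), since MINSS
  // implements exactly the x<y?x:y idiom; the guarded cases above cover the
  // remaining predicates, where NaNs or signed zeros could otherwise change
  // the result.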
  EVT CondVT = Cond.getValueType();
  if (Subtarget->hasAVX512() && VT.isVector() && CondVT.isVector() &&
      CondVT.getVectorElementType() == MVT::i1) {
    // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
    // lowering on KNL. In this case we convert it to
    // v16i8 (select v16i8, v16i8, v16i8) and use an AVX instruction.
    // The same situation holds for all 128 and 256-bit vectors of i8 and i16.
    // Since SKX these selects have a proper lowering.
    EVT OpVT = LHS.getValueType();
    if ((OpVT.is128BitVector() || OpVT.is256BitVector()) &&
        (OpVT.getVectorElementType() == MVT::i8 ||
         OpVT.getVectorElementType() == MVT::i16) &&
        !(Subtarget->hasBWI() && Subtarget->hasVLX())) {
      Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond);
      DCI.AddToWorklist(Cond.getNode());
      return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS);
    }
  }
  // If this is a select between two integer constants, try to do some
  // optimizations.
  if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
    if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
      // Don't do this for crazy integer types.
      if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
        // If this is efficiently invertible, canonicalize the LHSC/RHSC values
        // so that TrueC (the true value) is larger than FalseC.
        bool NeedsCondInvert = false;

        if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
            // Efficiently invertible.
            (Cond.getOpcode() == ISD::SETCC ||  // setcc -> invertible.
             (Cond.getOpcode() == ISD::XOR &&   // xor(X, C) -> invertible.
              isa<ConstantSDNode>(Cond.getOperand(1))))) {
          NeedsCondInvert = true;
          std::swap(TrueC, FalseC);
        }

        // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0.
        if (FalseC->getAPIntValue() == 0 &&
            TrueC->getAPIntValue().isPowerOf2()) {
          if (NeedsCondInvert) // Invert the condition if needed.
            Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
                               DAG.getConstant(1, Cond.getValueType()));

          // Zero extend the condition if needed.
          Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);

          unsigned ShAmt = TrueC->getAPIntValue().logBase2();
          return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
                             DAG.getConstant(ShAmt, MVT::i8));
        }

        // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst.
        if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
          if (NeedsCondInvert) // Invert the condition if needed.
            Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
                               DAG.getConstant(1, Cond.getValueType()));

          // Zero extend the condition if needed.
          Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
                             FalseC->getValueType(0), Cond);
          return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                             SDValue(FalseC, 0));
        }

        // Optimize cases that will turn into an LEA instruction. This requires
        // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
        if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
          uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
          if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;

          bool isFastMultiplier = false;
          if (Diff < 10) {
            switch ((unsigned char)Diff) {
            default: break;
            case 1:  // result = add base, cond
            case 2:  // result = lea base(    , cond*2)
            case 3:  // result = lea base(cond, cond*2)
            case 4:  // result = lea base(    , cond*4)
            case 5:  // result = lea base(cond, cond*4)
            case 8:  // result = lea base(    , cond*8)
            case 9:  // result = lea base(cond, cond*8)
              isFastMultiplier = true;
              break;
            }
          }

          if (isFastMultiplier) {
            APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
            if (NeedsCondInvert) // Invert the condition if needed.
              Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
                                 DAG.getConstant(1, Cond.getValueType()));

            // Zero extend the condition if needed.
            Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
                               Cond);
            // Scale the condition by the difference.
            if (Diff != 1)
              Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
                                 DAG.getConstant(Diff, Cond.getValueType()));

            // Add the base if non-zero.
            if (FalseC->getAPIntValue() != 0)
              Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                                 SDValue(FalseC, 0));
            return Cond;
          }
        }
      }
  }
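  // Illustrative example: 'cond ? 13 : 8' has Diff == 5, a fast multiplier,
  // so it becomes zext(cond)*5 + 8, which fits a single LEA
  // (e.g. leal 8(%rax,%rax,4), %eax) instead of a compare and branch.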
  // Canonicalize max and min:
  // (x > y) ? x : y -> (x >= y) ? x : y
  // (x < y) ? x : y -> (x <= y) ? x : y
  // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
  // the need for an extra compare
  // against zero. e.g.
  // (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0
  // subl   %esi, %edi
  // testl  %edi, %edi
  // movl   $0, %eax
  // cmovgl %edi, %eax
  // =>
  // xorl   %eax, %eax
  // subl   %esi, %edi
  // cmovsl %eax, %edi
  if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
      DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
      DAG.isEqualTo(RHS, Cond.getOperand(1))) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
    switch (CC) {
    default: break;
    case ISD::SETLT:
    case ISD::SETGT: {
      ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
      Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
                          Cond.getOperand(0), Cond.getOperand(1), NewCC);
      return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
    }
    }
  }
  // Early exit check
  if (!TLI.isTypeLegal(VT))
    return SDValue();

  // Match VSELECTs into subs with unsigned saturation.
  if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
      // psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
      ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
       (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
    ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();

    // Check if one of the arms of the VSELECT is a zero vector. If it's on the
    // left side invert the predicate to simplify logic below.
    SDValue Other;
    if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
      Other = RHS;
      CC = ISD::getSetCCInverse(CC, true);
    } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
      Other = LHS;
    }

    if (Other.getNode() && Other->getNumOperands() == 2 &&
        DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
      SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
      SDValue CondRHS = Cond->getOperand(1);

      // Look for a general sub with unsigned saturation first.
      // x >= y ? x-y : 0 --> subus x, y
      // x >  y ? x-y : 0 --> subus x, y
      if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
          Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
        return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);

      if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS))
        if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
          if (auto *CondRHSBV = dyn_cast<BuildVectorSDNode>(CondRHS))
            if (auto *CondRHSConst = CondRHSBV->getConstantSplatNode())
              // If the RHS is a constant we have to reverse the const
              // canonicalization.
              // x > C-1 ? x+-C : 0 --> subus x, C
              if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
                  CondRHSConst->getAPIntValue() ==
                      (-OpRHSConst->getAPIntValue() - 1))
                return DAG.getNode(
                    X86ISD::SUBUS, DL, VT, OpLHS,
                    DAG.getConstant(-OpRHSConst->getAPIntValue(), VT));

          // Another special case: If C was a sign bit, the sub has been
          // canonicalized into a xor.
          // FIXME: Would it be better to use computeKnownBits to determine
          //        whether it's safe to decanonicalize the xor?
          // x s< 0 ? x^C : 0 --> subus x, C
          if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
              ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
              OpRHSConst->getAPIntValue().isSignBit())
            // Note that we have to rebuild the RHS constant here to ensure we
            // don't rely on particular values of undef lanes.
            return DAG.getNode(
                X86ISD::SUBUS, DL, VT, OpLHS,
                DAG.getConstant(OpRHSConst->getAPIntValue(), VT));
        }
    }
  }
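  // Illustrative example: for v16i8, 'x >u 41 ? x + (-42) : 0' is the
  // canonicalized form of 'x >= 42 ? x - 42 : 0' and is rebuilt above as
  // (X86ISD::SUBUS x, 42), i.e. a single PSUBUSB.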
  // Try to match a min/max vector operation.
  if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC) {
    std::pair<unsigned, bool> ret =
        matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget);
    unsigned Opc = ret.first;
    bool NeedSplit = ret.second;

    if (Opc && NeedSplit) {
      unsigned NumElems = VT.getVectorNumElements();
      // Extract the LHS vectors
      SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, DL);
      SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, DL);

      // Extract the RHS vectors
      SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, DL);
      SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, DL);

      // Create min/max for each subvector
      LHS = DAG.getNode(Opc, DL, LHS1.getValueType(), LHS1, RHS1);
      RHS = DAG.getNode(Opc, DL, LHS2.getValueType(), LHS2, RHS2);

      // Merge the result
      return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS, RHS);
    } else if (Opc)
      return DAG.getNode(Opc, DL, VT, LHS, RHS);
  }
  // Simplify vector selection if condition value type matches vselect
  // operand type.
  if (N->getOpcode() == ISD::VSELECT && CondVT == VT) {
    assert(Cond.getValueType().isVector() &&
           "vector select expects a vector selector!");

    bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
    bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());

    // Try to invert the condition if the true value is not all 1s and the
    // false value is not all 0s.
    if (!TValIsAllOnes && !FValIsAllZeros &&
        // Check if the selector will be produced by CMPP*/PCMP*.
        Cond.getOpcode() == ISD::SETCC &&
        // Check if SETCC has already been promoted.
        TLI.getSetCCResultType(*DAG.getContext(), VT) == CondVT) {
      bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
      bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());

      if (TValIsAllZeros || FValIsAllOnes) {
        SDValue CC = Cond.getOperand(2);
        ISD::CondCode NewCC =
          ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
                               Cond.getOperand(0).getValueType().isInteger());
        Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1),
                            NewCC);
        std::swap(LHS, RHS);
        TValIsAllOnes = FValIsAllOnes;
        FValIsAllZeros = TValIsAllZeros;
      }
    }

    if (TValIsAllOnes || FValIsAllZeros) {
      SDValue Ret;

      if (TValIsAllOnes && FValIsAllZeros)
        Ret = Cond;
      else if (TValIsAllOnes)
        Ret = DAG.getNode(ISD::OR, DL, CondVT, Cond,
                          DAG.getNode(ISD::BITCAST, DL, CondVT, RHS));
      else if (FValIsAllZeros)
        Ret = DAG.getNode(ISD::AND, DL, CondVT, Cond,
                          DAG.getNode(ISD::BITCAST, DL, CondVT, LHS));

      return DAG.getNode(ISD::BITCAST, DL, VT, Ret);
    }
  }
  // If we know that this node is legal then we know that it is going to be
  // matched by one of the SSE/AVX BLEND instructions. These instructions only
  // depend on the highest bit in each word. Try to use SimplifyDemandedBits
  // to simplify previous instructions.
  if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
      !DCI.isBeforeLegalize() &&
      // We explicitly check against SSE4.1, v8i16 and v16i16 because, although
      // vselect nodes may be marked as Custom, they might only be legal when
      // Cond is a build_vector of constants. This will be taken care of in
      // a later condition.
      (TLI.isOperationLegalOrCustom(ISD::VSELECT, VT) &&
       Subtarget->hasSSE41() && VT != MVT::v16i16 && VT != MVT::v8i16) &&
      // Don't optimize vector of constants. Those are handled by
      // the generic code and all the bits must be properly set for
      // the generic optimizer.
      !ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
    unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();

    // Don't optimize vector selects that map to mask-registers.
    if (BitWidth == 1)
      return SDValue();

    assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
    APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);

    APInt KnownZero, KnownOne;
    TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
                                          DCI.isBeforeLegalizeOps());
    if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) ||
        TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne,
                                 TLO)) {
      // If we changed the computation somewhere in the DAG, this change
      // will affect all users of Cond.
      // Make sure it is fine and update all the nodes so that we do not
      // use the generic VSELECT anymore. Otherwise, we may perform
      // wrong optimizations, as we messed up the actual expectation
      // for the vector boolean values.
      if (Cond != TLO.Old) {
        // Check all uses of the condition operand to see whether it will be
        // consumed by non-BLEND instructions, which may depend on all bits
        // being set properly.
        for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
             I != E; ++I)
          if (I->getOpcode() != ISD::VSELECT)
            // TODO: Add other opcodes eventually lowered into BLEND.
            return SDValue();

        // Update all the users of the condition, before committing the change,
        // so that the VSELECT optimizations that expect the correct vector
        // boolean value will not be triggered.
        for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
             I != E; ++I)
          DAG.ReplaceAllUsesOfValueWith(
              SDValue(*I, 0),
              DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(*I), I->getValueType(0),
                          Cond, I->getOperand(1), I->getOperand(2)));
        DCI.CommitTargetLoweringOpt(TLO);
        return SDValue();
      }
      // At this point, only Cond is changed. Change the condition
      // just for N to keep the opportunity to optimize all other
      // users their own way.
      DAG.ReplaceAllUsesOfValueWith(
          SDValue(N, 0),
          DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(N), N->getValueType(0),
                      TLO.New, N->getOperand(1), N->getOperand(2)));
      return SDValue();
    }
  }
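  // Illustrative example: if Cond is (sra X, 31) for a 32-bit-element blend,
  // only the sign bit of each lane is demanded, so SimplifyDemandedBits can
  // replace Cond by X itself; the users are then rewritten to
  // X86ISD::SHRUNKBLEND, which promises to inspect only that bit.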
  // We should generate an X86ISD::BLENDI from a vselect if its argument
  // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
  // constants. This specific pattern gets generated when we split a
  // selector for a 512 bit vector in a machine without AVX512 (but with
  // 256-bit vectors), during legalization:
  //
  // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
  //
  // Iff we find this pattern and the build_vectors are built from
  // constants, we translate the vselect into a shuffle_vector that we
  // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
  if ((N->getOpcode() == ISD::VSELECT ||
       N->getOpcode() == X86ISD::SHRUNKBLEND) &&
      !DCI.isBeforeLegalize()) {
    SDValue Shuffle = transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
    if (Shuffle.getNode())
      return Shuffle;
  }

  return SDValue();
}
// Check whether a boolean test is testing a boolean value generated by
// X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
// code.
//
// Simplify the following patterns:
// (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
// (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
// to (Op EFLAGS Cond)
//
// (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
// (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
// to (Op EFLAGS !Cond)
//
// where Op could be BRCOND or CMOV.
//
static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
  // Quit if not CMP and SUB with its value result used.
  if (Cmp.getOpcode() != X86ISD::CMP &&
      (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
    return SDValue();

  // Quit if not used as a boolean value.
  if (CC != X86::COND_E && CC != X86::COND_NE)
    return SDValue();

  // Check CMP operands. One of them should be 0 or 1 and the other should be
  // an SetCC or extended from it.
  SDValue Op1 = Cmp.getOperand(0);
  SDValue Op2 = Cmp.getOperand(1);

  SDValue SetCC;
  const ConstantSDNode* C = nullptr;
  bool needOppositeCond = (CC == X86::COND_E);
  bool checkAgainstTrue = false; // Is it a comparison against 1?

  if ((C = dyn_cast<ConstantSDNode>(Op1)))
    SetCC = Op2;
  else if ((C = dyn_cast<ConstantSDNode>(Op2)))
    SetCC = Op1;
  else // Quit if neither operand is a constant.
    return SDValue();

  if (C->getZExtValue() == 1) {
    needOppositeCond = !needOppositeCond;
    checkAgainstTrue = true;
  } else if (C->getZExtValue() != 0)
    // Quit if the constant is neither 0 nor 1.
    return SDValue();

  bool truncatedToBoolWithAnd = false;
  // Skip (zext $x), (trunc $x), or (and $x, 1) node.
  while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
         SetCC.getOpcode() == ISD::TRUNCATE ||
         SetCC.getOpcode() == ISD::AND) {
    if (SetCC.getOpcode() == ISD::AND) {
      int OpIdx = -1;
      ConstantSDNode *CS;
      if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(0))) &&
          CS->getZExtValue() == 1)
        OpIdx = 1;
      if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(1))) &&
          CS->getZExtValue() == 1)
        OpIdx = 0;
      if (OpIdx == -1)
        break;
      SetCC = SetCC.getOperand(OpIdx);
      truncatedToBoolWithAnd = true;
    } else
      SetCC = SetCC.getOperand(0);
  }

  switch (SetCC.getOpcode()) {
  case X86ISD::SETCC_CARRY:
    // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
    // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
    // i.e. it's a comparison against true but the result of SETCC_CARRY is not
    // truncated to i1 using 'and'.
    if (checkAgainstTrue && !truncatedToBoolWithAnd)
      break;
    assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
           "Invalid use of SETCC_CARRY!");
    // FALL THROUGH
  case X86ISD::SETCC:
    // Set the condition code or opposite one if necessary.
    CC = X86::CondCode(SetCC.getConstantOperandVal(0));
    if (needOppositeCond)
      CC = X86::GetOppositeBranchCondition(CC);
    return SetCC.getOperand(1);
  case X86ISD::CMOV: {
    // Check whether false/true value has canonical one, i.e. 0 or 1.
    ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
    ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
    // Quit if true value is not a constant.
    if (!TVal)
      return SDValue();
    // Quit if false value is not a constant.
    if (!FVal) {
      SDValue Op = SetCC.getOperand(0);
      // Skip 'zext' or 'trunc' node.
      if (Op.getOpcode() == ISD::ZERO_EXTEND ||
          Op.getOpcode() == ISD::TRUNCATE)
        Op = Op.getOperand(0);
      // A special case for rdrand/rdseed, where 0 is set if false cond is
      // found.
      if ((Op.getOpcode() != X86ISD::RDRAND &&
           Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
        return SDValue();
    }
    // Quit if false value is not the constant 0 or 1.
    bool FValIsFalse = true;
    if (FVal && FVal->getZExtValue() != 0) {
      if (FVal->getZExtValue() != 1)
        return SDValue();
      // If FVal is 1, opposite cond is needed.
      needOppositeCond = !needOppositeCond;
      FValIsFalse = false;
    }
    // Quit if TVal is not the constant opposite of FVal.
    if (FValIsFalse && TVal->getZExtValue() != 1)
      return SDValue();
    if (!FValIsFalse && TVal->getZExtValue() != 0)
      return SDValue();
    CC = X86::CondCode(SetCC.getConstantOperandVal(2));
    if (needOppositeCond)
      CC = X86::GetOppositeBranchCondition(CC);
    return SetCC.getOperand(3);
  }
  }

  return SDValue();
}
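// Example for checkBoolTestSetCCCombine (illustrative): for
//   (brcond (cmp (setcc COND_B, EFLAGS), 0, COND_NE))
// the helper returns EFLAGS with CC set to COND_B, letting the caller
// rebuild (brcond COND_B, EFLAGS) and drop the materialized boolean and
// its compare.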
/// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget *Subtarget) {
  SDLoc DL(N);

  // If the flag operand isn't dead, don't touch this CMOV.
  if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
    return SDValue();

  SDValue FalseOp = N->getOperand(0);
  SDValue TrueOp = N->getOperand(1);
  X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
  SDValue Cond = N->getOperand(3);

  if (CC == X86::COND_E || CC == X86::COND_NE) {
    switch (Cond.getOpcode()) {
    default: break;
    case X86ISD::BSR:
    case X86ISD::BSF:
      // If operand of BSR / BSF are proven never zero, then ZF cannot be set.
      if (DAG.isKnownNeverZero(Cond.getOperand(0)))
        return (CC == X86::COND_E) ? FalseOp : TrueOp;
    }
  }

  SDValue Flags;

  Flags = checkBoolTestSetCCCombine(Cond, CC);
  if (Flags.getNode() &&
      // Extra check as FCMOV only supports a subset of X86 cond.
      (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
    SDValue Ops[] = { FalseOp, TrueOp,
                      DAG.getConstant(CC, MVT::i8), Flags };
    return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
  }

  // If this is a select between two integer constants, try to do some
  // optimizations. Note that the operands are ordered the opposite of SELECT
  // operands.
  if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
    if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
      // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
      // larger than FalseC (the false value).
      if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
        CC = X86::GetOppositeBranchCondition(CC);
        std::swap(TrueC, FalseC);
        std::swap(TrueOp, FalseOp);
      }

      // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
      // This is efficient for any integer data type (including i8/i16) and
      // shift amount.
      if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
        Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
                           DAG.getConstant(CC, MVT::i8), Cond);

        // Zero extend the condition if needed.
        Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);

        unsigned ShAmt = TrueC->getAPIntValue().logBase2();
        Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
                           DAG.getConstant(ShAmt, MVT::i8));
        if (N->getNumValues() == 2) // Dead flag value?
          return DCI.CombineTo(N, Cond, SDValue());
        return Cond;
      }

      // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst. This is efficient
      // for any integer data type, including i8/i16.
      if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
        Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
                           DAG.getConstant(CC, MVT::i8), Cond);

        // Zero extend the condition if needed.
        Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
                           FalseC->getValueType(0), Cond);
        Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                           SDValue(FalseC, 0));

        if (N->getNumValues() == 2) // Dead flag value?
          return DCI.CombineTo(N, Cond, SDValue());
        return Cond;
      }

      // Optimize cases that will turn into an LEA instruction. This requires
      // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
      if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
        uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
        if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;

        bool isFastMultiplier = false;
        if (Diff < 10) {
          switch ((unsigned char)Diff) {
          default: break;
          case 1:  // result = add base, cond
          case 2:  // result = lea base(    , cond*2)
          case 3:  // result = lea base(cond, cond*2)
          case 4:  // result = lea base(    , cond*4)
          case 5:  // result = lea base(cond, cond*4)
          case 8:  // result = lea base(    , cond*8)
          case 9:  // result = lea base(cond, cond*8)
            isFastMultiplier = true;
            break;
          }
        }

        if (isFastMultiplier) {
          APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
          Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
                             DAG.getConstant(CC, MVT::i8), Cond);
          // Zero extend the condition if needed.
          Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
                             Cond);
          // Scale the condition by the difference.
          if (Diff != 1)
            Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
                               DAG.getConstant(Diff, Cond.getValueType()));

          // Add the base if non-zero.
          if (FalseC->getAPIntValue() != 0)
            Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
                               SDValue(FalseC, 0));
          if (N->getNumValues() == 2) // Dead flag value?
            return DCI.CombineTo(N, Cond, SDValue());
          return Cond;
        }
      }
    }
  }

  // Handle these cases:
  // (select (x != c), e, c) -> select (x != c), e, x),
  // (select (x == c), c, e) -> select (x == c), x, e)
  // where c is an integer constant, and the "select" is the combination
  // of CMOV and CMP.
  //
  // The rationale for this change is that the conditional-move from a constant
  // needs two instructions, however, conditional-move from a register needs
  // only one instruction.
  //
  // CAVEAT: By replacing a constant with a symbolic value, it may obscure
  //         some instruction-combining opportunities. This opt needs to be
  //         postponed as late as possible.
  //
  if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
    // The DCI.xxxx conditions are provided to postpone the optimization as
    // late as possible.

    ConstantSDNode *CmpAgainst = nullptr;
    if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
        (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
        !isa<ConstantSDNode>(Cond.getOperand(0))) {

      if (CC == X86::COND_NE &&
          CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
        CC = X86::GetOppositeBranchCondition(CC);
        std::swap(TrueOp, FalseOp);
      }

      if (CC == X86::COND_E &&
          CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
        SDValue Ops[] = { FalseOp, Cond.getOperand(0),
                          DAG.getConstant(CC, MVT::i8), Cond };
        return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
      }
    }
  }

  return SDValue();
}
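// Example for PerformCMOVCombine (illustrative): for
//   (cmov FalseC, TrueC, E, (cmp X, TrueC))
// the COND_E arm above replaces the constant true arm with the compared
// register itself, so 'x == 7 ? 7 : y' becomes 'x == 7 ? x : y', saving
// the extra constant materialization.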
static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
                                                const X86Subtarget *Subtarget) {
  unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
  switch (IntNo) {
  default: return SDValue();
  // SSE/AVX/AVX2 blend intrinsics.
  case Intrinsic::x86_avx2_pblendvb:
  case Intrinsic::x86_avx2_pblendw:
  case Intrinsic::x86_avx2_pblendd_128:
  case Intrinsic::x86_avx2_pblendd_256:
    // Don't try to simplify this intrinsic if we don't have AVX2.
    if (!Subtarget->hasAVX2())
      return SDValue();
    // FALL-THROUGH
  case Intrinsic::x86_avx_blend_pd_256:
  case Intrinsic::x86_avx_blend_ps_256:
  case Intrinsic::x86_avx_blendv_pd_256:
  case Intrinsic::x86_avx_blendv_ps_256:
    // Don't try to simplify this intrinsic if we don't have AVX.
    if (!Subtarget->hasAVX())
      return SDValue();
    // FALL-THROUGH
  case Intrinsic::x86_sse41_pblendw:
  case Intrinsic::x86_sse41_blendpd:
  case Intrinsic::x86_sse41_blendps:
  case Intrinsic::x86_sse41_blendvps:
  case Intrinsic::x86_sse41_blendvpd:
  case Intrinsic::x86_sse41_pblendvb: {
    SDValue Op0 = N->getOperand(1);
    SDValue Op1 = N->getOperand(2);
    SDValue Mask = N->getOperand(3);

    // Don't try to simplify this intrinsic if we don't have SSE4.1.
    if (!Subtarget->hasSSE41())
      return SDValue();

    // fold (blend A, A, Mask) -> A
    if (Op0 == Op1)
      return Op0;
    // fold (blend A, B, allZeros) -> A
    if (ISD::isBuildVectorAllZeros(Mask.getNode()))
      return Op0;
    // fold (blend A, B, allOnes) -> B
    if (ISD::isBuildVectorAllOnes(Mask.getNode()))
      return Op1;

    // Simplify the case where the mask is a constant i32 value.
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mask)) {
      if (C->isNullValue())
        return Op0;
      if (C->isAllOnesValue())
        return Op1;
    }

    return SDValue();
  }

  // Packed SSE2/AVX2 arithmetic shift immediate intrinsics.
  case Intrinsic::x86_sse2_psrai_w:
  case Intrinsic::x86_sse2_psrai_d:
  case Intrinsic::x86_avx2_psrai_w:
  case Intrinsic::x86_avx2_psrai_d:
  case Intrinsic::x86_sse2_psra_w:
  case Intrinsic::x86_sse2_psra_d:
  case Intrinsic::x86_avx2_psra_w:
  case Intrinsic::x86_avx2_psra_d: {
    SDValue Op0 = N->getOperand(1);
    SDValue Op1 = N->getOperand(2);
    EVT VT = Op0.getValueType();
    assert(VT.isVector() && "Expected a vector type!");

    if (isa<BuildVectorSDNode>(Op1))
      Op1 = Op1.getOperand(0);

    if (!isa<ConstantSDNode>(Op1))
      return SDValue();

    EVT SVT = VT.getVectorElementType();
    unsigned SVTBits = SVT.getSizeInBits();

    ConstantSDNode *CND = cast<ConstantSDNode>(Op1);
    const APInt &C = APInt(SVTBits, CND->getAPIntValue().getZExtValue());
    uint64_t ShAmt = C.getZExtValue();

    // Don't try to convert this shift into an ISD::SRA if the shift
    // count is bigger than or equal to the element size.
    if (ShAmt >= SVTBits)
      return SDValue();

    // Trivial case: if the shift count is zero, then fold this
    // into the first operand.
    if (ShAmt == 0)
      return Op0;

    // Replace this packed shift intrinsic with a target independent
    // shift dag node.
    SDValue Splat = DAG.getConstant(C, VT);
    return DAG.getNode(ISD::SRA, SDLoc(N), VT, Op0, Splat);
  }
  }
}
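// Example for PerformINTRINSIC_WO_CHAINCombine (illustrative): a call such
// as _mm_srai_epi32(X, 3) arrives here roughly as an x86_sse2_psrai_d node;
// since the count 3 is a constant below the 32-bit element size, it is
// rewritten to the generic (ISD::SRA X, <3,3,3,3>), exposing it to
// target-independent combines. A count of 0 folds to X, and a blend whose
// mask is all zeros or all ones folds to the corresponding operand.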
/// PerformMulCombine - Optimize a single multiply with constant into two
/// operations in order to implement it with two cheaper instructions, e.g.
/// LEA + SHL, LEA + LEA.
static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT != MVT::i64 && VT != MVT::i32)
    return SDValue();

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C)
    return SDValue();
  uint64_t MulAmt = C->getZExtValue();
  if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
    return SDValue();

  uint64_t MulAmt1 = 0;
  uint64_t MulAmt2 = 0;
  if ((MulAmt % 9) == 0) {
    MulAmt1 = 9;
    MulAmt2 = MulAmt / 9;
  } else if ((MulAmt % 5) == 0) {
    MulAmt1 = 5;
    MulAmt2 = MulAmt / 5;
  } else if ((MulAmt % 3) == 0) {
    MulAmt1 = 3;
    MulAmt2 = MulAmt / 3;
  }
  if (MulAmt2 &&
      (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
    SDLoc DL(N);

    if (isPowerOf2_64(MulAmt2) &&
        !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
      // If second multiplier is pow2, issue it first. We want the multiply by
      // 3, 5, or 9 to be folded into the addressing mode unless the lone use
      // is an add.
      std::swap(MulAmt1, MulAmt2);

    SDValue NewMul;
    if (isPowerOf2_64(MulAmt1))
      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                           DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
    else
      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
                           DAG.getConstant(MulAmt1, VT));

    if (isPowerOf2_64(MulAmt2))
      NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
                           DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
    else
      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
                           DAG.getConstant(MulAmt2, VT));

    // Do not add new nodes to DAG combiner worklist.
    DCI.CombineTo(N, NewMul, false);
  }
  return SDValue();
}
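// Example for PerformMulCombine (illustrative): x*45 is neither a power of
// two nor 3/5/9, but 45 = 9*5, so it is rewritten as two MUL_IMM nodes that
// select to two LEAs, e.g.
//   leal (%rdi,%rdi,8), %eax   // x*9
//   leal (%rax,%rax,4), %eax   // (x*9)*5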
static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();

  // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
  // since the result of setcc_c is all zero's or all ones.
  if (VT.isInteger() && !VT.isVector() &&
      N1C && N0.getOpcode() == ISD::AND &&
      N0.getOperand(1).getOpcode() == ISD::Constant) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getOpcode() == X86ISD::SETCC_CARRY ||
        ((N00.getOpcode() == ISD::ANY_EXTEND ||
          N00.getOpcode() == ISD::ZERO_EXTEND) &&
         N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) {
      APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
      APInt ShAmt = N1C->getAPIntValue();
      Mask = Mask.shl(ShAmt);
      if (Mask != 0)
        return DAG.getNode(ISD::AND, SDLoc(N), VT,
                           N00, DAG.getConstant(Mask, VT));
    }
  }

  // Hardware support for vector shifts is sparse which makes us scalarize the
  // vector operations in many cases. Also, on Sandy Bridge ADD is faster than
  // a shl.
  // (shl V, 1) -> add V,V
  if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
    if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
      assert(N0.getValueType().isVector() && "Invalid vector shift type");
      // We shift all of the values by one. In many cases we do not have
      // hardware support for this operation. This is better expressed as an ADD
      // of two values.
      if (N1SplatC->getZExtValue() == 1)
        return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
    }

  return SDValue();
}
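// Example for PerformSHLCombine (illustrative): since SETCC_CARRY produces
// all-zeros or all-ones, (shl (and (setcc_carry), 1), 3) becomes
// (and (setcc_carry), 8); and a splat (shl V, <1,1,...>) becomes
// (add V, V), which is cheaper than a vector shift on many chips.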
/// \brief Returns a vector of 0s if the node in input is a vector logical
/// shift by a constant amount which is known to be bigger than or equal
/// to the vector element size in bits.
static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);

  if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
      (!Subtarget->hasInt256() ||
       (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
    return SDValue();

  SDValue Amt = N->getOperand(1);
  SDLoc DL(N);
  if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
    if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
      APInt ShiftAmt = AmtSplat->getAPIntValue();
      unsigned MaxAmount = VT.getVectorElementType().getSizeInBits();

      // SSE2/AVX2 logical shifts always return a vector of 0s
      // if the shift amount is bigger than or equal to
      // the element size. The constant shift amount will be
      // encoded as an 8-bit immediate.
      if (ShiftAmt.trunc(8).uge(MaxAmount))
        return getZeroVector(VT, Subtarget, DAG, DL);
    }

  return SDValue();
}
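// Example for performShiftToAllZeros (illustrative): (srl v8i16 X,
// <16,16,...>) shifts every lane by at least the element width, so the
// whole node folds to the v8i16 zero vector.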
/// PerformShiftCombine - Combine shifts.
static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
  if (N->getOpcode() == ISD::SHL) {
    SDValue V = PerformSHLCombine(N, DAG);
    if (V.getNode()) return V;
  }

  if (N->getOpcode() != ISD::SRA) {
    // Try to fold this logical shift into a zero vector.
    SDValue V = performShiftToAllZeros(N, DAG, Subtarget);
    if (V.getNode()) return V;
  }

  return SDValue();
}
// CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..))
// where both setccs reference the same FP CMP, and rewrite for CMPEQSS
// and friends. Likewise for OR -> CMPNEQSS.
static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
                            TargetLowering::DAGCombinerInfo &DCI,
                            const X86Subtarget *Subtarget) {
  unsigned opcode;

  // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
  // we're requiring SSE2 for both.
  if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue CMP0 = N0->getOperand(1);
    SDValue CMP1 = N1->getOperand(1);
    SDLoc DL(N);

    // The SETCCs should both refer to the same CMP.
    if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
      return SDValue();

    SDValue CMP00 = CMP0->getOperand(0);
    SDValue CMP01 = CMP0->getOperand(1);
    EVT VT = CMP00.getValueType();

    if (VT == MVT::f32 || VT == MVT::f64) {
      bool ExpectingFlags = false;
      // Check for any users that want flags:
      for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
           !ExpectingFlags && UI != UE; ++UI)
        switch (UI->getOpcode()) {
        default:
        case ISD::BR_CC:
        case ISD::BRCOND:
        case ISD::SELECT:
          ExpectingFlags = true;
          break;
        case ISD::CopyToReg:
        case ISD::SIGN_EXTEND:
        case ISD::ZERO_EXTEND:
        case ISD::ANY_EXTEND:
          break;
        }

      if (!ExpectingFlags) {
        enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
        enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);

        if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
          X86::CondCode tmp = cc0;
          cc0 = cc1;
          cc1 = tmp;
        }

        if ((cc0 == X86::COND_E  && cc1 == X86::COND_NP) ||
            (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
          // FIXME: need symbolic constants for these magic numbers.
          // See X86ATTInstPrinter.cpp:printSSECC().
          unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
          if (Subtarget->hasAVX512()) {
            SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
                                         CMP01, DAG.getConstant(x86cc, MVT::i8));
            if (N->getValueType(0) != MVT::i1)
              return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
                                 FSetCC);
            return FSetCC;
          }
          SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
                                              CMP00.getValueType(), CMP00, CMP01,
                                              DAG.getConstant(x86cc, MVT::i8));

          bool is64BitFP = (CMP00.getValueType() == MVT::f64);
          MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;

          if (is64BitFP && !Subtarget->is64Bit()) {
            // On a 32-bit target, we cannot bitcast the 64-bit float to a
            // 64-bit integer, since that's not a legal type. Since
            // OnesOrZeroesF is all ones or all zeroes, we don't need all the
            // bits, but can do this little dance to extract the lowest 32 bits
            // and work with those going forward.
            SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
                                           OnesOrZeroesF);
            SDValue Vector32 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32,
                                           Vector64);
            OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
                                        Vector32, DAG.getIntPtrConstant(0));
            IntVT = MVT::i32;
          }

          SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, IntVT,
                                              OnesOrZeroesF);
          SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
                                      DAG.getConstant(1, IntVT));
          SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed);
          return OneBitOfTruth;
        }
      }
    }
  }
  return SDValue();
}
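// Example for CMPEQCombine (illustrative): an ordered f32 equality that was
// expanded to (and (setcc COND_E), (setcc COND_NP)) over one X86ISD::CMP is
// rewritten as (FSETCC eq), producing an all-ones/all-zeros f32 that is then
// bitcast, masked with 1, and truncated to the i8 boolean.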
/// CanFoldXORWithAllOnes - Test whether the XOR operand is an AllOnes vector
/// so it can be folded inside ANDNP.
static bool CanFoldXORWithAllOnes(const SDNode *N) {
  EVT VT = N->getValueType(0);

  // Match direct AllOnes for 128 and 256-bit vectors
  if (ISD::isBuildVectorAllOnes(N))
    return true;

  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  // Sometimes the operand may come from an insert_subvector building a 256-bit
  // vector
  if (VT.is256BitVector() &&
      N->getOpcode() == ISD::INSERT_SUBVECTOR) {
    SDValue V1 = N->getOperand(0);
    SDValue V2 = N->getOperand(1);

    if (V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
        V1.getOperand(0).getOpcode() == ISD::UNDEF &&
        ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
        ISD::isBuildVectorAllOnes(V2.getNode()))
      return true;
  }

  return false;
}
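// Illustrative example: a 256-bit all-ones operand may reach here as
// (insert_subvector (insert_subvector undef, <-1,-1>, 0), <-1,-1>, idx),
// a legalized form of a 256-bit all-ones build_vector, and is accepted by
// the insert_subvector case above.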
// On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
// register. In most cases we actually compare or select YMM-sized registers
// and mixing the two types creates horrible code. This method optimizes
// some of the transition sequences.
static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);
  if (!VT.is256BitVector())
    return SDValue();

  assert((N->getOpcode() == ISD::ANY_EXTEND ||
          N->getOpcode() == ISD::ZERO_EXTEND ||
          N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");

  SDValue Narrow = N->getOperand(0);
  EVT NarrowVT = Narrow->getValueType(0);
  if (!NarrowVT.is128BitVector())
    return SDValue();

  if (Narrow->getOpcode() != ISD::XOR &&
      Narrow->getOpcode() != ISD::AND &&
      Narrow->getOpcode() != ISD::OR)
    return SDValue();

  SDValue N0 = Narrow->getOperand(0);
  SDValue N1 = Narrow->getOperand(1);
  SDLoc DL(Narrow);

  // The Left side has to be a trunc.
  if (N0.getOpcode() != ISD::TRUNCATE)
    return SDValue();

  // The type of the truncated inputs.
  EVT WideVT = N0->getOperand(0)->getValueType(0);
  if (WideVT != VT)
    return SDValue();

  // The right side has to be a 'trunc' or a constant vector.
  bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE;
  ConstantSDNode *RHSConstSplat = nullptr;
  if (auto *RHSBV = dyn_cast<BuildVectorSDNode>(N1))
    RHSConstSplat = RHSBV->getConstantSplatNode();
  if (!RHSTrunc && !RHSConstSplat)
    return SDValue();

  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT))
    return SDValue();

  // Set N0 and N1 to hold the inputs to the new wide operation.
  N0 = N0->getOperand(0);
  if (RHSConstSplat) {
    N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getScalarType(),
                     SDValue(RHSConstSplat, 0));
    SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1);
    N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C);
  } else if (RHSTrunc) {
    N1 = N1->getOperand(0);
  }

  // Generate the wide operation.
  SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1);
  unsigned Opcode = N->getOpcode();
  switch (Opcode) {
  case ISD::ANY_EXTEND:
    return Op;
  case ISD::ZERO_EXTEND: {
    unsigned InBits = NarrowVT.getScalarType().getSizeInBits();
    APInt Mask = APInt::getAllOnesValue(InBits);
    Mask = Mask.zext(VT.getScalarType().getSizeInBits());
    return DAG.getNode(ISD::AND, DL, VT,
                       Op, DAG.getConstant(Mask, VT));
  }
  case ISD::SIGN_EXTEND:
    return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
                       Op, DAG.getValueType(NarrowVT));
  default:
    llvm_unreachable("Unexpected opcode");
  }
}
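// Example for WidenMaskArithmetic (illustrative): in
//   (sign_extend (and (trunc A), (trunc B)))
// where A and B are v8i32 and the AND is done at v8i16, the AND is rebuilt
// directly at v8i32 and the sign_extend becomes a sign_extend_inreg,
// keeping the whole computation in YMM-sized registers.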
static SDValue VectorZextCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const X86Subtarget *Subtarget) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDLoc DL(N);

  // A vector zext_in_reg may be represented as a shuffle,
  // feeding into a bitcast (this represents anyext) feeding into
  // an and with a mask.
  // We'd like to try to combine that into a shuffle with zero
  // plus a bitcast, removing the and.
  if (N0.getOpcode() != ISD::BITCAST ||
      N0.getOperand(0).getOpcode() != ISD::VECTOR_SHUFFLE)
    return SDValue();

  // The other side of the AND should be a splat of 2^C, where C
  // is the number of bits in the source type.
  if (N1.getOpcode() == ISD::BITCAST)
    N1 = N1.getOperand(0);
  if (N1.getOpcode() != ISD::BUILD_VECTOR)
    return SDValue();
  BuildVectorSDNode *Vector = cast<BuildVectorSDNode>(N1);

  ShuffleVectorSDNode *Shuffle = cast<ShuffleVectorSDNode>(N0.getOperand(0));
  EVT SrcType = Shuffle->getValueType(0);

  // We expect a single-source shuffle
  if (Shuffle->getOperand(1)->getOpcode() != ISD::UNDEF)
    return SDValue();

  unsigned SrcSize = SrcType.getScalarSizeInBits();

  APInt SplatValue, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!Vector->isConstantSplat(SplatValue, SplatUndef,
                               SplatBitSize, HasAnyUndefs))
    return SDValue();

  unsigned ResSize = N1.getValueType().getScalarSizeInBits();
  // Make sure the splat matches the mask we expect
  if (SplatBitSize > ResSize ||
      (SplatValue + 1).exactLogBase2() != (int)SrcSize)
    return SDValue();

  // Make sure the input and output size make sense
  if (SrcSize >= ResSize || ResSize % SrcSize)
    return SDValue();

  // We expect a shuffle of the form <0, u, u, u, 1, u, u, u...>
  // The number of u's between each two values depends on the ratio between
  // the source and dest type.
  unsigned ZextRatio = ResSize / SrcSize;
  bool IsZext = true;
  for (unsigned i = 0; i < SrcType.getVectorNumElements(); ++i) {
    if (i % ZextRatio) {
      if (Shuffle->getMaskElt(i) > 0) {
        // Expected undef
        IsZext = false;
        break;
      }
    } else {
      if (Shuffle->getMaskElt(i) != (int)(i / ZextRatio)) {
        // Expected element number
        IsZext = false;
        break;
      }
    }
  }

  if (!IsZext)
    return SDValue();

  // Ok, perform the transformation - replace the shuffle with
  // a shuffle of the form <0, k, k, k, 1, k, k, k> with zero
  // (instead of undef) where the k elements come from the zero vector.
  SmallVector<int, 8> Mask;
  unsigned NumElems = SrcType.getVectorNumElements();
  for (unsigned i = 0; i < NumElems; ++i)
    if (i % ZextRatio)
      Mask.push_back(NumElems);
    else
      Mask.push_back(i / ZextRatio);

  SDValue NewShuffle = DAG.getVectorShuffle(Shuffle->getValueType(0), DL,
    Shuffle->getOperand(0), DAG.getConstant(0, SrcType), Mask);
  return DAG.getNode(ISD::BITCAST, DL, N0.getValueType(), NewShuffle);
}
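// Example for VectorZextCombine (illustrative): a v2i64 zext_in_reg of
// v4i32 data may appear as
//   (and (bitcast (shuffle X, undef, <0,u,1,u>)), <0xFFFFFFFF,0xFFFFFFFF>)
// and is rewritten as (bitcast (shuffle X, zero, <0,4,1,4>)), where the
// masked-off lanes are taken from the zero vector instead of undef.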
static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const X86Subtarget *Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue Zext = VectorZextCombine(N, DAG, DCI, Subtarget);
  if (Zext.getNode())
    return Zext;

  SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
  if (R.getNode())
    return R;

  EVT VT = N->getValueType(0);
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDLoc DL(N);

  // Create BEXTR instructions
  // BEXTR is ((X >> imm) & (2**size-1))
  if (VT == MVT::i32 || VT == MVT::i64) {
    // Check for BEXTR.
    if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
        (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
      ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
      ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
      if (MaskNode && ShiftNode) {
        uint64_t Mask = MaskNode->getZExtValue();
        uint64_t Shift = ShiftNode->getZExtValue();
        if (isMask_64(Mask)) {
          uint64_t MaskSize = countPopulation(Mask);
          if (Shift + MaskSize <= VT.getSizeInBits())
            return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
                               DAG.getConstant(Shift | (MaskSize << 8), VT));
        }
      }
    }
    return SDValue();
  }

  // Want to form ANDNP nodes:
  // 1) In the hopes of then easily combining them with OR and AND nodes
  //    to form PBLEND/PSIGN.
  // 2) To match ANDN packed intrinsics
  if (VT != MVT::v2i64 && VT != MVT::v4i64)
    return SDValue();

  // Check LHS for vnot
  if (N0.getOpcode() == ISD::XOR &&
      //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
      CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
    return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);

  // Check RHS for vnot
  if (N1.getOpcode() == ISD::XOR &&
      //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
      CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
    return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);

  return SDValue();
}
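// Example for PerformAndCombine (illustrative): with BMI, the i32 expression
// (x >> 4) & 0xFFF is an isMask_64 pattern with Shift == 4 and
// MaskSize == 12, so it becomes (X86ISD::BEXTR x, 0xC04) -- the control
// value encodes the start bit in bits 7:0 and the length in bits 15:8.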
static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI,
                                const X86Subtarget *Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
  if (R.getNode())
    return R;

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  EVT VT = N->getValueType(0);

  // look for psign/blend
  if (VT == MVT::v2i64 || VT == MVT::v4i64) {
    if (!Subtarget->hasSSSE3() ||
        (VT == MVT::v4i64 && !Subtarget->hasInt256()))
      return SDValue();

    // Canonicalize pandn to RHS
    if (N0.getOpcode() == X86ISD::ANDNP)
      std::swap(N0, N1);
    // or (and (m, y), (pandn m, x))
    if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
      SDValue Mask = N1.getOperand(0);
      SDValue X = N1.getOperand(1);
      SDValue Y;
      if (N0.getOperand(0) == Mask)
        Y = N0.getOperand(1);
      if (N0.getOperand(1) == Mask)
        Y = N0.getOperand(0);

      // Check to see if the mask appeared in both the AND and ANDNP.
      if (!Y.getNode())
        return SDValue();

      // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
      // Look through mask bitcast.
      if (Mask.getOpcode() == ISD::BITCAST)
        Mask = Mask.getOperand(0);
      if (X.getOpcode() == ISD::BITCAST)
        X = X.getOperand(0);
      if (Y.getOpcode() == ISD::BITCAST)
        Y = Y.getOperand(0);

      EVT MaskVT = Mask.getValueType();

      // Validate that the Mask operand is a vector sra node.
      // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
      // there is no psrai.b
      unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
      unsigned SraAmt = ~0;
      if (Mask.getOpcode() == ISD::SRA) {
        if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
          if (auto *AmtConst = AmtBV->getConstantSplatNode())
            SraAmt = AmtConst->getZExtValue();
      } else if (Mask.getOpcode() == X86ISD::VSRAI) {
        SDValue SraC = Mask.getOperand(1);
        SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
      }
      if ((SraAmt + 1) != EltBits)
        return SDValue();

      SDLoc DL(N);

      // Now we know we at least have a pblendvb with the mask val. See if
      // we can form a psignb/w/d.
      // psign = x.type == y.type == mask.type && y = sub(0, x);
      if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
          ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
          X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
        assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
               "Unsupported VT for PSIGN");
        Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0));
        return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
      }
      // PBLENDVB only available on SSE 4.1
      if (!Subtarget->hasSSE41())
        return SDValue();

      EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;

      X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X);
      Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y);
      Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask);
      Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
      return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
    }
  }

  if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
    return SDValue();

  // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
  MachineFunction &MF = DAG.getMachineFunction();
  bool OptForSize =
      MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);

  // SHLD/SHRD instructions have lower register pressure, but on some
  // platforms they have higher latency than the equivalent
  // series of shifts/or that would otherwise be generated.
  // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
  // have higher latencies and we are not optimizing for size.
  if (!OptForSize && Subtarget->isSHLDSlow())
    return SDValue();

  if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
    std::swap(N0, N1);
  if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
    return SDValue();
  if (!N0.hasOneUse() || !N1.hasOneUse())
    return SDValue();

  SDValue ShAmt0 = N0.getOperand(1);
  if (ShAmt0.getValueType() != MVT::i8)
    return SDValue();
  SDValue ShAmt1 = N1.getOperand(1);
  if (ShAmt1.getValueType() != MVT::i8)
    return SDValue();
  if (ShAmt0.getOpcode() == ISD::TRUNCATE)
    ShAmt0 = ShAmt0.getOperand(0);
  if (ShAmt1.getOpcode() == ISD::TRUNCATE)
    ShAmt1 = ShAmt1.getOperand(0);

  SDLoc DL(N);
  unsigned Opc = X86ISD::SHLD;
  SDValue Op0 = N0.getOperand(0);
  SDValue Op1 = N1.getOperand(0);
  if (ShAmt0.getOpcode() == ISD::SUB) {
    Opc = X86ISD::SHRD;
    std::swap(Op0, Op1);
    std::swap(ShAmt0, ShAmt1);
  }

  unsigned Bits = VT.getSizeInBits();
  if (ShAmt1.getOpcode() == ISD::SUB) {
    SDValue Sum = ShAmt1.getOperand(0);
    if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
      SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
      if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
        ShAmt1Op1 = ShAmt1Op1.getOperand(0);
      if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
        return DAG.getNode(Opc, DL, VT,
                           Op0, Op1,
                           DAG.getNode(ISD::TRUNCATE, DL,
                                       MVT::i8, ShAmt0));
    }
  } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
    ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
    if (ShAmt0C &&
        ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
      return DAG.getNode(Opc, DL, VT,
                         N0.getOperand(0), N1.getOperand(0),
                         DAG.getNode(ISD::TRUNCATE, DL,
                                     MVT::i8, ShAmt1));
  }

  return SDValue();
}
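// Example for PerformOrCombine (illustrative): for i32,
//   (or (shl X, C), (srl Y, (sub 32, C)))
// matches the double-shift pattern above and becomes
// (X86ISD::SHLD X, Y, C), a single SHLD instruction, unless the target
// reports SHLD as slow and we are not optimizing for size.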
25042 // Generate NEG and CMOV for integer abs.
25043 static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
25044 EVT VT = N->getValueType(0);
25046 // Since X86 does not have CMOV for 8-bit integer, we don't convert
25047 // 8-bit integer abs to NEG and CMOV.
25048 if (VT.isInteger() && VT.getSizeInBits() == 8)
25051 SDValue N0 = N->getOperand(0);
25052 SDValue N1 = N->getOperand(1);
25055 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
25056 // and change it to SUB and CMOV.
25057 if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
25058 N0.getOpcode() == ISD::ADD &&
25059 N0.getOperand(1) == N1 &&
25060 N1.getOpcode() == ISD::SRA &&
25061 N1.getOperand(0) == N0.getOperand(0))
25062 if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
25063 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) {
25064 // Generate SUB & CMOV.
25065 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
25066 DAG.getConstant(0, VT), N0.getOperand(0));
25068 SDValue Ops[] = { N0.getOperand(0), Neg,
25069 DAG.getConstant(X86::COND_GE, MVT::i8),
25070 SDValue(Neg.getNode(), 1) };
25071 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
25072 }
25074 return SDValue();
25075 }
25076 // PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes
25077 static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
25078 TargetLowering::DAGCombinerInfo &DCI,
25079 const X86Subtarget *Subtarget) {
25080 if (DCI.isBeforeLegalizeOps())
25081 return SDValue();
25083 if (Subtarget->hasCMov()) {
25084 SDValue RV = performIntegerAbsCombine(N, DAG);
25085 if (RV.getNode())
25086 return RV;
25087 }
25089 return SDValue();
25090 }
25092 /// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
25093 static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
25094 TargetLowering::DAGCombinerInfo &DCI,
25095 const X86Subtarget *Subtarget) {
25096 LoadSDNode *Ld = cast<LoadSDNode>(N);
25097 EVT RegVT = Ld->getValueType(0);
25098 EVT MemVT = Ld->getMemoryVT();
25099 SDLoc dl(Ld);
25100 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25102 // For chips with slow 32-byte unaligned loads, break the 32-byte operation
25103 // into two 16-byte operations.
25104 ISD::LoadExtType Ext = Ld->getExtensionType();
25105 unsigned Alignment = Ld->getAlignment();
25106 bool IsAligned = Alignment == 0 || Alignment >= MemVT.getSizeInBits()/8;
25107 if (RegVT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
25108 !DCI.isBeforeLegalizeOps() && !IsAligned && Ext == ISD::NON_EXTLOAD) {
25109 unsigned NumElems = RegVT.getVectorNumElements();
25110 if (NumElems < 2)
25111 return SDValue();
25113 SDValue Ptr = Ld->getBasePtr();
25114 SDValue Increment = DAG.getConstant(16, TLI.getPointerTy());
25116 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
25117 NumElems/2);
25118 SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
25119 Ld->getPointerInfo(), Ld->isVolatile(),
25120 Ld->isNonTemporal(), Ld->isInvariant(),
25121 Alignment);
25122 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
25123 SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
25124 Ld->getPointerInfo(), Ld->isVolatile(),
25125 Ld->isNonTemporal(), Ld->isInvariant(),
25126 std::min(16U, Alignment));
25127 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
25128 Load1.getValue(1),
25129 Load2.getValue(1));
25131 SDValue NewVec = DAG.getUNDEF(RegVT);
25132 NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl);
25133 NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl);
25134 return DCI.CombineTo(N, NewVec, TF, true);
25135 }
25137 return SDValue();
25138 }
25140 /// PerformMLOADCombine - Resolve extending loads
25141 static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
25142 TargetLowering::DAGCombinerInfo &DCI,
25143 const X86Subtarget *Subtarget) {
25144 MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
25145 if (Mld->getExtensionType() != ISD::SEXTLOAD)
25146 return SDValue();
25148 EVT VT = Mld->getValueType(0);
25149 unsigned NumElems = VT.getVectorNumElements();
25150 EVT LdVT = Mld->getMemoryVT();
25151 SDLoc dl(Mld);
25153 assert(LdVT != VT && "Cannot extend to the same type");
25154 unsigned ToSz = VT.getVectorElementType().getSizeInBits();
25155 unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
25156 // From, To sizes and ElemCount must be pow of two
25157 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
25158 "Unexpected size for extending masked load");
25160 unsigned SizeRatio = ToSz / FromSz;
25161 assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
25163 // Create a type on which we perform the shuffle
25164 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25165 LdVT.getScalarType(), NumElems*SizeRatio);
25166 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25168 // Convert Src0 value
25169 SDValue WideSrc0 = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mld->getSrc0());
25170 if (Mld->getSrc0().getOpcode() != ISD::UNDEF) {
25171 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25172 for (unsigned i = 0; i != NumElems; ++i)
25173 ShuffleVec[i] = i * SizeRatio;
25175 // Can't shuffle using an illegal type.
25176 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
25177 && "WideVecVT should be legal");
25178 WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
25179 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
25180 }
25181 // Prepare the new mask
25182 SDValue NewMask;
25183 SDValue Mask = Mld->getMask();
25184 if (Mask.getValueType() == VT) {
25185 // Mask and original value have the same type
25186 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
25187 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25188 for (unsigned i = 0; i != NumElems; ++i)
25189 ShuffleVec[i] = i * SizeRatio;
25190 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
25191 ShuffleVec[i] = NumElems*SizeRatio;
25192 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
25193 DAG.getConstant(0, WideVecVT),
25194 &ShuffleVec[0]);
25195 }
25196 else {
25197 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
25198 unsigned WidenNumElts = NumElems*SizeRatio;
25199 unsigned MaskNumElts = VT.getVectorNumElements();
25200 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
25201 WidenNumElts);
25203 unsigned NumConcat = WidenNumElts / MaskNumElts;
25204 SmallVector<SDValue, 16> Ops(NumConcat);
25205 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
25206 Ops[0] = Mask;
25207 for (unsigned i = 1; i != NumConcat; ++i)
25208 Ops[i] = ZeroVal;
25210 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
25211 }
25213 SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
25214 Mld->getBasePtr(), NewMask, WideSrc0,
25215 Mld->getMemoryVT(), Mld->getMemOperand(),
25216 ISD::NON_EXTLOAD);
25217 SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd);
25218 return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
25219 }
25221 /// PerformMSTORECombine - Resolve truncating stores
25222 static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
25223 const X86Subtarget *Subtarget) {
25224 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
25225 if (!Mst->isTruncatingStore())
25226 return SDValue();
25228 EVT VT = Mst->getValue().getValueType();
25229 unsigned NumElems = VT.getVectorNumElements();
25230 EVT StVT = Mst->getMemoryVT();
25231 SDLoc dl(Mst);
25233 assert(StVT != VT && "Cannot truncate to the same type");
25234 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25235 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25237 // From, To sizes and ElemCount must be pow of two
25238 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
25239 "Unexpected size for truncating masked store");
25240 // We are going to use the original vector elt for storing.
25241 // Accumulated smaller vector elements must be a multiple of the store size.
25242 assert (((NumElems * FromSz) % ToSz) == 0 &&
25243 "Unexpected ratio for truncating masked store");
25245 unsigned SizeRatio = FromSz / ToSz;
25246 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
25248 // Create a type on which we perform the shuffle
25249 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25250 StVT.getScalarType(), NumElems*SizeRatio);
25252 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25254 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mst->getValue());
25255 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25256 for (unsigned i = 0; i != NumElems; ++i)
25257 ShuffleVec[i] = i * SizeRatio;
25259 // Can't shuffle using an illegal type.
25260 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
25261 && "WideVecVT should be legal");
25263 SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25264 DAG.getUNDEF(WideVecVT),
25265 &ShuffleVec[0]);
25267 SDValue NewMask;
25268 SDValue Mask = Mst->getMask();
25269 if (Mask.getValueType() == VT) {
25270 // Mask and original value have the same type
25271 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
25272 for (unsigned i = 0; i != NumElems; ++i)
25273 ShuffleVec[i] = i * SizeRatio;
25274 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
25275 ShuffleVec[i] = NumElems*SizeRatio;
25276 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
25277 DAG.getConstant(0, WideVecVT),
25278 &ShuffleVec[0]);
25279 }
25280 else {
25281 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
25282 unsigned WidenNumElts = NumElems*SizeRatio;
25283 unsigned MaskNumElts = VT.getVectorNumElements();
25284 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
25285 WidenNumElts);
25287 unsigned NumConcat = WidenNumElts / MaskNumElts;
25288 SmallVector<SDValue, 16> Ops(NumConcat);
25289 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
25290 Ops[0] = Mask;
25291 for (unsigned i = 1; i != NumConcat; ++i)
25292 Ops[i] = ZeroVal;
25294 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
25295 }
25297 return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal, Mst->getBasePtr(),
25298 NewMask, StVT, Mst->getMemOperand(), false);
25299 }
25300 /// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
25301 static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
25302 const X86Subtarget *Subtarget) {
25303 StoreSDNode *St = cast<StoreSDNode>(N);
25304 EVT VT = St->getValue().getValueType();
25305 EVT StVT = St->getMemoryVT();
25306 SDLoc dl(St);
25307 SDValue StoredVal = St->getOperand(1);
25308 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25310 // If we are saving a concatenation of two XMM registers and 32-byte stores
25311 // are slow, such as on Sandy Bridge, perform two 16-byte stores.
25312 unsigned Alignment = St->getAlignment();
25313 bool IsAligned = Alignment == 0 || Alignment >= VT.getSizeInBits()/8;
25314 if (VT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
25315 StVT == VT && !IsAligned) {
25316 unsigned NumElems = VT.getVectorNumElements();
25317 if (NumElems < 2)
25318 return SDValue();
25320 SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl);
25321 SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl);
25323 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy());
25324 SDValue Ptr0 = St->getBasePtr();
25325 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride);
25327 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0,
25328 St->getPointerInfo(), St->isVolatile(),
25329 St->isNonTemporal(), Alignment);
25330 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1,
25331 St->getPointerInfo(), St->isVolatile(),
25332 St->isNonTemporal(),
25333 std::min(16U, Alignment));
25334 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
25337 // Optimize trunc store (of multiple scalars) to shuffle and store.
25338 // First, pack all of the elements in one place. Next, store to memory
25339 // in fewer chunks.
25340 if (St->isTruncatingStore() && VT.isVector()) {
25341 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25342 unsigned NumElems = VT.getVectorNumElements();
25343 assert(StVT != VT && "Cannot truncate to the same type");
25344 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25345 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25347 // From, To sizes and ElemCount must be pow of two
25348 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
25349 // We are going to use the original vector elt for storing.
25350 // Accumulated smaller vector elements must be a multiple of the store size.
25351 if (0 != (NumElems * FromSz) % ToSz) return SDValue();
25353 unsigned SizeRatio = FromSz / ToSz;
25355 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
25357 // Create a type on which we perform the shuffle
25358 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25359 StVT.getScalarType(), NumElems*SizeRatio);
25361 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25363 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue());
25364 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
25365 for (unsigned i = 0; i != NumElems; ++i)
25366 ShuffleVec[i] = i * SizeRatio;
25368 // Can't shuffle using an illegal type.
25369 if (!TLI.isTypeLegal(WideVecVT))
25370 return SDValue();
25372 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25373 DAG.getUNDEF(WideVecVT),
25374 &ShuffleVec[0]);
25375 // At this point all of the data is stored at the bottom of the
25376 // register. We now need to save it to mem.
25378 // Find the largest store unit
25379 MVT StoreType = MVT::i8;
25380 for (MVT Tp : MVT::integer_valuetypes()) {
25381 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
25382 StoreType = Tp;
25383 }
25385 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64.
25386 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
25387 (64 <= NumElems * ToSz))
25388 StoreType = MVT::f64;
25390 // Bitcast the original vector into a vector of store-size units
25391 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
25392 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
25393 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
25394 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff);
25395 SmallVector<SDValue, 8> Chains;
25396 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
25397 TLI.getPointerTy());
25398 SDValue Ptr = St->getBasePtr();
25400 // Perform one or more big stores into memory.
25401 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
25402 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
25403 StoreType, ShuffWide,
25404 DAG.getIntPtrConstant(i));
25405 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr,
25406 St->getPointerInfo(), St->isVolatile(),
25407 St->isNonTemporal(), St->getAlignment());
25408 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
25409 Chains.push_back(Ch);
25412 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
25413 }
25415 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
25416 // the FP state in cases where an emms may be missing.
25417 // A preferable solution to the general problem is to figure out the right
25418 // places to insert EMMS. This qualifies as a quick hack.
25420 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
25421 if (VT.getSizeInBits() != 64)
25422 return SDValue();
25424 const Function *F = DAG.getMachineFunction().getFunction();
25425 bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
25426 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
25427 && Subtarget->hasSSE2();
25428 if ((VT.isVector() ||
25429 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
25430 isa<LoadSDNode>(St->getValue()) &&
25431 !cast<LoadSDNode>(St->getValue())->isVolatile() &&
25432 St->getChain().hasOneUse() && !St->isVolatile()) {
25433 SDNode* LdVal = St->getValue().getNode();
25434 LoadSDNode *Ld = nullptr;
25435 int TokenFactorIndex = -1;
25436 SmallVector<SDValue, 8> Ops;
25437 SDNode* ChainVal = St->getChain().getNode();
25438 // Must be a store of a load. We currently handle two cases: the load
25439 // is a direct child, and it's under an intervening TokenFactor. It is
25440 // possible to dig deeper under nested TokenFactors.
25441 if (ChainVal == LdVal)
25442 Ld = cast<LoadSDNode>(St->getChain());
25443 else if (St->getValue().hasOneUse() &&
25444 ChainVal->getOpcode() == ISD::TokenFactor) {
25445 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
25446 if (ChainVal->getOperand(i).getNode() == LdVal) {
25447 TokenFactorIndex = i;
25448 Ld = cast<LoadSDNode>(St->getValue());
25449 } else
25450 Ops.push_back(ChainVal->getOperand(i));
25451 }
25452 }
25454 if (!Ld || !ISD::isNormalLoad(Ld))
25455 return SDValue();
25457 // If this is not the MMX case, i.e. we are just turning i64 load/store
25458 // into f64 load/store, avoid the transformation if there are multiple
25459 // uses of the loaded value.
25460 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
25461 return SDValue();
25463 SDLoc LdDL(Ld), StDL(N);
25465 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
25466 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
25467 // pair instead.
25468 if (Subtarget->is64Bit() || F64IsLegal) {
25469 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
25470 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
25471 Ld->getPointerInfo(), Ld->isVolatile(),
25472 Ld->isNonTemporal(), Ld->isInvariant(),
25473 Ld->getAlignment());
25474 SDValue NewChain = NewLd.getValue(1);
25475 if (TokenFactorIndex != -1) {
25476 Ops.push_back(NewChain);
25477 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25479 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
25480 St->getPointerInfo(),
25481 St->isVolatile(), St->isNonTemporal(),
25482 St->getAlignment());
25485 // Otherwise, lower to two pairs of 32-bit loads / stores.
25486 SDValue LoAddr = Ld->getBasePtr();
25487 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
25488 DAG.getConstant(4, MVT::i32));
25490 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
25491 Ld->getPointerInfo(),
25492 Ld->isVolatile(), Ld->isNonTemporal(),
25493 Ld->isInvariant(), Ld->getAlignment());
25494 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
25495 Ld->getPointerInfo().getWithOffset(4),
25496 Ld->isVolatile(), Ld->isNonTemporal(),
25497 Ld->isInvariant(),
25498 MinAlign(Ld->getAlignment(), 4));
25500 SDValue NewChain = LoLd.getValue(1);
25501 if (TokenFactorIndex != -1) {
25502 Ops.push_back(LoLd);
25503 Ops.push_back(HiLd);
25504 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25505 }
25507 LoAddr = St->getBasePtr();
25508 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
25509 DAG.getConstant(4, MVT::i32));
25511 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
25512 St->getPointerInfo(),
25513 St->isVolatile(), St->isNonTemporal(),
25514 St->getAlignment());
25515 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
25516 St->getPointerInfo().getWithOffset(4),
25517 St->isVolatile(),
25518 St->isNonTemporal(),
25519 MinAlign(St->getAlignment(), 4));
25520 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
25521 }
25523 return SDValue();
25524 }
25525 /// Return 'true' if this vector operation is "horizontal"
25526 /// and return the operands for the horizontal operation in LHS and RHS. A
25527 /// horizontal operation performs the binary operation on successive elements
25528 /// of its first operand, then on successive elements of its second operand,
25529 /// returning the resulting values in a vector. For example, if
25530 /// A = < float a0, float a1, float a2, float a3 >
25531 /// and
25532 /// B = < float b0, float b1, float b2, float b3 >
25533 /// then the result of doing a horizontal operation on A and B is
25534 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
25535 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
25536 /// A horizontal-op B, for some already available A and B, and if so then LHS is
25537 /// set to A, RHS to B, and the routine returns 'true'.
25538 /// Note that the binary operation should have the property that if one of the
25539 /// operands is UNDEF then the result is UNDEF.
25540 static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
25541 // Look for the following pattern: if
25542 // A = < float a0, float a1, float a2, float a3 >
25543 // B = < float b0, float b1, float b2, float b3 >
25544 // and
25545 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
25546 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
25547 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
25548 // which is A horizontal-op B.
25550 // At least one of the operands should be a vector shuffle.
25551 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
25552 RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
25553 return false;
25555 MVT VT = LHS.getSimpleValueType();
25557 assert((VT.is128BitVector() || VT.is256BitVector()) &&
25558 "Unsupported vector type for horizontal add/sub");
25560 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
25561 // operate independently on 128-bit lanes.
25562 unsigned NumElts = VT.getVectorNumElements();
25563 unsigned NumLanes = VT.getSizeInBits()/128;
25564 unsigned NumLaneElts = NumElts / NumLanes;
25565 assert((NumLaneElts % 2 == 0) &&
25566 "Vector type should have an even number of elements in each lane");
25567 unsigned HalfLaneElts = NumLaneElts/2;
25569 // View LHS in the form
25570 // LHS = VECTOR_SHUFFLE A, B, LMask
25571 // If LHS is not a shuffle then pretend it is the shuffle
25572 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
25573 // NOTE: in what follows a default initialized SDValue represents an UNDEF of
25574 // type VT.
25575 SDValue A, B;
25576 SmallVector<int, 16> LMask(NumElts);
25577 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25578 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF)
25579 A = LHS.getOperand(0);
25580 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF)
25581 B = LHS.getOperand(1);
25582 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
25583 std::copy(Mask.begin(), Mask.end(), LMask.begin());
25584 } else {
25585 if (LHS.getOpcode() != ISD::UNDEF)
25586 A = LHS;
25587 for (unsigned i = 0; i != NumElts; ++i)
25588 LMask[i] = i;
25589 }
25591 // Likewise, view RHS in the form
25592 // RHS = VECTOR_SHUFFLE C, D, RMask
25593 SDValue C, D;
25594 SmallVector<int, 16> RMask(NumElts);
25595 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25596 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF)
25597 C = RHS.getOperand(0);
25598 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF)
25599 D = RHS.getOperand(1);
25600 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
25601 std::copy(Mask.begin(), Mask.end(), RMask.begin());
25602 } else {
25603 if (RHS.getOpcode() != ISD::UNDEF)
25604 C = RHS;
25605 for (unsigned i = 0; i != NumElts; ++i)
25606 RMask[i] = i;
25607 }
25609 // Check that the shuffles are both shuffling the same vectors.
25610 if (!(A == C && B == D) && !(A == D && B == C))
25611 return false;
25613 // If everything is UNDEF then bail out: it would be better to fold to UNDEF.
25614 if (!A.getNode() && !B.getNode())
25615 return false;
25617 // If A and B occur in reverse order in RHS, then "swap" them (which means
25618 // rewriting the mask).
25619 if (A != C)
25620 CommuteVectorShuffleMask(RMask, NumElts);
25622 // At this point LHS and RHS are equivalent to
25623 // LHS = VECTOR_SHUFFLE A, B, LMask
25624 // RHS = VECTOR_SHUFFLE A, B, RMask
25625 // Check that the masks correspond to performing a horizontal operation.
25626 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
25627 for (unsigned i = 0; i != NumLaneElts; ++i) {
25628 int LIdx = LMask[i+l], RIdx = RMask[i+l];
25630 // Ignore any UNDEF components.
25631 if (LIdx < 0 || RIdx < 0 ||
25632 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
25633 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
25634 continue;
25636 // Check that successive elements are being operated on. If not, this is
25637 // not a horizontal operation.
25638 unsigned Src = (i/HalfLaneElts); // each lane is split between srcs
25639 int Index = 2*(i%HalfLaneElts) + NumElts*Src + l;
25640 if (!(LIdx == Index && RIdx == Index + 1) &&
25641 !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
25642 return false;
25643 }
25644 }
25646 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
25647 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
25648 return true;
25649 }
25651 /// Do target-specific dag combines on floating point adds.
25652 static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
25653 const X86Subtarget *Subtarget) {
25654 EVT VT = N->getValueType(0);
25655 SDValue LHS = N->getOperand(0);
25656 SDValue RHS = N->getOperand(1);
25658 // Try to synthesize horizontal adds from adds of shuffles.
25659 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25660 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25661 isHorizontalBinOp(LHS, RHS, true))
25662 return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS);
25664 return SDValue();
25665 }
25666 /// Do target-specific dag combines on floating point subs.
25667 static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
25668 const X86Subtarget *Subtarget) {
25669 EVT VT = N->getValueType(0);
25670 SDValue LHS = N->getOperand(0);
25671 SDValue RHS = N->getOperand(1);
25673 // Try to synthesize horizontal subs from subs of shuffles.
25674 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25675 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25676 isHorizontalBinOp(LHS, RHS, false))
25677 return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS);
25679 return SDValue();
25680 }
25681 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
25682 static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
25683 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
25685 // F[X]OR(0.0, x) -> x
25686 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25687 if (C->getValueAPF().isPosZero())
25688 return N->getOperand(1);
25690 // F[X]OR(x, 0.0) -> x
25691 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25692 if (C->getValueAPF().isPosZero())
25693 return N->getOperand(0);
25695 return SDValue();
25696 }
25697 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
25698 static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
25699 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
25701 // Only perform optimizations if UnsafeMath is used.
25702 if (!DAG.getTarget().Options.UnsafeFPMath)
25703 return SDValue();
25705 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
25706 // into FMINC and FMAXC, which are commutative operations.
25707 unsigned NewOp = 0;
25708 switch (N->getOpcode()) {
25709 default: llvm_unreachable("unknown opcode");
25710 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
25711 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
25712 }
25714 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
25715 N->getOperand(0), N->getOperand(1));
25716 }
25718 /// Do target-specific dag combines on X86ISD::FAND nodes.
25719 static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
25720 // FAND(0.0, x) -> 0.0
25721 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25722 if (C->getValueAPF().isPosZero())
25723 return N->getOperand(0);
25725 // FAND(x, 0.0) -> 0.0
25726 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25727 if (C->getValueAPF().isPosZero())
25728 return N->getOperand(1);
25730 return SDValue();
25731 }
25733 /// Do target-specific dag combines on X86ISD::FANDN nodes.
25734 static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG) {
25735 // FANDN(0.0, x) -> x
25736 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25737 if (C->getValueAPF().isPosZero())
25738 return N->getOperand(1);
25740 // FANDN(x, 0.0) -> 0.0
25741 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25742 if (C->getValueAPF().isPosZero())
25743 return N->getOperand(1);
25745 return SDValue();
25746 }
25748 static SDValue PerformBTCombine(SDNode *N,
25749 SelectionDAG &DAG,
25750 TargetLowering::DAGCombinerInfo &DCI) {
25751 // BT ignores high bits in the bit index operand.
25752 SDValue Op1 = N->getOperand(1);
25753 if (Op1.hasOneUse()) {
25754 unsigned BitWidth = Op1.getValueSizeInBits();
25755 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
25756 APInt KnownZero, KnownOne;
25757 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
25758 !DCI.isBeforeLegalizeOps());
25759 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25760 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
25761 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
25762 DCI.CommitTargetLoweringOpt(TLO);
25763 }
25765 return SDValue();
25766 }
25767 static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
25768 SDValue Op = N->getOperand(0);
25769 if (Op.getOpcode() == ISD::BITCAST)
25770 Op = Op.getOperand(0);
25771 EVT VT = N->getValueType(0), OpVT = Op.getValueType();
25772 if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
25773 VT.getVectorElementType().getSizeInBits() ==
25774 OpVT.getVectorElementType().getSizeInBits()) {
25775 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
25776 }
25777 return SDValue();
25778 }
25780 static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
25781 const X86Subtarget *Subtarget) {
25782 EVT VT = N->getValueType(0);
25783 if (!VT.isVector())
25784 return SDValue();
25786 SDValue N0 = N->getOperand(0);
25787 SDValue N1 = N->getOperand(1);
25788 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
25789 SDLoc dl(N);
25791 // The SIGN_EXTEND_INREG to v4i64 is an expensive operation on both
25792 // SSE and AVX2, since there is no sign-extended shift right
25793 // operation on a vector with 64-bit elements.
25794 //(sext_in_reg (v4i64 anyext (v4i32 x )), ExtraVT) ->
25795 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT)))
25796 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
25797 N0.getOpcode() == ISD::SIGN_EXTEND)) {
25798 SDValue N00 = N0.getOperand(0);
25800 // EXTLOAD has a better solution on AVX2,
25801 // it may be replaced with X86ISD::VSEXT node.
25802 if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256())
25803 if (!ISD::isNormalLoad(N00.getNode()))
25804 return SDValue();
25806 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
25807 SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
25808 N00, N1);
25809 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
25810 }
25811 }
25813 return SDValue();
25814 }
25815 static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
25816 TargetLowering::DAGCombinerInfo &DCI,
25817 const X86Subtarget *Subtarget) {
25818 SDValue N0 = N->getOperand(0);
25819 EVT VT = N->getValueType(0);
25821 // (i8,i32 sext (sdivrem (i8 x, i8 y)) ->
25822 // (i8,i32 (sdivrem_sext_hreg (i8 x, i8 y)
25823 // This exposes the sext to the sdivrem lowering, so that it directly extends
25824 // from AH (which we otherwise need to do contortions to access).
25825 if (N0.getOpcode() == ISD::SDIVREM && N0.getResNo() == 1 &&
25826 N0.getValueType() == MVT::i8 && VT == MVT::i32) {
25827 SDLoc dl(N);
25828 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25829 SDValue R = DAG.getNode(X86ISD::SDIVREM8_SEXT_HREG, dl, NodeTys,
25830 N0.getOperand(0), N0.getOperand(1));
25831 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25832 return R.getValue(1);
25833 }
25835 if (!DCI.isBeforeLegalizeOps())
25836 return SDValue();
25838 if (!Subtarget->hasFp256())
25839 return SDValue();
25841 if (VT.isVector() && VT.getSizeInBits() == 256) {
25842 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25843 if (R.getNode())
25844 return R;
25845 }
25847 return SDValue();
25848 }
25850 static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
25851 const X86Subtarget* Subtarget) {
25852 SDLoc dl(N);
25853 EVT VT = N->getValueType(0);
25855 // Let legalize expand this if it isn't a legal type yet.
25856 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
25857 return SDValue();
25859 EVT ScalarVT = VT.getScalarType();
25860 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
25861 (!Subtarget->hasFMA() && !Subtarget->hasFMA4()))
25862 return SDValue();
25864 SDValue A = N->getOperand(0);
25865 SDValue B = N->getOperand(1);
25866 SDValue C = N->getOperand(2);
25868 bool NegA = (A.getOpcode() == ISD::FNEG);
25869 bool NegB = (B.getOpcode() == ISD::FNEG);
25870 bool NegC = (C.getOpcode() == ISD::FNEG);
25872 // Negative multiplication when NegA xor NegB
25873 bool NegMul = (NegA != NegB);
25874 if (NegA)
25875 A = A.getOperand(0);
25876 if (NegB)
25877 B = B.getOperand(0);
25878 if (NegC)
25879 C = C.getOperand(0);
25881 unsigned Opcode;
25882 if (!NegMul)
25883 Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB;
25884 else
25885 Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB;
25887 return DAG.getNode(Opcode, dl, VT, A, B, C);
25888 }
25890 static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
25891 TargetLowering::DAGCombinerInfo &DCI,
25892 const X86Subtarget *Subtarget) {
25893 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
25894 // (and (i32 x86isd::setcc_carry), 1)
25895 // This eliminates the zext. This transformation is necessary because
25896 // ISD::SETCC is always legalized to i8.
25897 SDLoc dl(N);
25898 SDValue N0 = N->getOperand(0);
25899 EVT VT = N->getValueType(0);
25901 if (N0.getOpcode() == ISD::AND &&
25902 N0.hasOneUse() &&
25903 N0.getOperand(0).hasOneUse()) {
25904 SDValue N00 = N0.getOperand(0);
25905 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25906 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
25907 if (!C || C->getZExtValue() != 1)
25908 return SDValue();
25909 return DAG.getNode(ISD::AND, dl, VT,
25910 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25911 N00.getOperand(0), N00.getOperand(1)),
25912 DAG.getConstant(1, VT));
25913 }
25914 }
25916 if (N0.getOpcode() == ISD::TRUNCATE &&
25917 N0.hasOneUse() &&
25918 N0.getOperand(0).hasOneUse()) {
25919 SDValue N00 = N0.getOperand(0);
25920 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25921 return DAG.getNode(ISD::AND, dl, VT,
25922 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25923 N00.getOperand(0), N00.getOperand(1)),
25924 DAG.getConstant(1, VT));
25925 }
25926 }
25927 if (VT.is256BitVector()) {
25928 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25929 if (R.getNode())
25930 return R;
25931 }
25933 // (i8,i32 zext (udivrem (i8 x, i8 y)) ->
25934 // (i8,i32 (udivrem_zext_hreg (i8 x, i8 y)
25935 // This exposes the zext to the udivrem lowering, so that it directly extends
25936 // from AH (which we otherwise need to do contortions to access).
25937 if (N0.getOpcode() == ISD::UDIVREM &&
25938 N0.getResNo() == 1 && N0.getValueType() == MVT::i8 &&
25939 (VT == MVT::i32 || VT == MVT::i64)) {
25940 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25941 SDValue R = DAG.getNode(X86ISD::UDIVREM8_ZEXT_HREG, dl, NodeTys,
25942 N0.getOperand(0), N0.getOperand(1));
25943 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25944 return R.getValue(1);
25945 }
25947 return SDValue();
25948 }
25950 // Optimize x == -y --> x+y == 0
25951 // x != -y --> x+y != 0
25952 static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
25953 const X86Subtarget* Subtarget) {
25954 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
25955 SDValue LHS = N->getOperand(0);
25956 SDValue RHS = N->getOperand(1);
25957 EVT VT = N->getValueType(0);
25958 SDLoc DL(N);
25960 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
25961 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
25962 if (C->getAPIntValue() == 0 && LHS.hasOneUse()) {
25963 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
25964 LHS.getValueType(), RHS, LHS.getOperand(1));
25965 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
25966 addV, DAG.getConstant(0, addV.getValueType()), CC);
25967 }
25968 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
25969 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0)))
25970 if (C->getAPIntValue() == 0 && RHS.hasOneUse()) {
25971 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
25972 RHS.getValueType(), LHS, RHS.getOperand(1));
25973 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
25974 addV, DAG.getConstant(0, addV.getValueType()), CC);
25975 }
25977 if (VT.getScalarType() == MVT::i1) {
25978 bool IsSEXT0 = (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
25979 (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
25980 bool IsVZero0 = ISD::isBuildVectorAllZeros(LHS.getNode());
25981 if (!IsSEXT0 && !IsVZero0)
25982 return SDValue();
25983 bool IsSEXT1 = (RHS.getOpcode() == ISD::SIGN_EXTEND) &&
25984 (RHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
25985 bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
25987 if (!IsSEXT1 && !IsVZero1)
25988 return SDValue();
25990 if (IsSEXT0 && IsVZero1) {
25991 assert(VT == LHS.getOperand(0).getValueType() && "Unexpected operand type");
25992 if (CC == ISD::SETEQ)
25993 return DAG.getNOT(DL, LHS.getOperand(0), VT);
25994 return LHS.getOperand(0);
25995 }
25996 if (IsSEXT1 && IsVZero0) {
25997 assert(VT == RHS.getOperand(0).getValueType() && "Unexpected operand type");
25998 if (CC == ISD::SETEQ)
25999 return DAG.getNOT(DL, RHS.getOperand(0), VT);
26000 return RHS.getOperand(0);
26001 }
26002 }
26004 return SDValue();
26005 }
26007 static SDValue PerformINSERTPSCombine(SDNode *N, SelectionDAG &DAG,
26008 const X86Subtarget *Subtarget) {
26009 SDLoc dl(N);
26010 MVT VT = N->getOperand(1)->getSimpleValueType(0);
26011 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
26012 "X86insertps is only defined for v4x32");
26014 SDValue Ld = N->getOperand(1);
26015 if (MayFoldLoad(Ld)) {
26016 // Extract the countS bits from the immediate so we can get the proper
26017 // address when narrowing the vector load to a specific element.
26018 // When the second source op is a memory address, insertps doesn't use
26019 // countS and just gets an f32 from that address.
26020 unsigned DestIndex =
26021 cast<ConstantSDNode>(N->getOperand(2))->getZExtValue() >> 6;
26022 Ld = NarrowVectorLoadToElement(cast<LoadSDNode>(Ld), DestIndex, DAG);
26023 } else
26024 return SDValue();
26026 // Create this as a scalar to vector to match the instruction pattern.
26027 SDValue LoadScalarToVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Ld);
26028 // countS bits are ignored when loading from memory on insertps, which
26029 // means we don't need to explicitly set them to 0.
26030 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N->getOperand(0),
26031 LoadScalarToVector, N->getOperand(2));
26032 }
26034 // Helper function of PerformSETCCCombine. It is to materialize "setb reg"
26035 // as "sbb reg,reg", since it can be extended without zext and produces
26036 // an all-ones bit which is more useful than 0/1 in some cases.
26037 static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
26038 MVT VT) {
26039 if (VT == MVT::i8)
26040 return DAG.getNode(ISD::AND, DL, VT,
26041 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
26042 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS),
26043 DAG.getConstant(1, VT));
26044 assert (VT == MVT::i1 && "Unexpected type for SETCC node");
26045 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1,
26046 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
26047 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS));
26048 }
26050 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
26051 static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
26052 TargetLowering::DAGCombinerInfo &DCI,
26053 const X86Subtarget *Subtarget) {
26054 SDLoc DL(N);
26055 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
26056 SDValue EFLAGS = N->getOperand(1);
26058 if (CC == X86::COND_A) {
26059 // Try to convert COND_A into COND_B in an attempt to facilitate
26060 // materializing "setb reg".
26062 // Do not flip "e > c", where "c" is a constant, because Cmp instruction
26063 // cannot take an immediate as its first operand.
26065 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
26066 EFLAGS.getValueType().isInteger() &&
26067 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
26068 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
26069 EFLAGS.getNode()->getVTList(),
26070 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
26071 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
26072 return MaterializeSETB(DL, NewEFLAGS, DAG, N->getSimpleValueType(0));
26073 }
26074 }
26076 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
26077 // a zext and produces an all-ones bit which is more useful than 0/1 in some
26078 // cases.
26079 if (CC == X86::COND_B)
26080 return MaterializeSETB(DL, EFLAGS, DAG, N->getSimpleValueType(0));
26082 SDValue Flags;
26084 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
26085 if (Flags.getNode()) {
26086 SDValue Cond = DAG.getConstant(CC, MVT::i8);
26087 return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
26088 }
26090 return SDValue();
26091 }
26093 // Optimize branch condition evaluation.
26095 static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
26096 TargetLowering::DAGCombinerInfo &DCI,
26097 const X86Subtarget *Subtarget) {
26098 SDLoc DL(N);
26099 SDValue Chain = N->getOperand(0);
26100 SDValue Dest = N->getOperand(1);
26101 SDValue EFLAGS = N->getOperand(3);
26102 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
26104 SDValue Flags;
26106 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
26107 if (Flags.getNode()) {
26108 SDValue Cond = DAG.getConstant(CC, MVT::i8);
26109 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
26110 Flags);
26111 }
26113 return SDValue();
26114 }
26116 static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
26117 SelectionDAG &DAG) {
26118 // Take advantage of vector comparisons producing 0 or -1 in each lane to
26119 // optimize away operation when it's from a constant.
26121 // The general transformation is:
26122 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
26123 // AND(VECTOR_CMP(x,y), constant2)
26124 // constant2 = UNARYOP(constant)
26126 // Early exit if this isn't a vector operation, the operand of the
26127 // unary operation isn't a bitwise AND, or if the sizes of the operations
26128 // aren't the same.
26129 EVT VT = N->getValueType(0);
26130 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
26131 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
26132 VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
26133 return SDValue();
26135 // Now check that the other operand of the AND is a constant. We could
26136 // make the transformation for non-constant splats as well, but it's unclear
26137 // that would be a benefit as it would not eliminate any operations, just
26138 // perform one more step in scalar code before moving to the vector unit.
26139 if (BuildVectorSDNode *BV =
26140 dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
26141 // Bail out if the vector isn't a constant.
26142 if (!BV->isConstant())
26143 return SDValue();
26145 // Everything checks out. Build up the new and improved node.
26146 SDLoc DL(N);
26147 EVT IntVT = BV->getValueType(0);
26148 // Create a new constant of the appropriate type for the transformed
26149 // DAG.
26150 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
26151 // The AND node needs bitcasts to/from an integer vector type around it.
26152 SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
26153 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
26154 N->getOperand(0)->getOperand(0), MaskConst);
26155 SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
26156 return Res;
26157 }
26159 return SDValue();
26160 }
26162 static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
26163 const X86Subtarget *Subtarget) {
26164 // First try to optimize away the conversion entirely when it's
26165 // conditionally from a constant. Vectors only.
26166 SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
26167 if (Res != SDValue())
26168 return Res;
26170 // Now move on to more general possibilities.
26171 SDValue Op0 = N->getOperand(0);
26172 EVT InVT = Op0->getValueType(0);
26174 // SINT_TO_FP(vXi8) -> SINT_TO_FP(SEXT(vXi8 to vXi32))
26175 if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
26176 SDLoc dl(N);
26177 MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
26178 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
26179 return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
26180 }
26182 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
26183 // a 32-bit target where SSE doesn't support i64->FP operations.
26184 if (Op0.getOpcode() == ISD::LOAD) {
26185 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
26186 EVT VT = Ld->getValueType(0);
26187 if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
26188 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
26189 !Subtarget->is64Bit() && VT == MVT::i64) {
26190 SDValue FILDChain = Subtarget->getTargetLowering()->BuildFILD(
26191 SDValue(N, 0), Ld->getValueType(0), Ld->getChain(), Op0, DAG);
26192 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
26193 return FILDChain;
26194 }
26195 }
26197 return SDValue();
26198 }
26199 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
26200 static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
26201 X86TargetLowering::DAGCombinerInfo &DCI) {
26202 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
26203 // the result is either zero or one (depending on the input carry bit).
26204 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
26205 if (X86::isZeroNode(N->getOperand(0)) &&
26206 X86::isZeroNode(N->getOperand(1)) &&
26207 // We don't have a good way to replace an EFLAGS use, so only do this when
26209 SDValue(N, 1).use_empty()) {
26210 SDLoc DL(N);
26211 EVT VT = N->getValueType(0);
26212 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
26213 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
26214 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
26215 DAG.getConstant(X86::COND_B, MVT::i8),
26216 N->getOperand(2)),
26217 DAG.getConstant(1, VT));
26218 return DCI.CombineTo(N, Res1, CarryOut);
26219 }
26221 return SDValue();
26222 }
26224 // fold (add Y, (sete X, 0)) -> adc 0, Y
26225 // (add Y, (setne X, 0)) -> sbb -1, Y
26226 // (sub (sete X, 0), Y) -> sbb 0, Y
26227 // (sub (setne X, 0), Y) -> adc -1, Y
26228 static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
26229 SDLoc DL(N);
26231 // Look through ZExts.
26232 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
26233 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
26234 return SDValue();
26236 SDValue SetCC = Ext.getOperand(0);
26237 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
26238 return SDValue();
26240 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
26241 if (CC != X86::COND_E && CC != X86::COND_NE)
26242 return SDValue();
26244 SDValue Cmp = SetCC.getOperand(1);
26245 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
26246 !X86::isZeroNode(Cmp.getOperand(1)) ||
26247 !Cmp.getOperand(0).getValueType().isInteger())
26248 return SDValue();
26250 SDValue CmpOp0 = Cmp.getOperand(0);
26251 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
26252 DAG.getConstant(1, CmpOp0.getValueType()));
26254 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
26255 if (CC == X86::COND_NE)
26256 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
26257 DL, OtherVal.getValueType(), OtherVal,
26258 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
26259 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
26260 DL, OtherVal.getValueType(), OtherVal,
26261 DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
26262 }
26264 /// PerformADDCombine - Do target-specific dag combines on integer adds.
26265 static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
26266 const X86Subtarget *Subtarget) {
26267 EVT VT = N->getValueType(0);
26268 SDValue Op0 = N->getOperand(0);
26269 SDValue Op1 = N->getOperand(1);
26271 // Try to synthesize horizontal adds from adds of shuffles.
26272 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26273 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26274 isHorizontalBinOp(Op0, Op1, true))
26275 return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
26277 return OptimizeConditionalInDecrement(N, DAG);
26278 }
26280 static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
26281 const X86Subtarget *Subtarget) {
26282 SDValue Op0 = N->getOperand(0);
26283 SDValue Op1 = N->getOperand(1);
26285 // X86 can't encode an immediate LHS of a sub. See if we can push the
26286 // negation into a preceding instruction.
26287 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
26288 // If the RHS of the sub is a XOR with one use and a constant, invert the
26289 // immediate. Then add one to the LHS of the sub so we can turn
26290 // X-Y -> X+~Y+1, saving one register.
26291 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
26292 isa<ConstantSDNode>(Op1.getOperand(1))) {
26293 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
26294 EVT VT = Op0.getValueType();
26295 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
26297 DAG.getConstant(~XorC, VT));
26298 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
26299 DAG.getConstant(C->getAPIntValue()+1, VT));
26300 }
26301 }
26303 // Try to synthesize horizontal subs from subs of shuffles.
26304 EVT VT = N->getValueType(0);
26305 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26306 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26307 isHorizontalBinOp(Op0, Op1, true))
26308 return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
26310 return OptimizeConditionalInDecrement(N, DAG);
26311 }
26313 /// performVZEXTCombine - Performs VZEXT DAG combines.
26314 static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
26315 TargetLowering::DAGCombinerInfo &DCI,
26316 const X86Subtarget *Subtarget) {
26317 SDLoc DL(N);
26318 MVT VT = N->getSimpleValueType(0);
26319 SDValue Op = N->getOperand(0);
26320 MVT OpVT = Op.getSimpleValueType();
26321 MVT OpEltVT = OpVT.getVectorElementType();
26322 unsigned InputBits = OpEltVT.getSizeInBits() * VT.getVectorNumElements();
26324 // (vzext (bitcast (vzext x))) -> (vzext x)
26325 SDValue V = Op;
26326 while (V.getOpcode() == ISD::BITCAST)
26327 V = V.getOperand(0);
26329 if (V != Op && V.getOpcode() == X86ISD::VZEXT) {
26330 MVT InnerVT = V.getSimpleValueType();
26331 MVT InnerEltVT = InnerVT.getVectorElementType();
26333 // If the element sizes match exactly, we can just do one larger vzext. This
26334 // is always an exact type match as vzext operates on integer types.
26335 if (OpEltVT == InnerEltVT) {
26336 assert(OpVT == InnerVT && "Types must match for vzext!");
26337 return DAG.getNode(X86ISD::VZEXT, DL, VT, V.getOperand(0));
26340 // The only other way we can combine them is if only a single element of the
26341 // inner vzext is used in the input to the outer vzext.
26342 if (InnerEltVT.getSizeInBits() < InputBits)
26343 return SDValue();
26345 // In this case, the inner vzext is completely dead because we're going to
26346 // only look at bits inside of the low element. Just do the outer vzext on
26347 // a bitcast of the input to the inner.
26348 return DAG.getNode(X86ISD::VZEXT, DL, VT,
26349 DAG.getNode(ISD::BITCAST, DL, OpVT, V));
26350 }
26352 // Check if we can bypass extracting and re-inserting an element of an input
26353 // vector. Essentially:
26354 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
26355 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
26356 V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
26357 V.getOperand(0).getSimpleValueType().getSizeInBits() == InputBits) {
26358 SDValue ExtractedV = V.getOperand(0);
26359 SDValue OrigV = ExtractedV.getOperand(0);
26360 if (auto *ExtractIdx = dyn_cast<ConstantSDNode>(ExtractedV.getOperand(1)))
26361 if (ExtractIdx->getZExtValue() == 0) {
26362 MVT OrigVT = OrigV.getSimpleValueType();
26363 // Extract a subvector if necessary...
26364 if (OrigVT.getSizeInBits() > OpVT.getSizeInBits()) {
26365 int Ratio = OrigVT.getSizeInBits() / OpVT.getSizeInBits();
26366 OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
26367 OrigVT.getVectorNumElements() / Ratio);
26368 OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
26369 DAG.getIntPtrConstant(0));
26370 }
26371 Op = DAG.getNode(ISD::BITCAST, DL, OpVT, OrigV);
26372 return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
26373 }
26374 }
26376 return SDValue();
26377 }
26379 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
26380 DAGCombinerInfo &DCI) const {
26381 SelectionDAG &DAG = DCI.DAG;
26382 switch (N->getOpcode()) {
26383 default: break;
26384 case ISD::EXTRACT_VECTOR_ELT:
26385 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
26386 case ISD::VSELECT:
26387 case ISD::SELECT:
26388 case X86ISD::SHRUNKBLEND:
26389 return PerformSELECTCombine(N, DAG, DCI, Subtarget);
26390 case ISD::BITCAST: return PerformBITCASTCombine(N, DAG);
26391 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget);
26392 case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget);
26393 case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget);
26394 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
26395 case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
26396 case ISD::SHL:
26397 case ISD::SRA:
26398 case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget);
26399 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
26400 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
26401 case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
26402 case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
26403 case ISD::MLOAD: return PerformMLOADCombine(N, DAG, DCI, Subtarget);
26404 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
26405 case ISD::MSTORE: return PerformMSTORECombine(N, DAG, Subtarget);
26406 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, Subtarget);
26407 case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
26408 case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget);
26409 case X86ISD::FXOR:
26410 case X86ISD::FOR: return PerformFORCombine(N, DAG);
26411 case X86ISD::FMIN:
26412 case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG);
26413 case X86ISD::FAND: return PerformFANDCombine(N, DAG);
26414 case X86ISD::FANDN: return PerformFANDNCombine(N, DAG);
26415 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
26416 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
26417 case ISD::ANY_EXTEND:
26418 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget);
26419 case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
26420 case ISD::SIGN_EXTEND_INREG:
26421 return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
26422 case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG, DCI, Subtarget);
26423 case ISD::SETCC: return PerformISDSETCCCombine(N, DAG, Subtarget);
26424 case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget);
26425 case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget);
26426 case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget);
26427 case X86ISD::SHUFP: // Handle all target specific shuffles
26428 case X86ISD::PALIGNR:
26429 case X86ISD::UNPCKH:
26430 case X86ISD::UNPCKL:
26431 case X86ISD::MOVHLPS:
26432 case X86ISD::MOVLHPS:
26433 case X86ISD::PSHUFB:
26434 case X86ISD::PSHUFD:
26435 case X86ISD::PSHUFHW:
26436 case X86ISD::PSHUFLW:
26437 case X86ISD::MOVSS:
26438 case X86ISD::MOVSD:
26439 case X86ISD::VPERMILPI:
26440 case X86ISD::VPERM2X128:
26441 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI, Subtarget);
26442 case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget);
26443 case ISD::INTRINSIC_WO_CHAIN:
26444 return PerformINTRINSIC_WO_CHAINCombine(N, DAG, Subtarget);
26445 case X86ISD::INSERTPS: {
26446 if (getTargetMachine().getOptLevel() > CodeGenOpt::None)
26447 return PerformINSERTPSCombine(N, DAG, Subtarget);
26448 break;
26449 }
26450 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DAG, Subtarget);
26451 }
26453 return SDValue();
26454 }
26456 /// isTypeDesirableForOp - Return true if the target has native support for
26457 /// the specified value type and it is 'desirable' to use the type for the
26458 /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
26459 /// instruction encodings are longer and some i16 instructions are slow.
26460 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
26461 if (!isTypeLegal(VT))
26462 return false;
26463 if (VT != MVT::i16)
26464 return true;
26466 switch (Opc) {
26467 default:
26468 return true;
26469 case ISD::LOAD:
26470 case ISD::SIGN_EXTEND:
26471 case ISD::ZERO_EXTEND:
26472 case ISD::ANY_EXTEND:
26473 case ISD::SHL:
26474 case ISD::SRL:
26475 case ISD::SUB:
26476 case ISD::ADD:
26477 case ISD::MUL:
26478 case ISD::AND:
26479 case ISD::OR:
26480 case ISD::XOR:
26481 return false;
26482 }
26483 }
26485 /// IsDesirableToPromoteOp - This method queries the target whether it is
26486 /// beneficial for dag combiner to promote the specified node. If true, it
26487 /// should return the desired promotion type by reference.
26488 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
26489 EVT VT = Op.getValueType();
26490 if (VT != MVT::i16)
26491 return false;
26493 bool Promote = false;
26494 bool Commute = false;
26495 switch (Op.getOpcode()) {
26496 default: break;
26497 case ISD::LOAD: {
26498 LoadSDNode *LD = cast<LoadSDNode>(Op);
26499 // If the non-extending load has a single use and it's not live out, then it
26500 // might be folded.
26501 if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
26502 Op.hasOneUse()*/) {
26503 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
26504 UE = Op.getNode()->use_end(); UI != UE; ++UI) {
26505 // The only case where we'd want to promote LOAD (rather than it being
26506 // promoted as an operand) is when its only use is liveout.
26507 if (UI->getOpcode() != ISD::CopyToReg)
26508 return false;
26509 }
26510 }
26511 Promote = true;
26512 break;
26513 }
26514 case ISD::SIGN_EXTEND:
26515 case ISD::ZERO_EXTEND:
26516 case ISD::ANY_EXTEND:
26517 Promote = true;
26518 break;
26519 case ISD::SHL:
26520 case ISD::SRL: {
26521 SDValue N0 = Op.getOperand(0);
26522 // Look out for (store (shl (load), x)).
26523 if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
26524 return false;
26525 Promote = true;
26526 break;
26527 }
26528 case ISD::ADD:
26529 case ISD::MUL:
26530 case ISD::AND:
26531 case ISD::OR:
26532 case ISD::XOR:
26533 Commute = true;
26534 // fallthrough
26535 case ISD::SUB: {
26536 SDValue N0 = Op.getOperand(0);
26537 SDValue N1 = Op.getOperand(1);
26538 if (!Commute && MayFoldLoad(N1))
26539 return false;
26540 // Avoid disabling potential load folding opportunities.
26541 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
26542 return false;
26543 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
26544 return false;
26545 Promote = true;
26546 }
26547 }
26549 PVT = MVT::i32;
26550 return Promote;
26551 }
26553 //===----------------------------------------------------------------------===//
26554 // X86 Inline Assembly Support
26555 //===----------------------------------------------------------------------===//
26556 namespace {
26558 // Helper to match a string separated by whitespace.
26559 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) {
26560 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace.
26562 for (unsigned i = 0, e = args.size(); i != e; ++i) {
26563 StringRef piece(*args[i]);
26564 if (!s.startswith(piece)) // Check if the piece matches.
26565 return false;
26567 s = s.substr(piece.size());
26568 StringRef::size_type pos = s.find_first_not_of(" \t");
26569 if (pos == 0) // We matched a prefix.
26570 return false;
26572 s = s.substr(pos);
26573 }
26575 return s.empty();
26576 }
26577 const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm = {};
26578 } // end anonymous namespace
26580 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
26582 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
26583 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
26584 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
26585 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
26587 if (AsmPieces.size() == 3)
26588 return true;
26589 else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
26590 return true;
26591 }
26592 }
26594 return false;
26595 }
26596 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
26597 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
26599 std::string AsmStr = IA->getAsmString();
26601 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
26602 if (!Ty || Ty->getBitWidth() % 16 != 0)
26603 return false;
26605 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
26606 SmallVector<StringRef, 4> AsmPieces;
26607 SplitString(AsmStr, AsmPieces, ";\n");
26609 switch (AsmPieces.size()) {
26610 default: return false;
26612 // FIXME: this should verify that we are targeting a 486 or better. If not,
26613 // we will turn this bswap into something that will be lowered to logical
26614 // ops instead of emitting the bswap asm. For now, we don't support 486 or
26615 // lower so don't worry about this.
26617 if (matchAsm(AsmPieces[0], "bswap", "$0") ||
26618 matchAsm(AsmPieces[0], "bswapl", "$0") ||
26619 matchAsm(AsmPieces[0], "bswapq", "$0") ||
26620 matchAsm(AsmPieces[0], "bswap", "${0:q}") ||
26621 matchAsm(AsmPieces[0], "bswapl", "${0:q}") ||
26622 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) {
26623 // No need to check constraints, nothing other than the equivalent of
26624 // "=r,0" would be valid here.
26625 return IntrinsicLowering::LowerToByteSwap(CI);
26628 // rorw $$8, ${0:w} --> llvm.bswap.i16
26629 if (CI->getType()->isIntegerTy(16) &&
26630 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26631 (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") ||
26632 matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) {
26634 const std::string &ConstraintsStr = IA->getConstraintString();
26635 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26636 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26637 if (clobbersFlagRegisters(AsmPieces))
26638 return IntrinsicLowering::LowerToByteSwap(CI);
26642 if (CI->getType()->isIntegerTy(32) &&
26643 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26644 matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") &&
26645 matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") &&
26646 matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) {
26648 const std::string &ConstraintsStr = IA->getConstraintString();
26649 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26650 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26651 if (clobbersFlagRegisters(AsmPieces))
26652 return IntrinsicLowering::LowerToByteSwap(CI);
26655 if (CI->getType()->isIntegerTy(64)) {
26656 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
26657 if (Constraints.size() >= 2 &&
26658 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
26659 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
26660 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
26661 if (matchAsm(AsmPieces[0], "bswap", "%eax") &&
26662 matchAsm(AsmPieces[1], "bswap", "%edx") &&
26663 matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx"))
26664 return IntrinsicLowering::LowerToByteSwap(CI);
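// For example, IR along the lines of
//   %r = call i32 asm "bswapl $0", "=r,0"(i32 %x)
// is rewritten into a call to @llvm.bswap.i32, letting instruction selection
// pick the best byte-swap sequence instead of pinning the exact asm text.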
/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'R': case 'q': case 'Q': case 'f': case 't':
    case 'u': case 'y': case 'x': case 'Y': case 'l':
      return C_RegisterClass;
    case 'a': case 'b': case 'c': case 'd':
    case 'S': case 'D': case 'A':
      return C_Register;
    case 'I': case 'J': case 'K': case 'L': case 'M':
    case 'N': case 'G': case 'C': case 'e': case 'Z':
      return C_Other;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}
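// E.g. 'x' (an SSE register) is C_RegisterClass and 'I' (an immediate in
// [0,31]) is C_Other, while explicit names such as "{eax}" are longer than
// one character and are classified by the TargetLowering fallback.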
/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
  X86TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
  case 'R': case 'q': case 'Q': case 'a': case 'b':
  case 'c': case 'd': case 'S': case 'D': case 'A':
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_SpecificReg;
    break;
  case 'f': case 't': case 'u':
    if (type->isFloatingPointTy())
      weight = CW_SpecificReg;
    break;
  case 'y':
    if (type->isX86_MMXTy() && Subtarget->hasMMX())
      weight = CW_SpecificReg;
    break;
  case 'x': case 'Y':
    if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
        ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256()))
      weight = CW_Register;
    break;
  case 'I':
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (C->getZExtValue() <= 31)
        weight = CW_Constant;
    }
    break;
  case 'J':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 63)
        weight = CW_Constant;
    }
    break;
  case 'K':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
        weight = CW_Constant;
    }
    break;
  case 'L':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
        weight = CW_Constant;
    }
    break;
  case 'M':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 3)
        weight = CW_Constant;
    }
    break;
  case 'N':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xff)
        weight = CW_Constant;
    }
    break;
  case 'G':
  case 'C':
    if (dyn_cast<ConstantFP>(CallOperandVal)) {
      weight = CW_Constant;
    }
    break;
  case 'e':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80000000LL) &&
          (C->getSExtValue() <= 0x7fffffffLL))
        weight = CW_Constant;
    }
    break;
  case 'Z':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xffffffff)
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}
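// E.g. for the multi-alternative constraint "Ir" with a constant operand of
// 5, alternative 'I' scores CW_Constant, the best weight, so the immediate
// form of the instruction is preferred over the register form.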
/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
const char *X86TargetLowering::
LowerXConstraint(EVT ConstraintVT) const {
  // FP X constraints get lowered to SSE1/2 registers if available, otherwise
  // 'f' like normal targets.
  if (ConstraintVT.isFloatingPoint()) {
    if (Subtarget->hasSSE2())
      return "Y";
    if (Subtarget->hasSSE1())
      return "x";
  }

  return TargetLowering::LowerXConstraint(ConstraintVT);
}
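// E.g. a floating-point operand constrained with "X" is re-lowered to "Y"
// (SSE2 registers) on SSE2 targets and to "x" on SSE1-only targets;
// everything else keeps the generic TargetLowering behavior.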
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 31) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'J':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 63) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'K':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<8>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'L':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
          (Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
        Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'M':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 3) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 255) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'O':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 127) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'e': {
    // 32-bit signed value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getSExtValue())) {
        // Widen to 64 bits here to get it sign extended.
        Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
        break;
      }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    }
    return;
  }
  case 'Z': {
    // 32-bit unsigned value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getZExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    return;
  }
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      // Widen to 64 bits here to get it sign extended.
      Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
      break;
    }

    // In any sort of PIC mode addresses need to be computed at runtime by
    // adding in a register or some sort of table lookup. These can't
    // be used as immediates.
    if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
      return;

    // If we are in non-pic codegen mode, we allow the address of a global (with
    // an optional displacement) to be used with 'i'.
    GlobalAddressSDNode *GA = nullptr;
    int64_t Offset = 0;

    // Match either (GA), (GA+C), (GA+C1+C2), etc.
    while (1) {
      if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
        Offset += GA->getOffset();
        break;
      } else if (Op.getOpcode() == ISD::ADD) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      } else if (Op.getOpcode() == ISD::SUB) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += -C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      }

      // Otherwise, this isn't something we can handle, reject it.
      return;
    }

    const GlobalValue *GV = GA->getGlobal();
    // If we require an extra load to get this address, as in PIC mode, we
    // can't accept it.
    if (isGlobalStubReference(
            Subtarget->ClassifyGlobalReference(GV, DAG.getTarget())))
      return;

    Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
                                        GA->getValueType(0), Offset);
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}
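// E.g. the classic port-I/O idiom
//   asm volatile("outb %b0, %w1" : : "a"(val), "Nd"(port));
// arrives here with constraint 'N': a constant port <= 255 becomes a target
// immediate; otherwise nothing is added and the register alternative is used.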
std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
    // TODO: Slight differences here in allocation order and leaving
    // RIP in the class. Do they matter any more here than they do
    // in the normal allocation?
    case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
      if (Subtarget->is64Bit()) {
        if (VT == MVT::i32 || VT == MVT::f32)
          return std::make_pair(0U, &X86::GR32RegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::GR16RegClass);
        if (VT == MVT::i8 || VT == MVT::i1)
          return std::make_pair(0U, &X86::GR8RegClass);
        if (VT == MVT::i64 || VT == MVT::f64)
          return std::make_pair(0U, &X86::GR64RegClass);
        break;
      }
      // 32-bit fallthrough
    case 'Q': // Q_REGS
      if (VT == MVT::i32 || VT == MVT::f32)
        return std::make_pair(0U, &X86::GR32_ABCDRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_ABCDRegClass);
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
      if (VT == MVT::i64)
        return std::make_pair(0U, &X86::GR64_ABCDRegClass);
      break;
    case 'r': // GENERAL_REGS
    case 'l': // INDEX_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8RegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16RegClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32RegClass);
      return std::make_pair(0U, &X86::GR64RegClass);
    case 'R': // LEGACY_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_NOREXRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_NOREXRegClass);
      if (VT == MVT::i32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32_NOREXRegClass);
      return std::make_pair(0U, &X86::GR64_NOREXRegClass);
    case 'f': // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP32RegClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP64RegClass);
      return std::make_pair(0U, &X86::RFP80RegClass);
    case 'y': // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, &X86::VR64RegClass);
    case 'Y': // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT.SimpleTy) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, &X86::FR32RegClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, &X86::FR64RegClass);
      // Vector types.
      case MVT::v16i8: case MVT::v8i16: case MVT::v4i32:
      case MVT::v2i64: case MVT::v4f32: case MVT::v2f64:
        return std::make_pair(0U, &X86::VR128RegClass);
      // AVX types.
      case MVT::v32i8: case MVT::v16i16: case MVT::v8i32:
      case MVT::v4i64: case MVT::v8f32:  case MVT::v4f64:
        return std::make_pair(0U, &X86::VR256RegClass);
      // AVX-512 types.
      case MVT::v8f64: case MVT::v16f32:
      case MVT::v16i32: case MVT::v8i64:
        return std::make_pair(0U, &X86::VR512RegClass);
      }
      break;
    }
  }
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (!Res.second) {
    // Map st(0)..st(7) to the FP0..FP7 pseudo registers.
    if (Constraint.size() == 7 && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 's' &&
        tolower(Constraint[2]) == 't' &&
        Constraint[3] == '(' &&
        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
        Constraint[5] == ')' &&
        Constraint[6] == '}') {
      Res.first = X86::FP0+Constraint[4]-'0';
      Res.second = &X86::RFP80RegClass;
      return Res;
    }

    // GCC allows "st(0)" to be called just plain "st".
    if (StringRef("{st}").equals_lower(Constraint)) {
      Res.first = X86::FP0;
      Res.second = &X86::RFP80RegClass;
      return Res;
    }

    // flags -> EFLAGS
    if (StringRef("{flags}").equals_lower(Constraint)) {
      Res.first = X86::EFLAGS;
      Res.second = &X86::CCRRegClass;
      return Res;
    }

    // 'A' means EAX + EDX.
    if (Constraint == "A") {
      Res.first = X86::EAX;
      Res.second = &X86::GR32_ADRegClass;
      return Res;
    }
    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
  // turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res; // Correct type already, nothing to do.
  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
  if (Res.second == &X86::GR16RegClass) {
    if (VT == MVT::i8 || VT == MVT::i1) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::AL; break;
      case X86::DX: DestReg = X86::DL; break;
      case X86::CX: DestReg = X86::CL; break;
      case X86::BX: DestReg = X86::BL; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR8RegClass;
      }
    } else if (VT == MVT::i32 || VT == MVT::f32) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::EAX; break;
      case X86::DX: DestReg = X86::EDX; break;
      case X86::CX: DestReg = X86::ECX; break;
      case X86::BX: DestReg = X86::EBX; break;
      case X86::SI: DestReg = X86::ESI; break;
      case X86::DI: DestReg = X86::EDI; break;
      case X86::BP: DestReg = X86::EBP; break;
      case X86::SP: DestReg = X86::ESP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR32RegClass;
      }
    } else if (VT == MVT::i64 || VT == MVT::f64) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::RAX; break;
      case X86::DX: DestReg = X86::RDX; break;
      case X86::CX: DestReg = X86::RCX; break;
      case X86::BX: DestReg = X86::RBX; break;
      case X86::SI: DestReg = X86::RSI; break;
      case X86::DI: DestReg = X86::RDI; break;
      case X86::BP: DestReg = X86::RBP; break;
      case X86::SP: DestReg = X86::RSP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR64RegClass;
      }
    }
  } else if (Res.second == &X86::FR32RegClass ||
             Res.second == &X86::FR64RegClass ||
             Res.second == &X86::VR128RegClass ||
             Res.second == &X86::VR256RegClass ||
             Res.second == &X86::FR32XRegClass ||
             Res.second == &X86::FR64XRegClass ||
             Res.second == &X86::VR128XRegClass ||
             Res.second == &X86::VR256XRegClass ||
             Res.second == &X86::VR512RegClass) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class. This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it can
    // find, ignoring the required type.

    if (VT == MVT::f32 || VT == MVT::i32)
      Res.second = &X86::FR32RegClass;
    else if (VT == MVT::f64 || VT == MVT::i64)
      Res.second = &X86::FR64RegClass;
    else if (X86::VR128RegClass.hasType(VT))
      Res.second = &X86::VR128RegClass;
    else if (X86::VR256RegClass.hasType(VT))
      Res.second = &X86::VR256RegClass;
    else if (X86::VR512RegClass.hasType(VT))
      Res.second = &X86::VR512RegClass;
  }

  return Res;
}
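// E.g. asm("bswap %0" : "+r"(x)) is served by the 'r' class switch above,
// while an explicit "{ax}" paired with an i32 operand comes back from the
// generic mapper as {AX, GR16} and is remapped to {EAX, GR32} here.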
int X86TargetLowering::getScalingFactorCost(const AddrMode &AM,
                                            Type *Ty) const {
  // Scaling factors are not free at all.
  // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
  // will take 2 allocations in the out of order engine instead of 1
  // for plain addressing mode, i.e. inst (reg1).
  // E.g.,
  // vaddps (%rsi,%rdx), %ymm0, %ymm1
  // Requires two allocations (one for the load, one for the computation)
  // whereas:
  // vaddps (%rsi), %ymm0, %ymm1
  // Requires just 1 allocation, i.e., freeing allocations for other operations
  // and having less micro operations to execute.
  //
  // For some X86 architectures, this is even worse because for instance for
  // stores, the complex addressing mode forces the instruction to use the
  // "load" ports instead of the dedicated "store" port.
  // E.g., on Haswell:
  // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
  // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
  if (isLegalAddressingMode(AM, Ty))
    // Scale represents reg2 * scale, thus account for 1
    // as soon as we use a second register.
    return AM.Scale != 0;
  return -1;
}
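/// isTargetFTOL - Return true if fp-to-integer conversion should go through
/// the MSVC _ftol2 runtime routine, which only applies to 32-bit
/// Windows/MSVC environments.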
bool X86TargetLowering::isTargetFTOL() const {
  return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit();
}