//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/VariadicFunction.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "X86IntrinsicsInfo.h"

using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> ExperimentalVectorWideningLegalization(
    "x86-experimental-vector-widening-legalization", cl::init(false),
    cl::desc("Enable an experimental vector type legalization through widening "
             "rather than promotion."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLowering(
    "x86-experimental-vector-shuffle-lowering", cl::init(true),
    cl::desc("Enable an experimental vector shuffle lowering code path."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLegality(
    "x86-experimental-vector-shuffle-legality", cl::init(false),
    cl::desc("Enable experimental shuffle legality based on the experimental "
             "shuffle lowering. Should only be used with the experimental "
             "shuffle lowering."),
    cl::Hidden);

static cl::opt<int> ReciprocalEstimateRefinementSteps(
    "x86-recip-refinement-steps", cl::init(1),
    cl::desc("Specify the number of Newton-Raphson iterations applied to the "
             "result of the hardware reciprocal estimate instruction."),
    cl::NotHidden);

// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

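/// Helper shared by Extract128BitVector and Extract256BitVector: extract the
/// vectorWidth-bit chunk of Vec that contains element IdxVal.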
static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
                                SelectionDAG &DAG, SDLoc dl,
                                unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits()/vectorWidth;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements()/Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(ResultVT);

  // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
                               * ElemsPerChunk);

  // If the input is a buildvector just emit a smaller one.
  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
                       makeArrayRef(Vec->op_begin() + NormalizedIdxVal,
                                    ElemsPerChunk));

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
}

/// Generate a DAG to grab 128-bits from a vector > 128 bits. This
/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
/// instructions or a simple subregister reference. Idx is an index in the
/// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert((Vec.getValueType().is256BitVector() ||
          Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 128);
}

/// Generate a DAG to grab 256-bits from a 512-bit vector.
static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 256);
}

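/// Helper shared by Insert128BitVector and Insert256BitVector: insert Vec into
/// the vectorWidth-bit chunk of Result that contains element IdxVal.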
static SDValue InsertSubVector(SDValue Result, SDValue Vec,
                               unsigned IdxVal, SelectionDAG &DAG,
                               SDLoc dl, unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  // Inserting UNDEF is Result
  if (Vec.getOpcode() == ISD::UNDEF)
    return Result;
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant vectorWidth bits.
  unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/vectorWidth)
                               * ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
}

/// Generate a DAG to put 128-bits into a vector > 128 bits. This
/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
/// simple superregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}

static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
}

/// Concat two 128-bit vectors into a 256 bit vector using VINSERTF128
/// instructions. This is used because creating CONCAT_VECTOR nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
}

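/// Concat two 256-bit vectors into a 512-bit vector by inserting both halves
/// into an UNDEF value of the wider type.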
static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert256BitVector(V, V2, NumElems/2, DAG, dl);
}

X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  TD = getDataLayout();

  // Set up the TargetLowering object.
  static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget->isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Bypass expensive divides on Atom when compiling with O2.
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget->hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget->hasSlowDivide64() && Subtarget->is64Bit())
      addBypassSlowDiv(64, 16);
  }

  if (Subtarget->isTargetKnownWindowsMSVC()) {
    // Setup Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);

    // The _ftol2 runtime function has an unusual calling conv, which
    // is modeled by a special pseudo-instruction.
    setLibcallName(RTLIB::FPTOUINT_F64_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F64_I32, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I32, nullptr);
  }

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetWindowsGNU()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  } else if (!TM.Options.UseSoftFloat) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

  if (!TM.Options.UseSoftFloat) {
    // SSE has no i16 to fp conversion, only i32
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
      // f32 and f64 cases are Legal, f80 case is not
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Promote);
  }

  // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else if (!TM.Options.UseSoftFloat) {
    // Since AVX is a superset of SSE3, only check for SSE here.
    if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  }

  if (isTargetFTOL()) {
    // Use the _ftol2 runtime function, which has a pseudo-instruction
    // to handle its weird calling convention.
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
    setOperationAction(ISD::ADDC, VT, Custom);
    setOperationAction(ISD::ADDE, VT, Custom);
    setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f80, Expand);
  setOperationAction(ISD::BR_CC, MVT::i8, Expand);
  setOperationAction(ISD::BR_CC, MVT::i16, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f80, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationAction(ISD::CTTZ, MVT::i8, Promote);
  AddPromotedToType (ISD::CTTZ, MVT::i8, MVT::i32);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i8, Promote);
  AddPromotedToType (ISD::CTTZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  if (Subtarget->hasBMI()) {
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::i32, Custom);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  }

  if (Subtarget->hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationAction(ISD::CTLZ, MVT::i8, Promote);
    AddPromotedToType (ISD::CTLZ, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Promote);
    AddPromotedToType (ISD::CTLZ_ZERO_UNDEF, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTLZ, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::i32, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::CTLZ, MVT::i64, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }

  // Special handling for half-precision floating point conversions.
  // If we don't have F16C support, then lower half float conversions
  // into library calls.
  if (TM.Options.UseSoftFloat || !Subtarget->hasF16C()) {
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  }

  // There's never any support for operations beyond MVT::f32.
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f80, MVT::f16, Expand);

  if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP, MVT::i8, Promote);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  if (!Subtarget->hasMOVBE())
    setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i8, Custom);
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuation, user-level threading, etc. As a result, no
  // other SjLj exception interfaces are implemented and please don't build
  // your own exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // Darwin ABI issues.
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Expand certain atomics
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

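  // CMPXCHG16B lets us custom-lower a 128-bit compare-and-swap.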
  if (Subtarget->hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }

  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWin64()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  if (Subtarget->is64Bit() && !Subtarget->isTargetWin64()) {
    // TargetInfo::X86_64ABIBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  } else {
    // TargetInfo::CharPtrBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, getPointerTy(), Custom);

  if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::FR64RegClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // Lower this to FGETSIGNx86 plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0)); // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (!TM.Options.UseSoftFloat) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FSIN, MVT::f32, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f32, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Long double always uses X87.
  if (!TM.Options.UseSoftFloat) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
      addLegalFPImmediate(TmpFlt); // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt); // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2); // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
    }

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f80, Expand);
      setOperationAction(ISD::FCOS, MVT::f80, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
    }

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL, MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT, MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  // MMX-sized vectors (other than x86mmx) are expected to be expanded
  // into smaller operations.
  setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
  setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
  setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
  setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
  setOperationAction(ISD::AND, MVT::v8i8, Expand);
  setOperationAction(ISD::AND, MVT::v4i16, Expand);
  setOperationAction(ISD::AND, MVT::v2i32, Expand);
  setOperationAction(ISD::AND, MVT::v1i64, Expand);
  setOperationAction(ISD::OR, MVT::v8i8, Expand);
  setOperationAction(ISD::OR, MVT::v4i16, Expand);
  setOperationAction(ISD::OR, MVT::v2i32, Expand);
  setOperationAction(ISD::OR, MVT::v1i64, Expand);
  setOperationAction(ISD::XOR, MVT::v8i8, Expand);
  setOperationAction(ISD::XOR, MVT::v4i16, Expand);
  setOperationAction(ISD::XOR, MVT::v2i32, Expand);
  setOperationAction(ISD::XOR, MVT::v1i64, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
  setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
  setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
  setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
  setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
  setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
  setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);

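  // With SSE1 we can use the v4f32 register class; the basic float
  // operations on it become legal below.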
  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, &X86::VR128RegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::FABS, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, &X86::VR128RegClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
    setOperationAction(ISD::FABS, MVT::v2f64, Custom);

    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Only provide customized ctpop vector bit twiddling for vector types we
    // know to perform better than using the popcnt instructions on each vector
    // element. If popcnt isn't supported, always provide the custom version.
    if (!Subtarget->hasPOPCNT()) {
      setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
      setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);
    }

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      // Do not attempt to custom lower non-128-bit vectors
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    // We support custom legalizing of sext and anyext loads for specific
    // memory vector types which we can load as a scalar (or sequence of
    // scalars) and extend in-register to a legal 128-bit vector type. For sext
    // loads these must work with a single scalar load.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
    }

    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-128-bit vectors
      if (!VT.is128BitVector())
        continue;

      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType (ISD::AND, VT, MVT::v2i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType (ISD::OR, VT, MVT::v2i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType (ISD::XOR, VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType (ISD::LOAD, VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v2i64);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    // As there is no 64-bit GPR available, we need to build a special custom
    // sequence to convert from v2i32 to v2f32.
    if (!Subtarget->is64Bit())
      setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);

    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
  }

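  // SSE4.1 adds packed rounding instructions, blends, and sign/zero-extending
  // loads used below.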
  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    setOperationAction(ISD::VSELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v2i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4i32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8i16, Custom);
    // There is no BLENDI for byte vectors. We don't need to custom lower
    // some vselects for now.
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);

    // SSE41 brings specific instructions for doing vector sign extend even in
    // cases where we don't have SRA.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
    }

    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
    setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);

    setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);

    // i8 and i16 vectors are custom because the source register and source
    // memory operand types are not the same width. f32 vectors are
    // custom since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    // FIXME: these should be Legal, but that's only for the case where
    // the index is constant. For now custom expand to deal with that.
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }

  if (Subtarget->hasSSE2()) {
    setOperationAction(ISD::SRL, MVT::v8i16, Custom);
    setOperationAction(ISD::SRL, MVT::v16i8, Custom);

    setOperationAction(ISD::SHL, MVT::v8i16, Custom);
    setOperationAction(ISD::SHL, MVT::v16i8, Custom);

    setOperationAction(ISD::SRA, MVT::v8i16, Custom);
    setOperationAction(ISD::SRA, MVT::v16i8, Custom);

    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
    setOperationAction(ISD::SRL, MVT::v2i64, Custom);
    setOperationAction(ISD::SRL, MVT::v4i32, Custom);

    setOperationAction(ISD::SHL, MVT::v2i64, Custom);
    setOperationAction(ISD::SHL, MVT::v4i32, Custom);

    setOperationAction(ISD::SRA, MVT::v4i32, Custom);
  }

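  // AVX (hasFp256) enables the 256-bit vector register classes and the
  // operations set up below.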
  if (!TM.Options.UseSoftFloat && Subtarget->hasFp256()) {
    addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64, &X86::VR256RegClass);

    setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i64, Legal);

    setOperationAction(ISD::FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v8f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v8f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
    setOperationAction(ISD::FABS, MVT::v8f32, Custom);

    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
    setOperationAction(ISD::FABS, MVT::v4f64, Custom);

    // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
    // even though v8i16 is a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);

    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);

    setOperationAction(ISD::SRL, MVT::v16i16, Custom);
    setOperationAction(ISD::SRL, MVT::v32i8, Custom);

    setOperationAction(ISD::SHL, MVT::v16i16, Custom);
    setOperationAction(ISD::SHL, MVT::v32i8, Custom);

    setOperationAction(ISD::SRA, MVT::v16i16, Custom);
    setOperationAction(ISD::SRA, MVT::v32i8, Custom);

    setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i64, Custom);

    setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::VSELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8i32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);

    if (Subtarget->hasFMA() || Subtarget->hasFMA4()) {
      setOperationAction(ISD::FMA, MVT::v8f32, Legal);
      setOperationAction(ISD::FMA, MVT::v4f64, Legal);
      setOperationAction(ISD::FMA, MVT::v4f32, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::f32, Legal);
      setOperationAction(ISD::FMA, MVT::f64, Legal);
    }

1275 if (Subtarget->hasInt256()) {
1276 setOperationAction(ISD::ADD, MVT::v4i64, Legal);
1277 setOperationAction(ISD::ADD, MVT::v8i32, Legal);
1278 setOperationAction(ISD::ADD, MVT::v16i16, Legal);
1279 setOperationAction(ISD::ADD, MVT::v32i8, Legal);
1281 setOperationAction(ISD::SUB, MVT::v4i64, Legal);
1282 setOperationAction(ISD::SUB, MVT::v8i32, Legal);
1283 setOperationAction(ISD::SUB, MVT::v16i16, Legal);
1284 setOperationAction(ISD::SUB, MVT::v32i8, Legal);
1286 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1287 setOperationAction(ISD::MUL, MVT::v8i32, Legal);
1288 setOperationAction(ISD::MUL, MVT::v16i16, Legal);
1289 // Don't lower v32i8 because there is no 128-bit byte mul
1291 setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
1292 setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
1293 setOperationAction(ISD::MULHU, MVT::v16i16, Legal);
1294 setOperationAction(ISD::MULHS, MVT::v16i16, Legal);
1296 setOperationAction(ISD::VSELECT, MVT::v16i16, Custom);
1297 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1299 // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
1300 // when we have a 256-bit-wide blend with immediate.
1301 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1303 // Only provide customized ctpop vector bit twiddling for vector types we
1304 // know to perform better than using the popcnt instructions on each
1305 // vector element. If popcnt isn't supported, always provide the custom version.
1307 if (!Subtarget->hasPOPCNT())
1308 setOperationAction(ISD::CTPOP, MVT::v4i64, Custom);
1310 // Custom CTPOP always performs better on natively supported v8i32
1311 setOperationAction(ISD::CTPOP, MVT::v8i32, Custom);
1313 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1314 setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1315 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1316 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1317 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1318 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1319 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1321 setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1322 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1323 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1324 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1325 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1326 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1328 setOperationAction(ISD::ADD, MVT::v4i64, Custom);
1329 setOperationAction(ISD::ADD, MVT::v8i32, Custom);
1330 setOperationAction(ISD::ADD, MVT::v16i16, Custom);
1331 setOperationAction(ISD::ADD, MVT::v32i8, Custom);
1333 setOperationAction(ISD::SUB, MVT::v4i64, Custom);
1334 setOperationAction(ISD::SUB, MVT::v8i32, Custom);
1335 setOperationAction(ISD::SUB, MVT::v16i16, Custom);
1336 setOperationAction(ISD::SUB, MVT::v32i8, Custom);
1338 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1339 setOperationAction(ISD::MUL, MVT::v8i32, Custom);
1340 setOperationAction(ISD::MUL, MVT::v16i16, Custom);
1341 // Don't lower v32i8 because there is no 128-bit byte mul
1344 // In the customized shift lowering, the legal cases in AVX2 will be recognized.
1346 setOperationAction(ISD::SRL, MVT::v4i64, Custom);
1347 setOperationAction(ISD::SRL, MVT::v8i32, Custom);
1349 setOperationAction(ISD::SHL, MVT::v4i64, Custom);
1350 setOperationAction(ISD::SHL, MVT::v8i32, Custom);
1352 setOperationAction(ISD::SRA, MVT::v8i32, Custom);
1354 // Custom lower several nodes for 256-bit types.
1355 for (MVT VT : MVT::vector_valuetypes()) {
1356 if (VT.getScalarSizeInBits() >= 32) {
1357 setOperationAction(ISD::MLOAD, VT, Legal);
1358 setOperationAction(ISD::MSTORE, VT, Legal);
1360 // Extract subvector is special because the value type
1361 // (result) is 128-bit but the source is 256-bit wide.
1362 if (VT.is128BitVector()) {
1363 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1365 // Do not attempt to custom lower other non-256-bit vectors
1366 if (!VT.is256BitVector())
1369 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1370 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1371 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1372 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1373 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1374 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1375 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1378 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
1379 for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
1380 MVT VT = (MVT::SimpleValueType)i;
1382 // Do not attempt to promote non-256-bit vectors
1383 if (!VT.is256BitVector())
1386 setOperationAction(ISD::AND, VT, Promote);
1387 AddPromotedToType (ISD::AND, VT, MVT::v4i64);
1388 setOperationAction(ISD::OR, VT, Promote);
1389 AddPromotedToType (ISD::OR, VT, MVT::v4i64);
1390 setOperationAction(ISD::XOR, VT, Promote);
1391 AddPromotedToType (ISD::XOR, VT, MVT::v4i64);
1392 setOperationAction(ISD::LOAD, VT, Promote);
1393 AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
1394 setOperationAction(ISD::SELECT, VT, Promote);
1395 AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
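// Note: for these vector operations, Promote plus AddPromotedToType means the
// operands are bitcast to v4i64, the operation is performed there, and the
// result is bitcast back, so only the v4i64 forms need instruction patterns.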
1399 if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512()) {
1400 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1401 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1402 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1403 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
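// With AVX-512, i1 and the narrow i1 vectors live in the k0-k7 mask
// registers, modelled by the VK* register classes below.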
1405 addRegisterClass(MVT::i1, &X86::VK1RegClass);
1406 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1407 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1409 for (MVT VT : MVT::fp_vector_valuetypes())
1410 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
1412 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
1413 setOperationAction(ISD::SETCC, MVT::i1, Custom);
1414 setOperationAction(ISD::XOR, MVT::i1, Legal);
1415 setOperationAction(ISD::OR, MVT::i1, Legal);
1416 setOperationAction(ISD::AND, MVT::i1, Legal);
1417 setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
1418 setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
1419 setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
1420 setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
1421 setOperationAction(ISD::LOAD, MVT::v16i1, Legal);
1423 setOperationAction(ISD::FADD, MVT::v16f32, Legal);
1424 setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
1425 setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
1426 setOperationAction(ISD::FDIV, MVT::v16f32, Legal);
1427 setOperationAction(ISD::FSQRT, MVT::v16f32, Legal);
1428 setOperationAction(ISD::FNEG, MVT::v16f32, Custom);
1430 setOperationAction(ISD::FADD, MVT::v8f64, Legal);
1431 setOperationAction(ISD::FSUB, MVT::v8f64, Legal);
1432 setOperationAction(ISD::FMUL, MVT::v8f64, Legal);
1433 setOperationAction(ISD::FDIV, MVT::v8f64, Legal);
1434 setOperationAction(ISD::FSQRT, MVT::v8f64, Legal);
1435 setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
1436 setOperationAction(ISD::FMA, MVT::v8f64, Legal);
1437 setOperationAction(ISD::FMA, MVT::v16f32, Legal);
1439 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
1440 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
1441 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
1442 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
1443 if (Subtarget->is64Bit()) {
1444 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
1445 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
1446 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
1447 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
1449 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1450 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1451 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1452 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1453 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1454 setOperationAction(ISD::SINT_TO_FP, MVT::v8i1, Custom);
1455 setOperationAction(ISD::SINT_TO_FP, MVT::v16i1, Custom);
1456 setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Promote);
1457 setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Promote);
1458 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1459 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1460 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1461 setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
1462 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
1464 setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
1465 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1466 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1467 setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
1468 setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
1469 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1470 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1471 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1472 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1473 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1474 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
1475 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
1476 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
1478 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
1479 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
1480 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
1481 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
1482 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
1483 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Legal);
1485 setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
1486 setOperationAction(ISD::SETCC, MVT::v8i1, Custom);
1488 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1490 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
1491 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
1492 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
1493 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
1494 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
1495 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
1496 setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
1497 setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
1498 setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
1500 setOperationAction(ISD::ADD, MVT::v8i64, Legal);
1501 setOperationAction(ISD::ADD, MVT::v16i32, Legal);
1503 setOperationAction(ISD::SUB, MVT::v8i64, Legal);
1504 setOperationAction(ISD::SUB, MVT::v16i32, Legal);
1506 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1508 setOperationAction(ISD::SRL, MVT::v8i64, Custom);
1509 setOperationAction(ISD::SRL, MVT::v16i32, Custom);
1511 setOperationAction(ISD::SHL, MVT::v8i64, Custom);
1512 setOperationAction(ISD::SHL, MVT::v16i32, Custom);
1514 setOperationAction(ISD::SRA, MVT::v8i64, Custom);
1515 setOperationAction(ISD::SRA, MVT::v16i32, Custom);
1517 setOperationAction(ISD::AND, MVT::v8i64, Legal);
1518 setOperationAction(ISD::OR, MVT::v8i64, Legal);
1519 setOperationAction(ISD::XOR, MVT::v8i64, Legal);
1520 setOperationAction(ISD::AND, MVT::v16i32, Legal);
1521 setOperationAction(ISD::OR, MVT::v16i32, Legal);
1522 setOperationAction(ISD::XOR, MVT::v16i32, Legal);
1524 if (Subtarget->hasCDI()) {
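// AVX-512 CDI adds native vector leading-zero-count instructions
// (vplzcntd/vplzcntq), so CTLZ needs no expansion for these types.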
1525 setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
1526 setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
1529 // Custom lower several nodes.
1530 for (MVT VT : MVT::vector_valuetypes()) {
1531 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1532 // Extract subvector is special because the value type
1533 // (result) is 256/128-bit but the source is 512-bit wide.
1534 if (VT.is128BitVector() || VT.is256BitVector()) {
1535 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1537 if (VT.getVectorElementType() == MVT::i1)
1538 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1540 // Do not attempt to custom lower other non-512-bit vectors
1541 if (!VT.is512BitVector())
1544 if (EltSize >= 32) {
1545 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1546 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1547 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1548 setOperationAction(ISD::VSELECT, VT, Legal);
1549 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1550 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1551 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1552 setOperationAction(ISD::MLOAD, VT, Legal);
1553 setOperationAction(ISD::MSTORE, VT, Legal);
1556 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1557 MVT VT = (MVT::SimpleValueType)i;
1559 // Do not attempt to promote non-512-bit vectors.
1560 if (!VT.is512BitVector())
1563 setOperationAction(ISD::SELECT, VT, Promote);
1564 AddPromotedToType (ISD::SELECT, VT, MVT::v8i64);
1568 if (!TM.Options.UseSoftFloat && Subtarget->hasBWI()) {
1569 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1570 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1572 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1573 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1575 setOperationAction(ISD::LOAD, MVT::v32i16, Legal);
1576 setOperationAction(ISD::LOAD, MVT::v64i8, Legal);
1577 setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
1578 setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
1579 setOperationAction(ISD::ADD, MVT::v32i16, Legal);
1580 setOperationAction(ISD::ADD, MVT::v64i8, Legal);
1581 setOperationAction(ISD::SUB, MVT::v32i16, Legal);
1582 setOperationAction(ISD::SUB, MVT::v64i8, Legal);
1583 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1585 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1586 const MVT VT = (MVT::SimpleValueType)i;
1588 const unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1590 // Do not attempt to promote non-512-bit vectors.
1591 if (!VT.is512BitVector())
1595 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1596 setOperationAction(ISD::VSELECT, VT, Legal);
1601 if (!TM.Options.UseSoftFloat && Subtarget->hasVLX()) {
1602 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1603 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1605 setOperationAction(ISD::SETCC, MVT::v4i1, Custom);
1606 setOperationAction(ISD::SETCC, MVT::v2i1, Custom);
1607 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Legal);
1609 setOperationAction(ISD::AND, MVT::v8i32, Legal);
1610 setOperationAction(ISD::OR, MVT::v8i32, Legal);
1611 setOperationAction(ISD::XOR, MVT::v8i32, Legal);
1612 setOperationAction(ISD::AND, MVT::v4i32, Legal);
1613 setOperationAction(ISD::OR, MVT::v4i32, Legal);
1614 setOperationAction(ISD::XOR, MVT::v4i32, Legal);
1617 // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
1618 // of this type with custom code.
1619 for (MVT VT : MVT::vector_valuetypes())
1620 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
1622 // We want to custom lower some of our intrinsics.
1623 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1624 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1625 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1626 if (!Subtarget->is64Bit())
1627 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1629 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1630 // handle type legalization for these operations here.
1632 // FIXME: We really should do custom legalization for addition and
1633 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1634 // than generic legalization for 64-bit multiplication-with-overflow, though.
1635 for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
1636 // Add/Sub/Mul with overflow operations are custom lowered.
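// They are lowered to X86 arithmetic nodes that also produce EFLAGS, so the
// overflow bit can be read back with a setcc rather than recomputed.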
1638 setOperationAction(ISD::SADDO, VT, Custom);
1639 setOperationAction(ISD::UADDO, VT, Custom);
1640 setOperationAction(ISD::SSUBO, VT, Custom);
1641 setOperationAction(ISD::USUBO, VT, Custom);
1642 setOperationAction(ISD::SMULO, VT, Custom);
1643 setOperationAction(ISD::UMULO, VT, Custom);
1647 if (!Subtarget->is64Bit()) {
1648 // These libcalls are not available in 32-bit.
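// Clearing the libcall names makes the legalizer expand these 128-bit shifts
// inline instead of emitting a call to a routine that isn't available here.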
1649 setLibcallName(RTLIB::SHL_I128, nullptr);
1650 setLibcallName(RTLIB::SRL_I128, nullptr);
1651 setLibcallName(RTLIB::SRA_I128, nullptr);
1654 // Combine sin / cos into one node or libcall if possible.
1655 if (Subtarget->hasSinCos()) {
1656 setLibcallName(RTLIB::SINCOS_F32, "sincosf");
1657 setLibcallName(RTLIB::SINCOS_F64, "sincos");
1658 if (Subtarget->isTargetDarwin()) {
1659 // For MacOSX, we don't want the normal expansion of a libcall to sincos.
1660 // We want to issue a libcall to __sincos_stret to avoid memory traffic.
1661 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1662 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1666 if (Subtarget->isTargetWin64()) {
1667 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1668 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1669 setOperationAction(ISD::SREM, MVT::i128, Custom);
1670 setOperationAction(ISD::UREM, MVT::i128, Custom);
1671 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1672 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
1675 // We have target-specific dag combine patterns for the following nodes:
1676 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1677 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1678 setTargetDAGCombine(ISD::BITCAST);
1679 setTargetDAGCombine(ISD::VSELECT);
1680 setTargetDAGCombine(ISD::SELECT);
1681 setTargetDAGCombine(ISD::SHL);
1682 setTargetDAGCombine(ISD::SRA);
1683 setTargetDAGCombine(ISD::SRL);
1684 setTargetDAGCombine(ISD::OR);
1685 setTargetDAGCombine(ISD::AND);
1686 setTargetDAGCombine(ISD::ADD);
1687 setTargetDAGCombine(ISD::FADD);
1688 setTargetDAGCombine(ISD::FSUB);
1689 setTargetDAGCombine(ISD::FMA);
1690 setTargetDAGCombine(ISD::SUB);
1691 setTargetDAGCombine(ISD::LOAD);
1692 setTargetDAGCombine(ISD::MLOAD);
1693 setTargetDAGCombine(ISD::STORE);
1694 setTargetDAGCombine(ISD::MSTORE);
1695 setTargetDAGCombine(ISD::ZERO_EXTEND);
1696 setTargetDAGCombine(ISD::ANY_EXTEND);
1697 setTargetDAGCombine(ISD::SIGN_EXTEND);
1698 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1699 setTargetDAGCombine(ISD::TRUNCATE);
1700 setTargetDAGCombine(ISD::SINT_TO_FP);
1701 setTargetDAGCombine(ISD::SETCC);
1702 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1703 setTargetDAGCombine(ISD::BUILD_VECTOR);
1704 setTargetDAGCombine(ISD::MUL);
1705 setTargetDAGCombine(ISD::XOR);
1707 computeRegisterProperties();
1709 // On Darwin, -Os means optimize for size without hurting performance,
1710 // so do not reduce the limit.
1711 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
1712 MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
1713 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
1714 MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1715 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
1716 MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1717 setPrefLoopAlignment(4); // 2^4 bytes.
1719 // Predictable cmovs don't hurt on Atom because it's in-order.
1720 PredictableSelectIsExpensive = !Subtarget->isAtom();
1721 EnableExtLdPromotion = true;
1722 setPrefFunctionAlignment(4); // 2^4 bytes.
1724 verifyIntrinsicTables();
1727 // This has so far only been implemented for 64-bit MachO.
1728 bool X86TargetLowering::useLoadStackGuardNode() const {
1729 return Subtarget->isTargetMachO() && Subtarget->is64Bit();
1732 TargetLoweringBase::LegalizeTypeAction
1733 X86TargetLowering::getPreferredVectorAction(EVT VT) const {
1734 if (ExperimentalVectorWideningLegalization &&
1735 VT.getVectorNumElements() != 1 &&
1736 VT.getVectorElementType().getSimpleVT() != MVT::i1)
1737 return TypeWidenVector;
1739 return TargetLoweringBase::getPreferredVectorAction(VT);
1742 EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
1744 return Subtarget->hasAVX512() ? MVT::i1 : MVT::i8;
1746 const unsigned NumElts = VT.getVectorNumElements();
1747 const EVT EltVT = VT.getVectorElementType();
1748 if (VT.is512BitVector()) {
1749 if (Subtarget->hasAVX512())
1750 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1751 EltVT == MVT::f32 || EltVT == MVT::f64)
1753 case 8: return MVT::v8i1;
1754 case 16: return MVT::v16i1;
1756 if (Subtarget->hasBWI())
1757 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1759 case 32: return MVT::v32i1;
1760 case 64: return MVT::v64i1;
1764 if (VT.is256BitVector() || VT.is128BitVector()) {
1765 if (Subtarget->hasVLX())
1766 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1767 EltVT == MVT::f32 || EltVT == MVT::f64)
1769 case 2: return MVT::v2i1;
1770 case 4: return MVT::v4i1;
1771 case 8: return MVT::v8i1;
1773 if (Subtarget->hasBWI() && Subtarget->hasVLX())
1774 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1776 case 8: return MVT::v8i1;
1777 case 16: return MVT::v16i1;
1778 case 32: return MVT::v32i1;
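// Without mask registers, a vector compare produces all-ones/all-zeros
// elements of the operand width, hence the integer vector of matching shape.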
1782 return VT.changeVectorElementTypeToInteger();
1785 /// Helper for getByValTypeAlignment to determine
1786 /// the desired ByVal argument alignment.
1787 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
1790 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1791 if (VTy->getBitWidth() == 128)
1793 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1794 unsigned EltAlign = 0;
1795 getMaxByValAlign(ATy->getElementType(), EltAlign);
1796 if (EltAlign > MaxAlign)
1797 MaxAlign = EltAlign;
1798 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1799 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1800 unsigned EltAlign = 0;
1801 getMaxByValAlign(STy->getElementType(i), EltAlign);
1802 if (EltAlign > MaxAlign)
1803 MaxAlign = EltAlign;
1810 /// Return the desired alignment for ByVal aggregate
1811 /// function arguments in the caller parameter area. For X86, aggregates
1812 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
1813 /// are at 4-byte boundaries.
1814 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
1815 if (Subtarget->is64Bit()) {
1816 // Max of 8 and alignment of type.
1817 unsigned TyAlign = TD->getABITypeAlignment(Ty);
1824 if (Subtarget->hasSSE1())
1825 getMaxByValAlign(Ty, Align);
1829 /// Returns the target specific optimal type for load
1830 /// and store operations as a result of memset, memcpy, and memmove
1831 /// lowering. If DstAlign is zero, that means it is safe because the destination
1832 /// alignment can satisfy any constraint. Similarly, if SrcAlign is zero it
1833 /// means there isn't a need to check it against the alignment requirement,
1834 /// probably because the source does not need to be loaded. If 'IsMemset' is
1835 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
1836 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
1837 /// source is constant so it does not need to be loaded.
1838 /// It returns EVT::Other if the type should be determined using generic
1839 /// target-independent logic.
1841 X86TargetLowering::getOptimalMemOpType(uint64_t Size,
1842 unsigned DstAlign, unsigned SrcAlign,
1843 bool IsMemset, bool ZeroMemset,
1845 MachineFunction &MF) const {
1846 const Function *F = MF.getFunction();
1847 if ((!IsMemset || ZeroMemset) &&
1848 !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
1850 (Subtarget->isUnalignedMemAccessFast() ||
1851 ((DstAlign == 0 || DstAlign >= 16) &&
1852 (SrcAlign == 0 || SrcAlign >= 16)))) {
1854 if (Subtarget->hasInt256())
1856 if (Subtarget->hasFp256())
1859 if (Subtarget->hasSSE2())
1861 if (Subtarget->hasSSE1())
1863 } else if (!MemcpyStrSrc && Size >= 8 &&
1864 !Subtarget->is64Bit() &&
1865 Subtarget->hasSSE2()) {
1866 // Do not use f64 to lower memcpy if source is string constant. It's
1867 // better to use i32 to avoid the loads.
1871 if (Subtarget->is64Bit() && Size >= 8)
1876 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
1878 return X86ScalarSSEf32;
1879 else if (VT == MVT::f64)
1880 return X86ScalarSSEf64;
1885 X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
1890 *Fast = Subtarget->isUnalignedMemAccessFast();
1894 /// Return the entry encoding for a jump table in the
1895 /// current function. The returned value is a member of the
1896 /// MachineJumpTableInfo::JTEntryKind enum.
1897 unsigned X86TargetLowering::getJumpTableEncoding() const {
1898 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF symbol.
1900 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1901 Subtarget->isPICStyleGOT())
1902 return MachineJumpTableInfo::EK_Custom32;
1904 // Otherwise, use the normal jump table encoding heuristics.
1905 return TargetLowering::getJumpTableEncoding();
1909 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1910 const MachineBasicBlock *MBB,
1911 unsigned uid, MCContext &Ctx) const {
1912 assert(MBB->getParent()->getTarget().getRelocationModel() == Reloc::PIC_ &&
1913 Subtarget->isPICStyleGOT());
1914 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF entries.
1916 return MCSymbolRefExpr::Create(MBB->getSymbol(),
1917 MCSymbolRefExpr::VK_GOTOFF, Ctx);
1920 /// Returns relocation base for the given PIC jumptable.
1921 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1922 SelectionDAG &DAG) const {
1923 if (!Subtarget->is64Bit())
1924 // This doesn't have SDLoc associated with it, but is not really the
1925 // same as a Register.
1926 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy());
1930 /// This returns the relocation base for the given PIC jumptable,
1931 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
1932 const MCExpr *X86TargetLowering::
1933 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1934 MCContext &Ctx) const {
1935 // X86-64 uses RIP relative addressing based on the jump table label.
1936 if (Subtarget->isPICStyleRIPRel())
1937 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1939 // Otherwise, the reference is relative to the PIC base.
1940 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
1943 // FIXME: Why is this routine here? Move to RegInfo!
1944 std::pair<const TargetRegisterClass*, uint8_t>
1945 X86TargetLowering::findRepresentativeClass(MVT VT) const{
1946 const TargetRegisterClass *RRC = nullptr;
1948 switch (VT.SimpleTy) {
1950 return TargetLowering::findRepresentativeClass(VT);
1951 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1952 RRC = Subtarget->is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
1955 RRC = &X86::VR64RegClass;
1957 case MVT::f32: case MVT::f64:
1958 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1959 case MVT::v4f32: case MVT::v2f64:
1960 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
1962 RRC = &X86::VR128RegClass;
1965 return std::make_pair(RRC, Cost);
1968 bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
1969 unsigned &Offset) const {
1970 if (!Subtarget->isTargetLinux())
1973 if (Subtarget->is64Bit()) {
1974 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
1976 if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
1988 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1989 unsigned DestAS) const {
1990 assert(SrcAS != DestAS && "Expected different address spaces!");
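// On x86, address spaces 256, 257 and 258 denote the gs, fs and ss segments;
// only casts between ordinary (< 256) address spaces are no-ops.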
1992 return SrcAS < 256 && DestAS < 256;
1995 //===----------------------------------------------------------------------===//
1996 // Return Value Calling Convention Implementation
1997 //===----------------------------------------------------------------------===//
1999 #include "X86GenCallingConv.inc"
2002 X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2003 MachineFunction &MF, bool isVarArg,
2004 const SmallVectorImpl<ISD::OutputArg> &Outs,
2005 LLVMContext &Context) const {
2006 SmallVector<CCValAssign, 16> RVLocs;
2007 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2008 return CCInfo.CheckReturn(Outs, RetCC_X86);
2011 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
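// R11 is not used for argument passing and is not callee-saved in either
// 64-bit calling convention, so it is safe to clobber as a scratch register.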
2012 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2017 X86TargetLowering::LowerReturn(SDValue Chain,
2018 CallingConv::ID CallConv, bool isVarArg,
2019 const SmallVectorImpl<ISD::OutputArg> &Outs,
2020 const SmallVectorImpl<SDValue> &OutVals,
2021 SDLoc dl, SelectionDAG &DAG) const {
2022 MachineFunction &MF = DAG.getMachineFunction();
2023 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2025 SmallVector<CCValAssign, 16> RVLocs;
2026 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2027 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2030 SmallVector<SDValue, 6> RetOps;
2031 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2032 // Operand #1 = Bytes To Pop
2033 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
2036 // Copy the result values into the output registers.
2037 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2038 CCValAssign &VA = RVLocs[i];
2039 assert(VA.isRegLoc() && "Can only return in registers!");
2040 SDValue ValToCopy = OutVals[i];
2041 EVT ValVT = ValToCopy.getValueType();
2043 // Promote values to the appropriate types.
2044 if (VA.getLocInfo() == CCValAssign::SExt)
2045 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2046 else if (VA.getLocInfo() == CCValAssign::ZExt)
2047 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2048 else if (VA.getLocInfo() == CCValAssign::AExt)
2049 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2050 else if (VA.getLocInfo() == CCValAssign::BCvt)
2051 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy);
2053 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2054 "Unexpected FP-extend for return value.");
2056 // If this is x86-64, and we disabled SSE, we can't return FP values,
2057 // or SSE or MMX vectors.
2058 if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
2059 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
2060 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
2061 report_fatal_error("SSE register return with SSE disabled");
2063 // Likewise we can't return F64 values with SSE1 only. gcc does so, but
2064 // llvm-gcc has never done it right and no one has noticed, so this
2065 // should be OK for now.
2066 if (ValVT == MVT::f64 &&
2067 (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
2068 report_fatal_error("SSE2 register return with SSE2 disabled");
2070 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2071 // the RET instruction and handled by the FP Stackifier.
2072 if (VA.getLocReg() == X86::FP0 ||
2073 VA.getLocReg() == X86::FP1) {
2074 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2075 // change the value to the FP stack register class.
2076 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2077 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2078 RetOps.push_back(ValToCopy);
2079 // Don't emit a copytoreg.
2083 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2084 // which is returned in RAX / RDX.
2085 if (Subtarget->is64Bit()) {
2086 if (ValVT == MVT::x86mmx) {
2087 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2088 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy);
2089 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2091 // If we don't have SSE2 available, convert to v4f32 so the generated
2092 // register is legal.
2093 if (!Subtarget->hasSSE2())
2094 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, ValToCopy);
2099 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
2100 Flag = Chain.getValue(1);
2101 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2104 // The x86-64 ABIs require that for returning structs by value we copy
2105 // the sret argument into %rax/%eax (depending on ABI) for the return.
2106 // Win32 requires us to put the sret argument to %eax as well.
2107 // We saved the argument into a virtual register in the entry block,
2108 // so now we copy the value out and into %rax/%eax.
2110 // Checking Function.hasStructRetAttr() here is insufficient because the IR
2111 // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
2112 // false, then an sret argument may be implicitly inserted in the SelDAG. In
2113 // either case FuncInfo->setSRetReturnReg() will have been called.
2114 if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
2115 assert((Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) &&
2116 "No need for an sret register");
2117 SDValue Val = DAG.getCopyFromReg(Chain, dl, SRetReg, getPointerTy());
2120 = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ?
2121 X86::RAX : X86::EAX;
2122 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2123 Flag = Chain.getValue(1);
2125 // RAX/EAX now acts like a return value.
2126 RetOps.push_back(DAG.getRegister(RetValReg, getPointerTy()));
2129 RetOps[0] = Chain; // Update chain.
2131 // Add the flag if we have it.
2133 RetOps.push_back(Flag);
2135 return DAG.getNode(X86ISD::RET_FLAG, dl, MVT::Other, RetOps);
2138 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2139 if (N->getNumValues() != 1)
2141 if (!N->hasNUsesOfValue(1, 0))
2144 SDValue TCChain = Chain;
2145 SDNode *Copy = *N->use_begin();
2146 if (Copy->getOpcode() == ISD::CopyToReg) {
2147 // If the copy has a glue operand, we conservatively assume it isn't safe to
2148 // perform a tail call.
2149 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2151 TCChain = Copy->getOperand(0);
2152 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2155 bool HasRet = false;
2156 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2158 if (UI->getOpcode() != X86ISD::RET_FLAG)
2160 // If we are returning more than one value, we can definitely
2161 // not make a tail call; see PR19530.
2162 if (UI->getNumOperands() > 4)
2164 if (UI->getNumOperands() == 4 &&
2165 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2178 X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
2179 ISD::NodeType ExtendKind) const {
2181 // TODO: Is this also valid on 32-bit?
2182 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
2183 ReturnMVT = MVT::i8;
2185 ReturnMVT = MVT::i32;
2187 EVT MinVT = getRegisterType(Context, ReturnMVT);
2188 return VT.bitsLT(MinVT) ? MinVT : VT;
2191 /// Lower the result values of a call into the
2192 /// appropriate copies out of appropriate physical registers.
2195 X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
2196 CallingConv::ID CallConv, bool isVarArg,
2197 const SmallVectorImpl<ISD::InputArg> &Ins,
2198 SDLoc dl, SelectionDAG &DAG,
2199 SmallVectorImpl<SDValue> &InVals) const {
2201 // Assign locations to each value returned by this call.
2202 SmallVector<CCValAssign, 16> RVLocs;
2203 bool Is64Bit = Subtarget->is64Bit();
2204 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2206 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2208 // Copy all of the result registers out of their specified physreg.
2209 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2210 CCValAssign &VA = RVLocs[i];
2211 EVT CopyVT = VA.getValVT();
2213 // If this is x86-64, and we disabled SSE, we can't return FP values
2214 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
2215 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
2216 report_fatal_error("SSE register return with SSE disabled");
2219 // If we prefer to use the value in xmm registers, copy it out as f80 and
2220 // use a truncate to move it from fp stack reg to xmm reg.
2221 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
2222 isScalarFPTypeInSSEReg(VA.getValVT()))
2225 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
2226 CopyVT, InFlag).getValue(1);
2227 SDValue Val = Chain.getValue(0);
2229 if (CopyVT != VA.getValVT())
2230 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
2231 // This truncation won't change the value.
2232 DAG.getIntPtrConstant(1));
2234 InFlag = Chain.getValue(2);
2235 InVals.push_back(Val);
2241 //===----------------------------------------------------------------------===//
2242 // C & StdCall & Fast Calling Convention implementation
2243 //===----------------------------------------------------------------------===//
2244 // The StdCall calling convention is standard for many Windows API
2245 // routines. It differs from the C calling convention just a little: the
2246 // callee should clean up the stack, not the caller. Symbols should also be
2247 // decorated in some fancy way :) It doesn't support any vector arguments.
2248 // For info on fast calling convention see Fast Calling Convention (tail call)
2249 // implementation LowerX86_32FastCCCallTo.
2251 /// CallIsStructReturn - Determines whether a call uses struct return semantics.
2253 enum StructReturnType { NotStructReturn, RegStructReturn, StackStructReturn };
2258 static StructReturnType
2259 callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
2261 return NotStructReturn;
2263 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
2264 if (!Flags.isSRet())
2265 return NotStructReturn;
2266 if (Flags.isInReg())
2267 return RegStructReturn;
2268 return StackStructReturn;
2271 /// Determines whether a function uses struct return semantics.
2272 static StructReturnType
2273 argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
2275 return NotStructReturn;
2277 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
2278 if (!Flags.isSRet())
2279 return NotStructReturn;
2280 if (Flags.isInReg())
2281 return RegStructReturn;
2282 return StackStructReturn;
2285 /// Make a copy of an aggregate at address specified by "Src" to address
2286 /// "Dst" with size and alignment information specified by the specific
2287 /// parameter attribute. The copy will be passed as a byval function parameter.
2289 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
2290 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
2292 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
2294 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2295 /*isVolatile*/false, /*AlwaysInline=*/true,
2296 MachinePointerInfo(), MachinePointerInfo());
2299 /// Return true if the calling convention is one that
2300 /// supports tail call optimization.
2301 static bool IsTailCallConvention(CallingConv::ID CC) {
2302 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
2303 CC == CallingConv::HiPE);
2306 /// \brief Return true if the calling convention is a C calling convention.
2307 static bool IsCCallConvention(CallingConv::ID CC) {
2308 return (CC == CallingConv::C || CC == CallingConv::X86_64_Win64 ||
2309 CC == CallingConv::X86_64_SysV);
2312 bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
2313 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
2317 CallingConv::ID CalleeCC = CS.getCallingConv();
2318 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
2324 /// Return true if the function is being made into
2325 /// a tailcall target by changing its ABI.
2326 static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
2327 bool GuaranteedTailCallOpt) {
2328 return GuaranteedTailCallOpt && IsTailCallConvention(CC);
2332 X86TargetLowering::LowerMemArgument(SDValue Chain,
2333 CallingConv::ID CallConv,
2334 const SmallVectorImpl<ISD::InputArg> &Ins,
2335 SDLoc dl, SelectionDAG &DAG,
2336 const CCValAssign &VA,
2337 MachineFrameInfo *MFI,
2339 // Create the nodes corresponding to a load from this parameter slot.
2340 ISD::ArgFlagsTy Flags = Ins[i].Flags;
2341 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(
2342 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
2343 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
2346 // If the value is passed by pointer, we have the address passed instead of the value itself.
2348 if (VA.getLocInfo() == CCValAssign::Indirect)
2349 ValVT = VA.getLocVT();
2351 ValVT = VA.getValVT();
2353 // FIXME: For now, all byval parameter objects are marked mutable. This can be
2354 // changed with more analysis.
2355 // In case of tail call optimization, mark all arguments mutable, since they
2356 // could be overwritten by the lowering of arguments in case of a tail call.
2357 if (Flags.isByVal()) {
2358 unsigned Bytes = Flags.getByValSize();
2359 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
2360 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
2361 return DAG.getFrameIndex(FI, getPointerTy());
2363 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
2364 VA.getLocMemOffset(), isImmutable);
2365 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2366 return DAG.getLoad(ValVT, dl, Chain, FIN,
2367 MachinePointerInfo::getFixedStack(FI),
2368 false, false, false, 0);
2372 // FIXME: Get this from tablegen.
2373 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
2374 const X86Subtarget *Subtarget) {
2375 assert(Subtarget->is64Bit());
2377 if (Subtarget->isCallingConvWin64(CallConv)) {
2378 static const MCPhysReg GPR64ArgRegsWin64[] = {
2379 X86::RCX, X86::RDX, X86::R8, X86::R9
2381 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
2384 static const MCPhysReg GPR64ArgRegs64Bit[] = {
2385 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
2387 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
2390 // FIXME: Get this from tablegen.
2391 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
2392 CallingConv::ID CallConv,
2393 const X86Subtarget *Subtarget) {
2394 assert(Subtarget->is64Bit());
2395 if (Subtarget->isCallingConvWin64(CallConv)) {
2396 // The XMM registers which might contain var arg parameters are shadowed
2397 // in their paired GPR, so we only need to save the GPRs to their home slots.
2399 // TODO: __vectorcall will change this.
2403 const Function *Fn = MF.getFunction();
2404 bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
2405 assert(!(MF.getTarget().Options.UseSoftFloat && NoImplicitFloatOps) &&
2406 "SSE register cannot be used when SSE is disabled!");
2407 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps ||
2408 !Subtarget->hasSSE1())
2409 // Kernel mode asks for SSE to be disabled, so there are no XMM argument registers.
2413 static const MCPhysReg XMMArgRegs64Bit[] = {
2414 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2415 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2417 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
2421 X86TargetLowering::LowerFormalArguments(SDValue Chain,
2422 CallingConv::ID CallConv,
2424 const SmallVectorImpl<ISD::InputArg> &Ins,
2427 SmallVectorImpl<SDValue> &InVals)
2429 MachineFunction &MF = DAG.getMachineFunction();
2430 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2432 const Function* Fn = MF.getFunction();
2433 if (Fn->hasExternalLinkage() &&
2434 Subtarget->isTargetCygMing() &&
2435 Fn->getName() == "main")
2436 FuncInfo->setForceFramePointer(true);
2438 MachineFrameInfo *MFI = MF.getFrameInfo();
2439 bool Is64Bit = Subtarget->is64Bit();
2440 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2442 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2443 "Var args not supported with calling convention fastcc, ghc or hipe");
2445 // Assign locations to all of the incoming arguments.
2446 SmallVector<CCValAssign, 16> ArgLocs;
2447 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2449 // Allocate shadow area for Win64
2451 CCInfo.AllocateStack(32, 8);
2453 CCInfo.AnalyzeFormalArguments(Ins, CC_X86);
2455 unsigned LastVal = ~0U;
2457 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2458 CCValAssign &VA = ArgLocs[i];
2459 // TODO: If an arg is passed in two places (e.g. reg and stack), skip the later ones.
2461 assert(VA.getValNo() != LastVal &&
2462 "Don't support value assigned to multiple locs yet");
2464 LastVal = VA.getValNo();
2466 if (VA.isRegLoc()) {
2467 EVT RegVT = VA.getLocVT();
2468 const TargetRegisterClass *RC;
2469 if (RegVT == MVT::i32)
2470 RC = &X86::GR32RegClass;
2471 else if (Is64Bit && RegVT == MVT::i64)
2472 RC = &X86::GR64RegClass;
2473 else if (RegVT == MVT::f32)
2474 RC = &X86::FR32RegClass;
2475 else if (RegVT == MVT::f64)
2476 RC = &X86::FR64RegClass;
2477 else if (RegVT.is512BitVector())
2478 RC = &X86::VR512RegClass;
2479 else if (RegVT.is256BitVector())
2480 RC = &X86::VR256RegClass;
2481 else if (RegVT.is128BitVector())
2482 RC = &X86::VR128RegClass;
2483 else if (RegVT == MVT::x86mmx)
2484 RC = &X86::VR64RegClass;
2485 else if (RegVT == MVT::i1)
2486 RC = &X86::VK1RegClass;
2487 else if (RegVT == MVT::v8i1)
2488 RC = &X86::VK8RegClass;
2489 else if (RegVT == MVT::v16i1)
2490 RC = &X86::VK16RegClass;
2491 else if (RegVT == MVT::v32i1)
2492 RC = &X86::VK32RegClass;
2493 else if (RegVT == MVT::v64i1)
2494 RC = &X86::VK64RegClass;
2496 llvm_unreachable("Unknown argument type!");
2498 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2499 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
2501 // If this is an 8 or 16-bit value, it is really passed promoted to 32
2502 // bits. Insert an assert[sz]ext to capture this, then truncate to the right size.
2504 if (VA.getLocInfo() == CCValAssign::SExt)
2505 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2506 DAG.getValueType(VA.getValVT()));
2507 else if (VA.getLocInfo() == CCValAssign::ZExt)
2508 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2509 DAG.getValueType(VA.getValVT()));
2510 else if (VA.getLocInfo() == CCValAssign::BCvt)
2511 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
2513 if (VA.isExtInLoc()) {
2514 // Handle MMX values passed in XMM regs.
2515 if (RegVT.isVector())
2516 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
2518 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2521 assert(VA.isMemLoc());
2522 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i);
2525 // If value is passed via pointer - do a load.
2526 if (VA.getLocInfo() == CCValAssign::Indirect)
2527 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
2528 MachinePointerInfo(), false, false, false, 0);
2530 InVals.push_back(ArgValue);
2533 if (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) {
2534 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2535 // The x86-64 ABIs require that for returning structs by value we copy
2536 // the sret argument into %rax/%eax (depending on ABI) for the return.
2537 // Win32 requires us to put the sret argument to %eax as well.
2538 // Save the argument into a virtual register so that we can access it
2539 // from the return points.
2540 if (Ins[i].Flags.isSRet()) {
2541 unsigned Reg = FuncInfo->getSRetReturnReg();
2543 MVT PtrTy = getPointerTy();
2544 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
2545 FuncInfo->setSRetReturnReg(Reg);
2547 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[i]);
2548 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
2554 unsigned StackSize = CCInfo.getNextStackOffset();
2555 // Align stack specially for tail calls.
2556 if (FuncIsMadeTailCallSafe(CallConv,
2557 MF.getTarget().Options.GuaranteedTailCallOpt))
2558 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
2560 // If the function takes variable number of arguments, make a frame index for
2561 // the start of the first vararg value... for expansion of llvm.va_start. We
2562 // can skip this if there are no va_start calls.
2563 if (MFI->hasVAStart() &&
2564 (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
2565 CallConv != CallingConv::X86_ThisCall))) {
2566 FuncInfo->setVarArgsFrameIndex(
2567 MFI->CreateFixedObject(1, StackSize, true));
2570 // Figure out if XMM registers are in use.
2571 assert(!(MF.getTarget().Options.UseSoftFloat &&
2572 Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
2573 "SSE register cannot be used when SSE is disabled!");
2575 // 64-bit calling conventions support varargs and register parameters, so we
2576 // have to do extra work to spill them in the prologue.
2577 if (Is64Bit && isVarArg && MFI->hasVAStart()) {
2578 // Find the first unallocated argument registers.
2579 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
2580 ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
2581 unsigned NumIntRegs =
2582 CCInfo.getFirstUnallocated(ArgGPRs.data(), ArgGPRs.size());
2583 unsigned NumXMMRegs =
2584 CCInfo.getFirstUnallocated(ArgXMMs.data(), ArgXMMs.size());
2585 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
2586 "SSE register cannot be used when SSE is disabled!");
2588 // Gather all the live in physical registers.
2589 SmallVector<SDValue, 6> LiveGPRs;
2590 SmallVector<SDValue, 8> LiveXMMRegs;
2592 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
2593 unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
2595 DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
2597 if (!ArgXMMs.empty()) {
2598 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2599 ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
2600 for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
2601 unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
2602 LiveXMMRegs.push_back(
2603 DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
2608 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
2609 // Get to the caller-allocated home save location. Add 8 to account
2610 // for the return address.
2611 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
2612 FuncInfo->setRegSaveFrameIndex(
2613 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
2614 // Fixup to set vararg frame on shadow area (4 x i64).
2616 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
2618 // For X86-64, if there are vararg parameters that are passed via
2619 // registers, then we must store them to their spots on the stack so
2620 // they may be loaded by dereferencing the result of va_next.
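// The offsets recorded below become the initial gp_offset/fp_offset fields of
// the SysV va_list: each GPR slot is 8 bytes at the start of the register save
// area, and each XMM slot is 16 bytes following the GPR area.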
2621 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
2622 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
2623 FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
2624 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
2627 // Store the integer parameter registers.
2628 SmallVector<SDValue, 8> MemOps;
2629 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
2631 unsigned Offset = FuncInfo->getVarArgsGPOffset();
2632 for (SDValue Val : LiveGPRs) {
2633 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
2634 DAG.getIntPtrConstant(Offset));
2636 DAG.getStore(Val.getValue(1), dl, Val, FIN,
2637 MachinePointerInfo::getFixedStack(
2638 FuncInfo->getRegSaveFrameIndex(), Offset),
2640 MemOps.push_back(Store);
2644 if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
2645 // Now store the XMM (fp + vector) parameter registers.
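// VASTART_SAVE_XMM_REGS is expanded later by a custom inserter into a test of
// AL that guards the stores, so the XMM spills only execute when the caller
// actually passed vector arguments.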
2646 SmallVector<SDValue, 12> SaveXMMOps;
2647 SaveXMMOps.push_back(Chain);
2648 SaveXMMOps.push_back(ALVal);
2649 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2650 FuncInfo->getRegSaveFrameIndex()));
2651 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2652 FuncInfo->getVarArgsFPOffset()));
2653 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
2655 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
2656 MVT::Other, SaveXMMOps));
2659 if (!MemOps.empty())
2660 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
2663 if (isVarArg && MFI->hasMustTailInVarArgFunc()) {
2664 // Find the largest legal vector type.
2665 MVT VecVT = MVT::Other;
2666 // FIXME: Only some x86_32 calling conventions support AVX512.
2667 if (Subtarget->hasAVX512() &&
2668 (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
2669 CallConv == CallingConv::Intel_OCL_BI)))
2670 VecVT = MVT::v16f32;
2671 else if (Subtarget->hasAVX())
2673 else if (Subtarget->hasSSE2())
2676 // We forward some GPRs and some vector types.
2677 SmallVector<MVT, 2> RegParmTypes;
2678 MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
2679 RegParmTypes.push_back(IntVT);
2680 if (VecVT != MVT::Other)
2681 RegParmTypes.push_back(VecVT);
2683 // Compute the set of forwarded registers. The rest are scratch.
2684 SmallVectorImpl<ForwardedRegister> &Forwards =
2685 FuncInfo->getForwardedMustTailRegParms();
2686 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
2688 // Conservatively forward AL on x86_64, since it might be used for varargs.
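// The SysV ABI uses AL to communicate the number of XMM registers holding
// variadic arguments, so a musttail callee may rely on its value.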
2689 if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
2690 unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2691 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
2694 // Copy all forwards from physical to virtual registers.
2695 for (ForwardedRegister &F : Forwards) {
2696 // FIXME: Can we use a less constrained schedule?
2697 SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
2698 F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
2699 Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
2703 // Some CCs need callee pop.
2704 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
2705 MF.getTarget().Options.GuaranteedTailCallOpt)) {
2706 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
2708 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
2709 // If this is an sret function, the return should pop the hidden pointer.
2710 if (!Is64Bit && !IsTailCallConvention(CallConv) &&
2711 !Subtarget->getTargetTriple().isOSMSVCRT() &&
2712 argsAreStructReturn(Ins) == StackStructReturn)
2713 FuncInfo->setBytesToPopOnReturn(4);
2717 // RegSaveFrameIndex is X86-64 only.
2718 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
2719 if (CallConv == CallingConv::X86_FastCall ||
2720 CallConv == CallingConv::X86_ThisCall)
2721 // fastcc functions can't have varargs.
2722 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
2725 FuncInfo->setArgumentStackSize(StackSize);
2731 X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
2732 SDValue StackPtr, SDValue Arg,
2733 SDLoc dl, SelectionDAG &DAG,
2734 const CCValAssign &VA,
2735 ISD::ArgFlagsTy Flags) const {
2736 unsigned LocMemOffset = VA.getLocMemOffset();
2737 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
2738 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
2739 if (Flags.isByVal())
2740 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
2742 return DAG.getStore(Chain, dl, Arg, PtrOff,
2743 MachinePointerInfo::getStack(LocMemOffset),
2747 /// Emit a load of the return address if tail call
2748 /// optimization is performed and it is required.
2750 X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
2751 SDValue &OutRetAddr, SDValue Chain,
2752 bool IsTailCall, bool Is64Bit,
2753 int FPDiff, SDLoc dl) const {
2754 // Adjust the Return address stack slot.
2755 EVT VT = getPointerTy();
2756 OutRetAddr = getReturnAddressFrameIndex(DAG);
2758 // Load the "old" Return address.
2759 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
2760 false, false, false, 0);
2761 return SDValue(OutRetAddr.getNode(), 1);
2764 /// Emit a store of the return address if tail call
2765 /// optimization is performed and it is required (FPDiff!=0).
2766 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
2767 SDValue Chain, SDValue RetAddrFrIdx,
2768 EVT PtrVT, unsigned SlotSize,
2769 int FPDiff, SDLoc dl) {
2770 // Store the return address to the appropriate stack slot.
2771 if (!FPDiff) return Chain;
2772 // Calculate the new stack slot for the return address.
2773 int NewReturnAddrFI =
2774 MF.getFrameInfo()->CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
2776 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
2777 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
2778 MachinePointerInfo::getFixedStack(NewReturnAddrFI),
2784 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2785 SmallVectorImpl<SDValue> &InVals) const {
2786 SelectionDAG &DAG = CLI.DAG;
2788 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2789 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2790 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2791 SDValue Chain = CLI.Chain;
2792 SDValue Callee = CLI.Callee;
2793 CallingConv::ID CallConv = CLI.CallConv;
2794 bool &isTailCall = CLI.IsTailCall;
2795 bool isVarArg = CLI.IsVarArg;
2797 MachineFunction &MF = DAG.getMachineFunction();
2798 bool Is64Bit = Subtarget->is64Bit();
2799 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2800 StructReturnType SR = callIsStructReturn(Outs);
2801 bool IsSibcall = false;
2802 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
2804 if (MF.getTarget().Options.DisableTailCalls)
2807 bool IsMustTail = CLI.CS && CLI.CS->isMustTailCall();
2809 // Force this to be a tail call. The verifier rules are enough to ensure
2810 // that we can lower this successfully without moving the return address around.
2813 } else if (isTailCall) {
2814 // Check if it's really possible to do a tail call.
2815 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
2816 isVarArg, SR != NotStructReturn,
2817 MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
2818 Outs, OutVals, Ins, DAG);
2820 // Sibcalls are automatically detected tailcalls which do not require ABI changes.
2822 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
2829 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2830 "Var args not supported with calling convention fastcc, ghc or hipe");
2832 // Analyze operands of the call, assigning locations to each operand.
2833 SmallVector<CCValAssign, 16> ArgLocs;
2834 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2836 // Allocate shadow area for Win64
2838 CCInfo.AllocateStack(32, 8);
2840 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2842 // Get a count of how many bytes are to be pushed on the stack.
2843 unsigned NumBytes = CCInfo.getNextStackOffset();
2845 // This is a sibcall. The memory operands are already available in the
2846 // caller's incoming argument area, i.e. in its own caller's stack frame.
2848 else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
2849 IsTailCallConvention(CallConv))
2850 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
2853 if (isTailCall && !IsSibcall && !IsMustTail) {
2854 // Lower arguments at fp - stackoffset + fpdiff.
2855 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
2857 FPDiff = NumBytesCallerPushed - NumBytes;
2859 // Record the delta by which the return address stack slot has to move,
2860 // but only if this call requires a larger movement than any previous one.
2861 if (FPDiff < X86Info->getTCReturnAddrDelta())
2862 X86Info->setTCReturnAddrDelta(FPDiff);
2865 unsigned NumBytesToPush = NumBytes;
2866 unsigned NumBytesToPop = NumBytes;
2868 // If we have an inalloca argument, all stack space has already been allocated
2869 // for us and will be right at the top of the stack. We don't support multiple
2870 // arguments passed in memory when using inalloca.
2871 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
2873 if (!ArgLocs.back().isMemLoc())
2874 report_fatal_error("cannot use inalloca attribute on a register "
2876 if (ArgLocs.back().getLocMemOffset() != 0)
2877 report_fatal_error("any parameter with the inalloca attribute must be "
2878 "the only memory argument");
2882 Chain = DAG.getCALLSEQ_START(
2883 Chain, DAG.getIntPtrConstant(NumBytesToPush, true), dl);
2885 SDValue RetAddrFrIdx;
2886 // Load return address for tail calls.
2887 if (isTailCall && FPDiff)
2888 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2889 Is64Bit, FPDiff, dl);
2891 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2892 SmallVector<SDValue, 8> MemOpChains;
2895 // Walk the register/memloc assignments, inserting copies/loads. In the case
2896 // of tail call optimization arguments are handled later.
2897 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2898 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2899 // Skip inalloca arguments, they have already been written.
2900 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2901 if (Flags.isInAlloca())
2904 CCValAssign &VA = ArgLocs[i];
2905 EVT RegVT = VA.getLocVT();
2906 SDValue Arg = OutVals[i];
2907 bool isByVal = Flags.isByVal();
2909 // Promote the value if needed.
2910 switch (VA.getLocInfo()) {
2911 default: llvm_unreachable("Unknown loc info!");
2912 case CCValAssign::Full: break;
2913 case CCValAssign::SExt:
2914 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2916 case CCValAssign::ZExt:
2917 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2919 case CCValAssign::AExt:
2920 if (RegVT.is128BitVector()) {
2921 // Special case: passing MMX values in XMM registers.
2922 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
2923 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
2924 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
2926 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
2928 case CCValAssign::BCvt:
2929 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
2931 case CCValAssign::Indirect: {
2932 // Store the argument.
2933 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2934 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2935 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
2936 MachinePointerInfo::getFixedStack(FI),
2943 if (VA.isRegLoc()) {
2944 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2945 if (isVarArg && IsWin64) {
2946 // Win64 ABI requires argument XMM reg to be copied to the corresponding
2947 // shadow reg if callee is a varargs function.
2948 unsigned ShadowReg = 0;
2949 switch (VA.getLocReg()) {
2950 case X86::XMM0: ShadowReg = X86::RCX; break;
2951 case X86::XMM1: ShadowReg = X86::RDX; break;
2952 case X86::XMM2: ShadowReg = X86::R8; break;
2953 case X86::XMM3: ShadowReg = X86::R9; break;
2956 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2958 } else if (!IsSibcall && (!isTailCall || isByVal)) {
2959 assert(VA.isMemLoc());
2960 if (!StackPtr.getNode())
2961 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
2963 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2964 dl, DAG, VA, Flags));
2968 if (!MemOpChains.empty())
2969 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2971 if (Subtarget->isPICStyleGOT()) {
2972 // ELF / PIC requires GOT in the EBX register before function calls via PLT
2975 RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
2976 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy())));
2978 // If we are tail calling and generating PIC/GOT style code, load the
2979 // address of the callee into ECX. The value in ecx is used as target of
2980 // the tail jump. This is done to circumvent the ebx/callee-saved problem
2981 // for tail calls on PIC/GOT architectures. Normally we would just put the
2982 // address of GOT into ebx and then call target@PLT. But for tail calls
2983 // ebx would be restored (since ebx is callee saved) before jumping to the
2986 // Note: The actual moving to ECX is done further down.
2987 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
2988 if (G && !G->getGlobal()->hasHiddenVisibility() &&
2989 !G->getGlobal()->hasProtectedVisibility())
2990 Callee = LowerGlobalAddress(Callee, DAG);
2991 else if (isa<ExternalSymbolSDNode>(Callee))
2992 Callee = LowerExternalSymbol(Callee, DAG);
2996 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
2997 // From AMD64 ABI document:
2998 // For calls that may call functions that use varargs or stdargs
2999 // (prototype-less calls or calls to functions containing ellipsis (...) in
3000 // the declaration) %al is used as hidden argument to specify the number
3001 // of SSE registers used. The contents of %al do not need to match exactly
3002 // the number of registers, but must be an upper bound on the number of SSE
3003 // registers used and is in the range 0 - 8 inclusive.
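//
// An illustrative sketch (not part of this lowering itself): for a
// prototype-less call such as printf("%f\n", x) with x a double, the value
// travels in %xmm0, so the caller materializes an upper bound in %al, e.g.
//   movb $1, %al
//   callq printf
// Any value from 1 up to 8 would be an equally valid upper bound here.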
3005 // Count the number of XMM registers allocated.
3006 static const MCPhysReg XMMArgRegs[] = {
3007 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3008 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3010 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
3011 assert((Subtarget->hasSSE1() || !NumXMMRegs)
3012 && "SSE registers cannot be used when SSE is disabled");
3014 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
3015 DAG.getConstant(NumXMMRegs, MVT::i8)));
3018 if (isVarArg && IsMustTail) {
3019 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
3020 for (const auto &F : Forwards) {
3021 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3022 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
3026 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
3027 // don't need this because the eligibility check rejects calls that require
3028 // shuffling arguments passed in memory.
3029 if (!IsSibcall && isTailCall) {
3030 // Force all the incoming stack arguments to be loaded from the stack
3031 // before any new outgoing arguments are stored to the stack, because the
3032 // outgoing stack slots may alias the incoming argument stack slots, and
3033 // the alias isn't otherwise explicit. This is slightly more conservative
3034 // than necessary, because it means that each store effectively depends
3035 // on every argument instead of just those arguments it would clobber.
3036 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
3038 SmallVector<SDValue, 8> MemOpChains2;
3041 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3042 CCValAssign &VA = ArgLocs[i];
3045 assert(VA.isMemLoc());
3046 SDValue Arg = OutVals[i];
3047 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3048 // Skip inalloca arguments. They don't require any work.
3049 if (Flags.isInAlloca())
3051 // Create frame index.
3052 int32_t Offset = VA.getLocMemOffset()+FPDiff;
3053 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
3054 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
3055 FIN = DAG.getFrameIndex(FI, getPointerTy());
3057 if (Flags.isByVal()) {
3058 // Copy relative to framepointer.
3059 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
3060 if (!StackPtr.getNode())
3061 StackPtr = DAG.getCopyFromReg(Chain, dl,
3062 RegInfo->getStackRegister(),
3064 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
3066 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
3070 // Store relative to framepointer.
3071 MemOpChains2.push_back(
3072 DAG.getStore(ArgChain, dl, Arg, FIN,
3073 MachinePointerInfo::getFixedStack(FI),
3078 if (!MemOpChains2.empty())
3079 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
3081 // Store the return address to the appropriate stack slot.
3082 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
3083 getPointerTy(), RegInfo->getSlotSize(),
3087 // Build a sequence of copy-to-reg nodes chained together with token chain
3088 // and flag operands which copy the outgoing args into registers.
3090 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3091 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3092 RegsToPass[i].second, InFlag);
3093 InFlag = Chain.getValue(1);
3096 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
3097 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
3098 // In the 64-bit large code model, we have to make all calls
3099 // through a register, since the call instruction's 32-bit
3100 // pc-relative offset may not be large enough to hold the whole
3102 } else if (Callee->getOpcode() == ISD::GlobalAddress) {
3103 // If the callee is a GlobalAddress node (quite common, every direct call
3104 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
3106 GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee);
3108 // We should use extra load for direct calls to dllimported functions in
3110 const GlobalValue *GV = G->getGlobal();
3111 if (!GV->hasDLLImportStorageClass()) {
3112 unsigned char OpFlags = 0;
3113 bool ExtraLoad = false;
3114 unsigned WrapperKind = ISD::DELETED_NODE;
3116 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
3117 // external symbols must go through the PLT in PIC mode. If the symbol
3118 // has hidden or protected visibility, or if it is static or local, then
3119 // we don't need to use the PLT - we can directly call it.
3120 if (Subtarget->isTargetELF() &&
3121 DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
3122 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
3123 OpFlags = X86II::MO_PLT;
3124 } else if (Subtarget->isPICStyleStubAny() &&
3125 (GV->isDeclaration() || GV->isWeakForLinker()) &&
3126 (!Subtarget->getTargetTriple().isMacOSX() ||
3127 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3128 // PC-relative references to external symbols should go through $stub,
3129 // unless we're building with the leopard linker or later, which
3130 // automatically synthesizes these stubs.
3131 OpFlags = X86II::MO_DARWIN_STUB;
3132 } else if (Subtarget->isPICStyleRIPRel() && isa<Function>(GV) &&
3133 cast<Function>(GV)->hasFnAttribute(Attribute::NonLazyBind)) {
3134 // If the function is marked as non-lazy, generate an indirect call
3135 // which loads from the GOT directly. This avoids runtime overhead
3136 // at the cost of eager binding (and one extra byte of encoding).
3137 OpFlags = X86II::MO_GOTPCREL;
3138 WrapperKind = X86ISD::WrapperRIP;
3142 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
3143 G->getOffset(), OpFlags);
3145 // Add a wrapper if needed.
3146 if (WrapperKind != ISD::DELETED_NODE)
3147 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee);
3148 // Add extra indirection if needed.
3150 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
3151 MachinePointerInfo::getGOT(),
3152 false, false, false, 0);
3154 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3155 unsigned char OpFlags = 0;
3157 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
3158 // external symbols should go through the PLT.
3159 if (Subtarget->isTargetELF() &&
3160 DAG.getTarget().getRelocationModel() == Reloc::PIC_) {
3161 OpFlags = X86II::MO_PLT;
3162 } else if (Subtarget->isPICStyleStubAny() &&
3163 (!Subtarget->getTargetTriple().isMacOSX() ||
3164 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3165 // PC-relative references to external symbols should go through $stub,
3166 // unless we're building with the leopard linker or later, which
3167 // automatically synthesizes these stubs.
3168 OpFlags = X86II::MO_DARWIN_STUB;
3171 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
3173 } else if (Subtarget->isTarget64BitILP32() &&
3174 Callee->getValueType(0) == MVT::i32) {
3175 // Zero-extend the 32-bit Callee address into a 64-bit one, as required by the x32 ABI
3176 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
3179 // Returns a chain & a flag for retval copy to use.
3180 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3181 SmallVector<SDValue, 8> Ops;
3183 if (!IsSibcall && isTailCall) {
3184 Chain = DAG.getCALLSEQ_END(Chain,
3185 DAG.getIntPtrConstant(NumBytesToPop, true),
3186 DAG.getIntPtrConstant(0, true), InFlag, dl);
3187 InFlag = Chain.getValue(1);
3190 Ops.push_back(Chain);
3191 Ops.push_back(Callee);
3194 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
3196 // Add argument registers to the end of the list so that they are known live
3198 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3199 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
3200 RegsToPass[i].second.getValueType()));
3202 // Add a register mask operand representing the call-preserved registers.
3203 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
3204 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
3205 assert(Mask && "Missing call preserved mask for calling convention");
3206 Ops.push_back(DAG.getRegisterMask(Mask));
3208 if (InFlag.getNode())
3209 Ops.push_back(InFlag);
3213 //// If this is the first return lowered for this function, add the regs
3214 //// to the liveout set for the function.
3215 // This isn't right, although it's probably harmless on x86; liveouts
3216 // should be computed from returns not tail calls. Consider a void
3217 // function making a tail call to a function returning int.
3218 return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
3221 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
3222 InFlag = Chain.getValue(1);
3224 // Create the CALLSEQ_END node.
3225 unsigned NumBytesForCalleeToPop;
3226 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3227 DAG.getTarget().Options.GuaranteedTailCallOpt))
3228 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
3229 else if (!Is64Bit && !IsTailCallConvention(CallConv) &&
3230 !Subtarget->getTargetTriple().isOSMSVCRT() &&
3231 SR == StackStructReturn)
3232 // If this is a call to a struct-return function, the callee
3233 // pops the hidden struct pointer, so we have to push it back.
3234 // This is common for Darwin/X86, Linux & Mingw32 targets.
3235 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
3236 NumBytesForCalleeToPop = 4;
3238 NumBytesForCalleeToPop = 0; // Callee pops nothing.
3240 // Returns a flag for retval copy to use.
3242 Chain = DAG.getCALLSEQ_END(Chain,
3243 DAG.getIntPtrConstant(NumBytesToPop, true),
3244 DAG.getIntPtrConstant(NumBytesForCalleeToPop,
3247 InFlag = Chain.getValue(1);
3250 // Handle result values, copying them out of physregs into vregs that we
3252 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
3253 Ins, dl, DAG, InVals);
3256 //===----------------------------------------------------------------------===//
3257 // Fast Calling Convention (tail call) implementation
3258 //===----------------------------------------------------------------------===//
3260 // Like stdcall, the callee cleans up the arguments, except that ECX is
3261 // reserved for storing the address of the tail-called function. Only 2 registers
3262 // are free for argument passing (inreg). Tail call optimization is performed when:
3264 // * tailcallopt is enabled
3265 // * caller/callee are fastcc
3266 // On X86_64 architecture with GOT-style position independent code only local
3267 // (within module) calls are supported at the moment.
3268 // To keep the stack aligned according to the platform ABI, the function
3269 // GetAlignedArgumentStackSize ensures that the argument delta is always a
3270 // multiple of the stack alignment. (Dynamic linkers such as darwin's dyld need this.)
3271 // If the tail-called function has more arguments than the caller, the
3272 // caller needs to make sure that there is room to move the RETADDR to. This is
3273 // achieved by reserving an area the size of the argument delta right after the
3274 // original RETADDR, but before the saved framepointer or the spilled registers
3275 // e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
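//
// An illustrative caller stack layout for that example on x86-32 (4-byte
// slots; the exact reserved size also depends on GetAlignedArgumentStackSize):
// the two extra callee arguments give an argument delta of 8 bytes, so an
// area of that size sits between the original RETADDR and the saved frame
// pointer, into which the RETADDR can be moved:
//   arg2
//   arg1
//   RETADDR
//   [ reserved area the size of the argument delta ]
//   (saved frame pointer / spilled callee-saved registers)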
3287 /// GetAlignedArgumentStackSize - Round the stack size up so that it has the
3288 /// required form, e.g. 16n + 12 for a 16-byte alignment requirement.
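///
/// A worked example (illustrative only): on x86-32 with a 16-byte stack
/// alignment and 4-byte slots, a StackSize of 20 has misalignment 4 <= 12,
/// so 8 is added to give 28 (= 16 + 12); a StackSize of 30 takes the other
/// branch and is rounded up to 44 (= 32 + 12).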
3290 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
3291 SelectionDAG& DAG) const {
3292 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3293 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
3294 unsigned StackAlignment = TFI.getStackAlignment();
3295 uint64_t AlignMask = StackAlignment - 1;
3296 int64_t Offset = StackSize;
3297 unsigned SlotSize = RegInfo->getSlotSize();
3298 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
3299 // The misalignment is no larger than (StackAlignment - SlotSize), so just add
// the difference.
3300 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
3302 // Mask out the lower bits, then add one full stack alignment plus (StackAlignment - SlotSize).
3303 Offset = ((~AlignMask) & Offset) + StackAlignment +
3304 (StackAlignment-SlotSize);
3309 /// MatchingStackOffset - Return true if the given stack call argument is
3310 /// already available in the same position (relatively) of the caller's
3311 /// incoming argument stack.
3313 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
3314 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
3315 const X86InstrInfo *TII) {
3316 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
3318 if (Arg.getOpcode() == ISD::CopyFromReg) {
3319 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
3320 if (!TargetRegisterInfo::isVirtualRegister(VR))
3322 MachineInstr *Def = MRI->getVRegDef(VR);
3325 if (!Flags.isByVal()) {
3326 if (!TII->isLoadFromStackSlot(Def, FI))
3329 unsigned Opcode = Def->getOpcode();
3330 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
3331 Opcode == X86::LEA64_32r) &&
3332 Def->getOperand(1).isFI()) {
3333 FI = Def->getOperand(1).getIndex();
3334 Bytes = Flags.getByValSize();
3338 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
3339 if (Flags.isByVal())
3340 // ByVal argument is passed in as a pointer but it's now being
3341 // dereferenced. e.g.
3342 // define @foo(%struct.X* %A) {
3343 // tail call @bar(%struct.X* byval %A)
3346 SDValue Ptr = Ld->getBasePtr();
3347 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
3350 FI = FINode->getIndex();
3351 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
3352 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
3353 FI = FINode->getIndex();
3354 Bytes = Flags.getByValSize();
3358 assert(FI != INT_MAX);
3359 if (!MFI->isFixedObjectIndex(FI))
3361 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
3364 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3365 /// for tail call optimization. Targets which want to do tail call
3366 /// optimization should implement this function.
3368 X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
3369 CallingConv::ID CalleeCC,
3371 bool isCalleeStructRet,
3372 bool isCallerStructRet,
3374 const SmallVectorImpl<ISD::OutputArg> &Outs,
3375 const SmallVectorImpl<SDValue> &OutVals,
3376 const SmallVectorImpl<ISD::InputArg> &Ins,
3377 SelectionDAG &DAG) const {
3378 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
3381 // If -tailcallopt is specified, make fastcc functions tail-callable.
3382 const MachineFunction &MF = DAG.getMachineFunction();
3383 const Function *CallerF = MF.getFunction();
3385 // If the function return type is x86_fp80 and the callee return type is not,
3386 // then the FP_EXTEND of the call result is not a nop. It's not safe to
3387 // perform a tailcall optimization here.
3388 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
3391 CallingConv::ID CallerCC = CallerF->getCallingConv();
3392 bool CCMatch = CallerCC == CalleeCC;
3393 bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
3394 bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
3396 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
3397 if (IsTailCallConvention(CalleeCC) && CCMatch)
3402 // Look for obvious safe cases to perform tail call optimization that do not
3403 // require ABI changes. This is what gcc calls sibcall.
3405 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
3406 // emit a special epilogue.
3407 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3408 if (RegInfo->needsStackRealignment(MF))
3411 // Also avoid sibcall optimization if either caller or callee uses struct
3412 // return semantics.
3413 if (isCalleeStructRet || isCallerStructRet)
3416 // An stdcall/thiscall caller is expected to clean up its arguments; the
3417 // callee isn't going to do that.
3418 // FIXME: this is more restrictive than needed. We could produce a tailcall
3419 // when the stack adjustment matches. For example, with a thiscall that takes
3420 // only one argument.
3421 if (!CCMatch && (CallerCC == CallingConv::X86_StdCall ||
3422 CallerCC == CallingConv::X86_ThisCall))
3425 // Do not sibcall optimize vararg calls unless all arguments are passed via
3427 if (isVarArg && !Outs.empty()) {
3429 // Optimizing for varargs on Win64 is unlikely to be safe without
3430 // additional testing.
3431 if (IsCalleeWin64 || IsCallerWin64)
3434 SmallVector<CCValAssign, 16> ArgLocs;
3435 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3438 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3439 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
3440 if (!ArgLocs[i].isRegLoc())
3444 // If the call result is in ST0 / ST1, it needs to be popped off the x87
3445 // stack. Therefore, if it's not used by the call it is not safe to optimize
3446 // this into a sibcall.
3447 bool Unused = false;
3448 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3455 SmallVector<CCValAssign, 16> RVLocs;
3456 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), RVLocs,
3458 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3459 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
3460 CCValAssign &VA = RVLocs[i];
3461 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
3466 // If the calling conventions do not match, then we'd better make sure the
3467 // results are returned in the same way as what the caller expects.
3469 SmallVector<CCValAssign, 16> RVLocs1;
3470 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
3472 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
3474 SmallVector<CCValAssign, 16> RVLocs2;
3475 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
3477 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
3479 if (RVLocs1.size() != RVLocs2.size())
3481 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
3482 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
3484 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
3486 if (RVLocs1[i].isRegLoc()) {
3487 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
3490 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
3496 // If the callee takes no arguments then go on to check the results of the
3498 if (!Outs.empty()) {
3499 // Check if stack adjustment is needed. For now, do not do this if any
3500 // argument is passed on the stack.
3501 SmallVector<CCValAssign, 16> ArgLocs;
3502 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3505 // Allocate shadow area for Win64
3507 CCInfo.AllocateStack(32, 8);
3509 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3510 if (CCInfo.getNextStackOffset()) {
3511 MachineFunction &MF = DAG.getMachineFunction();
3512 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
3515 // Check if the arguments are already laid out in the right way as
3516 // the caller's fixed stack objects.
3517 MachineFrameInfo *MFI = MF.getFrameInfo();
3518 const MachineRegisterInfo *MRI = &MF.getRegInfo();
3519 const X86InstrInfo *TII = Subtarget->getInstrInfo();
3520 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3521 CCValAssign &VA = ArgLocs[i];
3522 SDValue Arg = OutVals[i];
3523 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3524 if (VA.getLocInfo() == CCValAssign::Indirect)
3526 if (!VA.isRegLoc()) {
3527 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
3534 // If the tailcall address may be in a register, then make sure it's
3535 // possible to register allocate for it. In 32-bit, the call address can
3536 // only target EAX, EDX, or ECX since the tail call must be scheduled after
3537 // callee-saved registers are restored. These happen to be the same
3538 // registers used to pass 'inreg' arguments so watch out for those.
3539 if (!Subtarget->is64Bit() &&
3540 ((!isa<GlobalAddressSDNode>(Callee) &&
3541 !isa<ExternalSymbolSDNode>(Callee)) ||
3542 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
3543 unsigned NumInRegs = 0;
3544 // In PIC we need an extra register to formulate the address computation
3546 unsigned MaxInRegs =
3547 (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
3549 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3550 CCValAssign &VA = ArgLocs[i];
3553 unsigned Reg = VA.getLocReg();
3556 case X86::EAX: case X86::EDX: case X86::ECX:
3557 if (++NumInRegs == MaxInRegs)
3569 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
3570 const TargetLibraryInfo *libInfo) const {
3571 return X86::createFastISel(funcInfo, libInfo);
3574 //===----------------------------------------------------------------------===//
3575 // Other Lowering Hooks
3576 //===----------------------------------------------------------------------===//
3578 static bool MayFoldLoad(SDValue Op) {
3579 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
3582 static bool MayFoldIntoStore(SDValue Op) {
3583 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
3586 static bool isTargetShuffle(unsigned Opcode) {
3588 default: return false;
3589 case X86ISD::BLENDI:
3590 case X86ISD::PSHUFB:
3591 case X86ISD::PSHUFD:
3592 case X86ISD::PSHUFHW:
3593 case X86ISD::PSHUFLW:
3595 case X86ISD::PALIGNR:
3596 case X86ISD::MOVLHPS:
3597 case X86ISD::MOVLHPD:
3598 case X86ISD::MOVHLPS:
3599 case X86ISD::MOVLPS:
3600 case X86ISD::MOVLPD:
3601 case X86ISD::MOVSHDUP:
3602 case X86ISD::MOVSLDUP:
3603 case X86ISD::MOVDDUP:
3606 case X86ISD::UNPCKL:
3607 case X86ISD::UNPCKH:
3608 case X86ISD::VPERMILPI:
3609 case X86ISD::VPERM2X128:
3610 case X86ISD::VPERMI:
3615 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3616 SDValue V1, SelectionDAG &DAG) {
3618 default: llvm_unreachable("Unknown x86 shuffle node");
3619 case X86ISD::MOVSHDUP:
3620 case X86ISD::MOVSLDUP:
3621 case X86ISD::MOVDDUP:
3622 return DAG.getNode(Opc, dl, VT, V1);
3626 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3627 SDValue V1, unsigned TargetMask,
3628 SelectionDAG &DAG) {
3630 default: llvm_unreachable("Unknown x86 shuffle node");
3631 case X86ISD::PSHUFD:
3632 case X86ISD::PSHUFHW:
3633 case X86ISD::PSHUFLW:
3634 case X86ISD::VPERMILPI:
3635 case X86ISD::VPERMI:
3636 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
3640 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3641 SDValue V1, SDValue V2, unsigned TargetMask,
3642 SelectionDAG &DAG) {
3644 default: llvm_unreachable("Unknown x86 shuffle node");
3645 case X86ISD::PALIGNR:
3646 case X86ISD::VALIGN:
3648 case X86ISD::VPERM2X128:
3649 return DAG.getNode(Opc, dl, VT, V1, V2,
3650 DAG.getConstant(TargetMask, MVT::i8));
3654 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3655 SDValue V1, SDValue V2, SelectionDAG &DAG) {
3657 default: llvm_unreachable("Unknown x86 shuffle node");
3658 case X86ISD::MOVLHPS:
3659 case X86ISD::MOVLHPD:
3660 case X86ISD::MOVHLPS:
3661 case X86ISD::MOVLPS:
3662 case X86ISD::MOVLPD:
3665 case X86ISD::UNPCKL:
3666 case X86ISD::UNPCKH:
3667 return DAG.getNode(Opc, dl, VT, V1, V2);
3671 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
3672 MachineFunction &MF = DAG.getMachineFunction();
3673 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3674 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3675 int ReturnAddrIndex = FuncInfo->getRAIndex();
3677 if (ReturnAddrIndex == 0) {
3678 // Set up a frame object for the return address.
3679 unsigned SlotSize = RegInfo->getSlotSize();
3680 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize,
3683 FuncInfo->setRAIndex(ReturnAddrIndex);
3686 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
3689 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
3690 bool hasSymbolicDisplacement) {
3691 // Offset should fit into 32 bit immediate field.
3692 if (!isInt<32>(Offset))
3695 // If we don't have a symbolic displacement - we don't have any extra
3697 if (!hasSymbolicDisplacement)
3700 // FIXME: Some tweaks might be needed for medium code model.
3701 if (M != CodeModel::Small && M != CodeModel::Kernel)
3704 // For the small code model we assume that the latest object is 16MB before the
3705 // end of the 31-bit boundary. We may also accept pretty large negative constants
3706 // knowing that all objects are in the positive half of the address space.
3707 if (M == CodeModel::Small && Offset < 16*1024*1024)
3710 // For the kernel code model we know that all objects reside in the negative
3711 // half of the 32-bit address space. We may not accept negative offsets, since
3712 // they may be just off, and we may accept pretty large positive ones.
3713 if (M == CodeModel::Kernel && Offset >= 0)
3719 /// isCalleePop - Determines whether the callee is required to pop its
3720 /// own arguments. Callee pop is necessary to support tail calls.
3721 bool X86::isCalleePop(CallingConv::ID CallingConv,
3722 bool is64Bit, bool IsVarArg, bool TailCallOpt) {
3723 switch (CallingConv) {
3726 case CallingConv::X86_StdCall:
3727 case CallingConv::X86_FastCall:
3728 case CallingConv::X86_ThisCall:
3730 case CallingConv::Fast:
3731 case CallingConv::GHC:
3732 case CallingConv::HiPE:
3739 /// \brief Return true if the condition is an unsigned comparison operation.
3740 static bool isX86CCUnsigned(unsigned X86CC) {
3742 default: llvm_unreachable("Invalid integer condition!");
3743 case X86::COND_E: return true;
3744 case X86::COND_G: return false;
3745 case X86::COND_GE: return false;
3746 case X86::COND_L: return false;
3747 case X86::COND_LE: return false;
3748 case X86::COND_NE: return true;
3749 case X86::COND_B: return true;
3750 case X86::COND_A: return true;
3751 case X86::COND_BE: return true;
3752 case X86::COND_AE: return true;
3754 llvm_unreachable("covered switch fell through?!");
3757 /// TranslateX86CC - Do a one-to-one translation of an ISD::CondCode to the X86-
3758 /// specific condition code, returning the condition code and the LHS/RHS of the
3759 /// comparison to make.
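///
/// For example (derived from the mapping below): an integer (setult x, y)
/// becomes X86::COND_B and (setgt x, y) becomes X86::COND_G, while
/// (setgt x, -1) is first rewritten as a comparison against 0 that uses
/// X86::COND_NS.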
3760 static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
3761 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
3763 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
3764 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
3765 // X > -1 -> X == 0, jump !sign.
3766 RHS = DAG.getConstant(0, RHS.getValueType());
3767 return X86::COND_NS;
3769 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
3770 // X < 0 -> X == 0, jump on sign.
3773 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
3775 RHS = DAG.getConstant(0, RHS.getValueType());
3776 return X86::COND_LE;
3780 switch (SetCCOpcode) {
3781 default: llvm_unreachable("Invalid integer condition!");
3782 case ISD::SETEQ: return X86::COND_E;
3783 case ISD::SETGT: return X86::COND_G;
3784 case ISD::SETGE: return X86::COND_GE;
3785 case ISD::SETLT: return X86::COND_L;
3786 case ISD::SETLE: return X86::COND_LE;
3787 case ISD::SETNE: return X86::COND_NE;
3788 case ISD::SETULT: return X86::COND_B;
3789 case ISD::SETUGT: return X86::COND_A;
3790 case ISD::SETULE: return X86::COND_BE;
3791 case ISD::SETUGE: return X86::COND_AE;
3795 // First determine if it is required or is profitable to flip the operands.
3797 // If LHS is a foldable load, but RHS is not, flip the condition.
3798 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
3799 !ISD::isNON_EXTLoad(RHS.getNode())) {
3800 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
3801 std::swap(LHS, RHS);
3804 switch (SetCCOpcode) {
3810 std::swap(LHS, RHS);
3814 // On a floating point condition, the flags are set as follows:
//  ZF | PF | CF | op
3816 // 0 | 0 | 0 | X > Y
3817 // 0 | 0 | 1 | X < Y
3818 // 1 | 0 | 0 | X == Y
3819 // 1 | 1 | 1 | unordered
3820 switch (SetCCOpcode) {
3821 default: llvm_unreachable("Condcode should be pre-legalized away");
3823 case ISD::SETEQ: return X86::COND_E;
3824 case ISD::SETOLT: // flipped
3826 case ISD::SETGT: return X86::COND_A;
3827 case ISD::SETOLE: // flipped
3829 case ISD::SETGE: return X86::COND_AE;
3830 case ISD::SETUGT: // flipped
3832 case ISD::SETLT: return X86::COND_B;
3833 case ISD::SETUGE: // flipped
3835 case ISD::SETLE: return X86::COND_BE;
3837 case ISD::SETNE: return X86::COND_NE;
3838 case ISD::SETUO: return X86::COND_P;
3839 case ISD::SETO: return X86::COND_NP;
3841 case ISD::SETUNE: return X86::COND_INVALID;
3845 /// hasFPCMov - is there a floating point cmov for the specific X86 condition
3846 /// code. The current x86 ISA includes the following FP cmov instructions:
3847 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
3848 static bool hasFPCMov(unsigned X86CC) {
3864 /// isFPImmLegal - Returns true if the target can instruction select the
3865 /// specified FP immediate natively. If false, the legalizer will
3866 /// materialize the FP immediate as a load from a constant pool.
3867 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
3868 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
3869 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
3875 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
3876 ISD::LoadExtType ExtTy,
3878 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
3879 // relocation must target a movq or addq instruction: don't let the load shrink.
3880 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
3881 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
3882 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
3883 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
3887 /// \brief Returns true if it is beneficial to convert a load of a constant
3888 /// to just the constant itself.
3889 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
3891 assert(Ty->isIntegerTy());
3893 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3894 if (BitSize == 0 || BitSize > 64)
3899 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
3900 unsigned Index) const {
3901 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
3904 return (Index == 0 || Index == ResVT.getVectorNumElements());
3907 bool X86TargetLowering::isCheapToSpeculateCttz() const {
3908 // Speculate cttz only if we can directly use TZCNT.
3909 return Subtarget->hasBMI();
3912 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
3913 // Speculate ctlz only if we can directly use LZCNT.
3914 return Subtarget->hasLZCNT();
3917 /// isUndefOrInRange - Return true if Val is undef or if its value falls within
3918 /// the specified half-open range [Low, Hi).
3919 static bool isUndefOrInRange(int Val, int Low, int Hi) {
3920 return (Val < 0) || (Val >= Low && Val < Hi);
3923 /// isUndefOrEqual - Val is either less than zero (undef) or equal to the
3924 /// specified value.
3925 static bool isUndefOrEqual(int Val, int CmpVal) {
3926 return (Val < 0 || Val == CmpVal);
3929 /// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
3930 /// at position Pos and ending at Pos+Size, falls within the specified
3931 /// sequential range [Low, Low+Size) or is undef.
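///
/// For example, isSequentialOrUndefInRange({0, -1, 2, 3}, 0, 4, 0) is true,
/// whereas a mask of {0, 2, 2, 3} fails because element 1 is neither undef
/// nor the expected value 1.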
3932 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
3933 unsigned Pos, unsigned Size, int Low) {
3934 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
3935 if (!isUndefOrEqual(Mask[i], Low))
3940 /// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
3941 /// is suitable for input to PSHUFD. That is, it doesn't reference the other
3942 /// operand - by default it matches against the first operand.
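///
/// For example, for v4i32 the mask <2, 1, -1, 0> only references the first
/// operand (or undef) and is accepted, whereas <2, 5, 1, 0> is rejected
/// because element 5 lives in the second operand.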
3943 static bool isPSHUFDMask(ArrayRef<int> Mask, MVT VT,
3944 bool TestSecondOperand = false) {
3945 if (VT != MVT::v4f32 && VT != MVT::v4i32 &&
3946 VT != MVT::v2f64 && VT != MVT::v2i64)
3949 unsigned NumElems = VT.getVectorNumElements();
3950 unsigned Lo = TestSecondOperand ? NumElems : 0;
3951 unsigned Hi = Lo + NumElems;
3953 for (unsigned i = 0; i < NumElems; ++i)
3954 if (!isUndefOrInRange(Mask[i], (int)Lo, (int)Hi))
3960 /// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
3961 /// is suitable for input to PSHUFHW.
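///
/// For example, for v8i16 the mask <0, 1, 2, 3, 7, 6, 5, 4> is accepted (the
/// low quadword is left in order and the high quadword is permuted in place),
/// whereas <0, 1, 2, 3, 0, 1, 2, 3> is not, since its high half references
/// low-quadword elements.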
3962 static bool isPSHUFHWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3963 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3966 // Lower quadword copied in order or undef.
3967 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0))
3970 // Upper quadword shuffled.
3971 for (unsigned i = 4; i != 8; ++i)
3972 if (!isUndefOrInRange(Mask[i], 4, 8))
3975 if (VT == MVT::v16i16) {
3976 // Lower quadword copied in order or undef.
3977 if (!isSequentialOrUndefInRange(Mask, 8, 4, 8))
3980 // Upper quadword shuffled.
3981 for (unsigned i = 12; i != 16; ++i)
3982 if (!isUndefOrInRange(Mask[i], 12, 16))
3989 /// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
3990 /// is suitable for input to PSHUFLW.
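///
/// For example, for v8i16 the mask <3, 2, 1, 0, 4, 5, 6, 7> is accepted: the
/// high quadword is copied in order and the low quadword is permuted in place.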
3991 static bool isPSHUFLWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3992 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3995 // Upper quadword copied in order.
3996 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4))
3999 // Lower quadword shuffled.
4000 for (unsigned i = 0; i != 4; ++i)
4001 if (!isUndefOrInRange(Mask[i], 0, 4))
4004 if (VT == MVT::v16i16) {
4005 // Upper quadword copied in order.
4006 if (!isSequentialOrUndefInRange(Mask, 12, 4, 12))
4009 // Lower quadword shuffled.
4010 for (unsigned i = 8; i != 12; ++i)
4011 if (!isUndefOrInRange(Mask[i], 8, 12))
4018 /// \brief Return true if the mask specifies a shuffle of elements that is
4019 /// suitable for input to intralane (palignr) or interlane (valign) vector
/// shift operations.
4021 static bool isAlignrMask(ArrayRef<int> Mask, MVT VT, bool InterLane) {
4022 unsigned NumElts = VT.getVectorNumElements();
4023 unsigned NumLanes = InterLane ? 1: VT.getSizeInBits()/128;
4024 unsigned NumLaneElts = NumElts/NumLanes;
4026 // Do not handle 64-bit element shuffles with palignr.
4027 if (NumLaneElts == 2)
4030 for (unsigned l = 0; l != NumElts; l+=NumLaneElts) {
4032 for (i = 0; i != NumLaneElts; ++i) {
4037 // Lane is all undef, go to next lane
4038 if (i == NumLaneElts)
4041 int Start = Mask[i+l];
4043 // Make sure it's in this lane in one of the sources
4044 if (!isUndefOrInRange(Start, l, l+NumLaneElts) &&
4045 !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts))
4048 // If not lane 0, then we must match lane 0
4049 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l))
4052 // Correct second source to be contiguous with first source
4053 if (Start >= (int)NumElts)
4054 Start -= NumElts - NumLaneElts;
4056 // Make sure we're shifting in the right direction.
4057 if (Start <= (int)(i+l))
4062 // Check the rest of the elements to see if they are consecutive.
4063 for (++i; i != NumLaneElts; ++i) {
4064 int Idx = Mask[i+l];
4066 // Make sure it's in this lane
4067 if (!isUndefOrInRange(Idx, l, l+NumLaneElts) &&
4068 !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts))
4071 // If not lane 0, then we must match lane 0
4072 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l))
4075 if (Idx >= (int)NumElts)
4076 Idx -= NumElts - NumLaneElts;
4078 if (!isUndefOrEqual(Idx, Start+i))
4087 /// \brief Return true if the node specifies a shuffle of elements that is
4088 /// suitable for input to PALIGNR.
4089 static bool isPALIGNRMask(ArrayRef<int> Mask, MVT VT,
4090 const X86Subtarget *Subtarget) {
4091 if ((VT.is128BitVector() && !Subtarget->hasSSSE3()) ||
4092 (VT.is256BitVector() && !Subtarget->hasInt256()) ||
4093 VT.is512BitVector())
4094 // FIXME: Add AVX512BW.
4097 return isAlignrMask(Mask, VT, false);
4100 /// \brief Return true if the node specifies a shuffle of elements that is
4101 /// suitable for input to VALIGN.
4102 static bool isVALIGNMask(ArrayRef<int> Mask, MVT VT,
4103 const X86Subtarget *Subtarget) {
4104 // FIXME: Add AVX512VL.
4105 if (!VT.is512BitVector() || !Subtarget->hasAVX512())
4107 return isAlignrMask(Mask, VT, true);
4110 /// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
4111 /// the two vector operands have swapped position.
4112 static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
4113 unsigned NumElems) {
4114 for (unsigned i = 0; i != NumElems; ++i) {
4118 else if (idx < (int)NumElems)
4119 Mask[i] = idx + NumElems;
4121 Mask[i] = idx - NumElems;
4125 /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
4126 /// specifies a shuffle of elements that is suitable for input to 128/256-bit
4127 /// SHUFPS and SHUFPD. If Commuted is true, it checks whether the sources are in
4128 /// the reverse order of what x86 shuffles want.
4129 static bool isSHUFPMask(ArrayRef<int> Mask, MVT VT, bool Commuted = false) {
4131 unsigned NumElems = VT.getVectorNumElements();
4132 unsigned NumLanes = VT.getSizeInBits()/128;
4133 unsigned NumLaneElems = NumElems/NumLanes;
4135 if (NumLaneElems != 2 && NumLaneElems != 4)
4138 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4139 bool symmetricMaskRequired =
4140 (VT.getSizeInBits() >= 256) && (EltSize == 32);
4142 // VSHUFPSY divides the resulting vector into 4 chunks.
4143 // The sources are also split into 4 chunks, and each destination
4144 // chunk must come from a different source chunk.
4146 // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0
4147 // SRC2 => Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
4149 // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4,
4150 // Y3..Y0, Y3..Y0, X3..X0, X3..X0
4152 // VSHUFPDY divides the resulting vector into 4 chunks.
4153 // The sources are also split into 4 chunks, and each destination
4154 // chunk must come from a different source chunk.
4156 // SRC1 => X3 X2 X1 X0
4157 // SRC2 => Y3 Y2 Y1 Y0
4159 // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0
4161 SmallVector<int, 4> MaskVal(NumLaneElems, -1);
4162 unsigned HalfLaneElems = NumLaneElems/2;
4163 for (unsigned l = 0; l != NumElems; l += NumLaneElems) {
4164 for (unsigned i = 0; i != NumLaneElems; ++i) {
4165 int Idx = Mask[i+l];
4166 unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0);
4167 if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems))
4169 // For VSHUFPSY, the mask of the second half must be the same as the
4170 // first but with the appropriate offsets. This works in the same way as
4171 // VPERMILPS works with masks.
4172 if (!symmetricMaskRequired || Idx < 0)
4174 if (MaskVal[i] < 0) {
4175 MaskVal[i] = Idx - l;
4178 if ((signed)(Idx - l) != MaskVal[i])
4186 /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
4187 /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
4188 static bool isMOVHLPSMask(ArrayRef<int> Mask, MVT VT) {
4189 if (!VT.is128BitVector())
4192 unsigned NumElems = VT.getVectorNumElements();
4197 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
4198 return isUndefOrEqual(Mask[0], 6) &&
4199 isUndefOrEqual(Mask[1], 7) &&
4200 isUndefOrEqual(Mask[2], 2) &&
4201 isUndefOrEqual(Mask[3], 3);
4204 /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
4205 /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
/// <2, 3, 2, 3>.
4207 static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, MVT VT) {
4208 if (!VT.is128BitVector())
4211 unsigned NumElems = VT.getVectorNumElements();
4216 return isUndefOrEqual(Mask[0], 2) &&
4217 isUndefOrEqual(Mask[1], 3) &&
4218 isUndefOrEqual(Mask[2], 2) &&
4219 isUndefOrEqual(Mask[3], 3);
4222 /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
4223 /// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
4224 static bool isMOVLPMask(ArrayRef<int> Mask, MVT VT) {
4225 if (!VT.is128BitVector())
4228 unsigned NumElems = VT.getVectorNumElements();
4230 if (NumElems != 2 && NumElems != 4)
4233 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4234 if (!isUndefOrEqual(Mask[i], i + NumElems))
4237 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
4238 if (!isUndefOrEqual(Mask[i], i))
4244 /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
4245 /// specifies a shuffle of elements that is suitable for input to MOVLHPS.
4246 static bool isMOVLHPSMask(ArrayRef<int> Mask, MVT VT) {
4247 if (!VT.is128BitVector())
4250 unsigned NumElems = VT.getVectorNumElements();
4252 if (NumElems != 2 && NumElems != 4)
4255 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4256 if (!isUndefOrEqual(Mask[i], i))
4259 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4260 if (!isUndefOrEqual(Mask[i + e], i + NumElems))
4266 /// isINSERTPSMask - Return true if the specified VECTOR_SHUFFLE operand
4267 /// specifies a shuffle of elements that is suitable for input to INSERTPS.
4268 /// i.e. if all but one element come from the same vector.
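///
/// For example, for v4f32 the mask <0, 1, 2, 7> is accepted: three elements
/// stay in place in the first vector and the remaining one is inserted from
/// the second vector.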
4269 static bool isINSERTPSMask(ArrayRef<int> Mask, MVT VT) {
4270 // TODO: Deal with AVX's VINSERTPS
4271 if (!VT.is128BitVector() || (VT != MVT::v4f32 && VT != MVT::v4i32))
4274 unsigned CorrectPosV1 = 0;
4275 unsigned CorrectPosV2 = 0;
4276 for (int i = 0, e = (int)VT.getVectorNumElements(); i != e; ++i) {
4277 if (Mask[i] == -1) {
4285 else if (Mask[i] == i + 4)
4289 if (CorrectPosV1 == 3 || CorrectPosV2 == 3)
4290 // We have 3 elements (undefs count as elements from any vector) from one
4291 // vector, and one from another.
4298 // Some special combinations that can be optimized.
4301 SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp,
4302 SelectionDAG &DAG) {
4303 MVT VT = SVOp->getSimpleValueType(0);
4306 if (VT != MVT::v8i32 && VT != MVT::v8f32)
4309 ArrayRef<int> Mask = SVOp->getMask();
4311 // These are the special masks that may be optimized.
4312 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14};
4313 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15};
4314 bool MatchEvenMask = true;
4315 bool MatchOddMask = true;
4316 for (int i=0; i<8; ++i) {
4317 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i]))
4318 MatchEvenMask = false;
4319 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i]))
4320 MatchOddMask = false;
4323 if (!MatchEvenMask && !MatchOddMask)
4326 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT);
4328 SDValue Op0 = SVOp->getOperand(0);
4329 SDValue Op1 = SVOp->getOperand(1);
4331 if (MatchEvenMask) {
4332 // Shift the second operand right by 32 bits.
4333 static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 };
4334 Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask);
4336 // Shift the first operand left by 32 bits.
4337 static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 };
4338 Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask);
4340 static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15};
4341 return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask);
4344 /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
4345 /// specifies a shuffle of elements that is suitable for input to UNPCKL.
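///
/// For example, the v4i32 mask <0, 4, 1, 5> (interleaving the low halves of
/// the two sources) is accepted; for 256-bit types each 128-bit lane is
/// checked independently, e.g. <0, 8, 1, 9, 4, 12, 5, 13> for v8i32.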
4346 static bool isUNPCKLMask(ArrayRef<int> Mask, MVT VT,
4347 bool HasInt256, bool V2IsSplat = false) {
4349 assert(VT.getSizeInBits() >= 128 &&
4350 "Unsupported vector type for unpckl");
4352 unsigned NumElts = VT.getVectorNumElements();
4353 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4354 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4357 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4358 "Unsupported vector type for unpckh");
4360 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4361 unsigned NumLanes = VT.getSizeInBits()/128;
4362 unsigned NumLaneElts = NumElts/NumLanes;
4364 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4365 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4366 int BitI = Mask[l+i];
4367 int BitI1 = Mask[l+i+1];
4368 if (!isUndefOrEqual(BitI, j))
4371 if (!isUndefOrEqual(BitI1, NumElts))
4374 if (!isUndefOrEqual(BitI1, j + NumElts))
4383 /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
4384 /// specifies a shuffle of elements that is suitable for input to UNPCKH.
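///
/// For example, the v4i32 mask <2, 6, 3, 7> (interleaving the high halves of
/// the two sources) is accepted.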
4385 static bool isUNPCKHMask(ArrayRef<int> Mask, MVT VT,
4386 bool HasInt256, bool V2IsSplat = false) {
4387 assert(VT.getSizeInBits() >= 128 &&
4388 "Unsupported vector type for unpckh");
4390 unsigned NumElts = VT.getVectorNumElements();
4391 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4392 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4395 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4396 "Unsupported vector type for unpckh");
4398 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4399 unsigned NumLanes = VT.getSizeInBits()/128;
4400 unsigned NumLaneElts = NumElts/NumLanes;
4402 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4403 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4404 int BitI = Mask[l+i];
4405 int BitI1 = Mask[l+i+1];
4406 if (!isUndefOrEqual(BitI, j))
4409 if (isUndefOrEqual(BitI1, NumElts))
4412 if (!isUndefOrEqual(BitI1, j+NumElts))
4420 /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
4421 /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
/// <0, 0, 1, 1>.
4423 static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4424 unsigned NumElts = VT.getVectorNumElements();
4425 bool Is256BitVec = VT.is256BitVector();
4427 if (VT.is512BitVector())
4429 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4430 "Unsupported vector type for unpckh");
4432 if (Is256BitVec && NumElts != 4 && NumElts != 8 &&
4433 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4436 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern
4437 // FIXME: Need a better way to get rid of this, there's no latency difference
4438 // between UNPCKLPD and MOVDDUP, the latter should always be checked first and
4439 // the former later. We should also remove the "_undef" special mask.
4440 if (NumElts == 4 && Is256BitVec)
4443 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4444 // independently on 128-bit lanes.
4445 unsigned NumLanes = VT.getSizeInBits()/128;
4446 unsigned NumLaneElts = NumElts/NumLanes;
4448 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4449 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4450 int BitI = Mask[l+i];
4451 int BitI1 = Mask[l+i+1];
4453 if (!isUndefOrEqual(BitI, j))
4455 if (!isUndefOrEqual(BitI1, j))
4463 /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
4464 /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
/// <2, 2, 3, 3>.
4466 static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4467 unsigned NumElts = VT.getVectorNumElements();
4469 if (VT.is512BitVector())
4472 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4473 "Unsupported vector type for unpckh");
4475 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4476 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4479 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4480 // independently on 128-bit lanes.
4481 unsigned NumLanes = VT.getSizeInBits()/128;
4482 unsigned NumLaneElts = NumElts/NumLanes;
4484 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4485 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4486 int BitI = Mask[l+i];
4487 int BitI1 = Mask[l+i+1];
4488 if (!isUndefOrEqual(BitI, j))
4490 if (!isUndefOrEqual(BitI1, j))
4497 // Match for INSERTI64x4/INSERTF64x4 instructions: (src0[0], src1[0]) or
4498 // (src1[0], src0[1]), i.e. manipulations of 256-bit sub-vectors.
4499 static bool isINSERT64x4Mask(ArrayRef<int> Mask, MVT VT, unsigned int *Imm) {
4500 if (!VT.is512BitVector())
4503 unsigned NumElts = VT.getVectorNumElements();
4504 unsigned HalfSize = NumElts/2;
4505 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, 0)) {
4506 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, NumElts)) {
4511 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, NumElts)) {
4512 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, HalfSize)) {
4520 /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
4521 /// specifies a shuffle of elements that is suitable for input to MOVSS,
4522 /// MOVSD, and MOVD, i.e. setting the lowest element.
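///
/// For example, the v4f32 mask <4, 1, 2, 3> is accepted: the lowest element
/// is taken from the second vector and the remaining elements are left in
/// place, which is exactly what MOVSS does.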
4523 static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
4524 if (VT.getVectorElementType().getSizeInBits() < 32)
4526 if (!VT.is128BitVector())
4529 unsigned NumElts = VT.getVectorNumElements();
4531 if (!isUndefOrEqual(Mask[0], NumElts))
4534 for (unsigned i = 1; i != NumElts; ++i)
4535 if (!isUndefOrEqual(Mask[i], i))
4541 /// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered
4542 /// as permutations between 128-bit chunks or halves. As an example: this
4544 /// vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
4545 /// The first half comes from the second half of V1 and the second half from
4546 /// the second half of V2.
4547 static bool isVPERM2X128Mask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4548 if (!HasFp256 || !VT.is256BitVector())
4551 // The shuffle result is divided into half A and half B. In total the two
4552 // sources have 4 halves, namely: C, D, E, F. The final values of A and
4553 // B must come from C, D, E or F.
4554 unsigned HalfSize = VT.getVectorNumElements()/2;
4555 bool MatchA = false, MatchB = false;
4557 // Check if A comes from one of C, D, E, F.
4558 for (unsigned Half = 0; Half != 4; ++Half) {
4559 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
4565 // Check if B comes from one of C, D, E, F.
4566 for (unsigned Half = 0; Half != 4; ++Half) {
4567 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
4573 return MatchA && MatchB;
4576 /// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
4577 /// the specified VECTOR_SHUFFLE mask with VPERM2F128/VPERM2I128 instructions.
4578 static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
4579 MVT VT = SVOp->getSimpleValueType(0);
4581 unsigned HalfSize = VT.getVectorNumElements()/2;
4583 unsigned FstHalf = 0, SndHalf = 0;
4584 for (unsigned i = 0; i < HalfSize; ++i) {
4585 if (SVOp->getMaskElt(i) > 0) {
4586 FstHalf = SVOp->getMaskElt(i)/HalfSize;
4590 for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
4591 if (SVOp->getMaskElt(i) > 0) {
4592 SndHalf = SVOp->getMaskElt(i)/HalfSize;
4597 return (FstHalf | (SndHalf << 4));
4600 // Symmetric in-lane mask. Each lane has 4 elements (for imm8)
4601 static bool isPermImmMask(ArrayRef<int> Mask, MVT VT, unsigned& Imm8) {
4602 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4606 unsigned NumElts = VT.getVectorNumElements();
4608 if (VT.is128BitVector() || (VT.is256BitVector() && EltSize == 64)) {
4609 for (unsigned i = 0; i != NumElts; ++i) {
4612 Imm8 |= Mask[i] << (i*2);
4617 unsigned LaneSize = 4;
4618 SmallVector<int, 4> MaskVal(LaneSize, -1);
4620 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4621 for (unsigned i = 0; i != LaneSize; ++i) {
4622 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4626 if (MaskVal[i] < 0) {
4627 MaskVal[i] = Mask[i+l] - l;
4628 Imm8 |= MaskVal[i] << (i*2);
4631 if (Mask[i+l] != (signed)(MaskVal[i]+l))
4638 /// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
4639 /// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
4640 /// Note that VPERMIL mask matching differs depending on whether the underlying
4641 /// element type is 32 or 64 bits. In VPERMILPS the high half of the mask should
4642 /// point to the same elements as the low half, but within the high half of the
4643 /// source. In VPERMILPD the two lanes may be shuffled independently of each
4644 /// other, with the restriction that lanes can't be crossed. Also handles PSHUFDY.
4645 static bool isVPERMILPMask(ArrayRef<int> Mask, MVT VT) {
4646 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4647 if (VT.getSizeInBits() < 256 || EltSize < 32)
4649 bool symmetricMaskRequired = (EltSize == 32);
4650 unsigned NumElts = VT.getVectorNumElements();
4652 unsigned NumLanes = VT.getSizeInBits()/128;
4653 unsigned LaneSize = NumElts/NumLanes;
4654 // 2 or 4 elements in one lane
4656 SmallVector<int, 4> ExpectedMaskVal(LaneSize, -1);
4657 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4658 for (unsigned i = 0; i != LaneSize; ++i) {
4659 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4661 if (symmetricMaskRequired) {
4662 if (ExpectedMaskVal[i] < 0 && Mask[i+l] >= 0) {
4663 ExpectedMaskVal[i] = Mask[i+l] - l;
4666 if (!isUndefOrEqual(Mask[i+l], ExpectedMaskVal[i]+l))
4674 /// isCommutedMOVLMask - Returns true if the shuffle mask is the reverse of
4675 /// what x86 movss wants: the lowest element of the result must come from
4676 /// vector 2 and the other elements must come from vector 1 in order.
4677 static bool isCommutedMOVLMask(ArrayRef<int> Mask, MVT VT,
4678 bool V2IsSplat = false, bool V2IsUndef = false) {
4679 if (!VT.is128BitVector())
4682 unsigned NumOps = VT.getVectorNumElements();
4683 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
4686 if (!isUndefOrEqual(Mask[0], 0))
4689 for (unsigned i = 1; i != NumOps; ++i)
4690 if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
4691 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
4692 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
4698 /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4699 /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
4700 /// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
4701 static bool isMOVSHDUPMask(ArrayRef<int> Mask, MVT VT,
4702 const X86Subtarget *Subtarget) {
4703 if (!Subtarget->hasSSE3())
4706 unsigned NumElems = VT.getVectorNumElements();
4708 if ((VT.is128BitVector() && NumElems != 4) ||
4709 (VT.is256BitVector() && NumElems != 8) ||
4710 (VT.is512BitVector() && NumElems != 16))
4713 // "i+1" is the value the indexed mask element must have
4714 for (unsigned i = 0; i != NumElems; i += 2)
4715 if (!isUndefOrEqual(Mask[i], i+1) ||
4716 !isUndefOrEqual(Mask[i+1], i+1))
4722 /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4723 /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
4724 /// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
4725 static bool isMOVSLDUPMask(ArrayRef<int> Mask, MVT VT,
4726 const X86Subtarget *Subtarget) {
4727 if (!Subtarget->hasSSE3())
4730 unsigned NumElems = VT.getVectorNumElements();
4732 if ((VT.is128BitVector() && NumElems != 4) ||
4733 (VT.is256BitVector() && NumElems != 8) ||
4734 (VT.is512BitVector() && NumElems != 16))
4737 // "i" is the value the indexed mask element must have
4738 for (unsigned i = 0; i != NumElems; i += 2)
4739 if (!isUndefOrEqual(Mask[i], i) ||
4740 !isUndefOrEqual(Mask[i+1], i))
4746 /// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand
4747 /// specifies a shuffle of elements that is suitable for input to 256-bit
4748 /// version of MOVDDUP.
4749 static bool isMOVDDUPYMask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4750 if (!HasFp256 || !VT.is256BitVector())
4753 unsigned NumElts = VT.getVectorNumElements();
4757 for (unsigned i = 0; i != NumElts/2; ++i)
4758 if (!isUndefOrEqual(Mask[i], 0))
4760 for (unsigned i = NumElts/2; i != NumElts; ++i)
4761 if (!isUndefOrEqual(Mask[i], NumElts/2))
4766 /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4767 /// specifies a shuffle of elements that is suitable for input to 128-bit
4768 /// version of MOVDDUP.
4769 static bool isMOVDDUPMask(ArrayRef<int> Mask, MVT VT) {
4770 if (!VT.is128BitVector())
4773 unsigned e = VT.getVectorNumElements() / 2;
4774 for (unsigned i = 0; i != e; ++i)
4775 if (!isUndefOrEqual(Mask[i], i))
4777 for (unsigned i = 0; i != e; ++i)
4778 if (!isUndefOrEqual(Mask[e+i], i))
4783 /// isVEXTRACTIndex - Return true if the specified
4784 /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
4785 /// suitable for instructions that extract 128 or 256 bit vectors
4786 static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
4787 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4788 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4791 // The index should be aligned on a vecWidth-bit boundary.
4793 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4795 MVT VT = N->getSimpleValueType(0);
4796 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4797 bool Result = (Index * ElSize) % vecWidth == 0;
4802 /// isVINSERTIndex - Return true if the specified INSERT_SUBVECTOR
4803 /// operand specifies a subvector insert that is suitable for input to
4804 /// insertion of 128 or 256-bit subvectors
4805 static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) {
4806 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4807 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4809 // The index should be aligned on a vecWidth-bit boundary.
4811 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4813 MVT VT = N->getSimpleValueType(0);
4814 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4815 bool Result = (Index * ElSize) % vecWidth == 0;
4820 bool X86::isVINSERT128Index(SDNode *N) {
4821 return isVINSERTIndex(N, 128);
4824 bool X86::isVINSERT256Index(SDNode *N) {
4825 return isVINSERTIndex(N, 256);
4828 bool X86::isVEXTRACT128Index(SDNode *N) {
4829 return isVEXTRACTIndex(N, 128);
4832 bool X86::isVEXTRACT256Index(SDNode *N) {
4833 return isVEXTRACTIndex(N, 256);
4836 /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
4837 /// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
4838 /// Handles 128-bit and 256-bit.
4839 static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
4840 MVT VT = N->getSimpleValueType(0);
4842 assert((VT.getSizeInBits() >= 128) &&
4843 "Unsupported vector type for PSHUF/SHUFP");
4845 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate
4846 // independently on 128-bit lanes.
4847 unsigned NumElts = VT.getVectorNumElements();
4848 unsigned NumLanes = VT.getSizeInBits()/128;
4849 unsigned NumLaneElts = NumElts/NumLanes;
4851 assert((NumLaneElts == 2 || NumLaneElts == 4 || NumLaneElts == 8) &&
4852 "Only supports 2, 4 or 8 elements per lane");
4854 unsigned Shift = (NumLaneElts >= 4) ? 1 : 0;
4856 for (unsigned i = 0; i != NumElts; ++i) {
4857 int Elt = N->getMaskElt(i);
4858 if (Elt < 0) continue;
4859 Elt &= NumLaneElts - 1;
4860 unsigned ShAmt = (i << Shift) % 8;
4861 Mask |= Elt << ShAmt;
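// Worked example for getShuffleSHUFImmediate (illustrative, not from the
// original source): for the v4i32 PSHUFD mask <3, 1, 0, 2>, Shift is 1 and the
// two bits per element accumulate to binary 10'00'01'11 = 0x87. For 256-bit
// types both 128-bit lanes are expected to use the same in-lane pattern, so
// they OR into the same 8-bit immediate.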
4867 /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
4868 /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
4869 static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) {
4870 MVT VT = N->getSimpleValueType(0);
4872 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4873 "Unsupported vector type for PSHUFHW");
4875 unsigned NumElts = VT.getVectorNumElements();
4878 for (unsigned l = 0; l != NumElts; l += 8) {
4879 // 8 elements per lane, but we only care about the last 4.
4880 for (unsigned i = 0; i < 4; ++i) {
4881 int Elt = N->getMaskElt(l+i+4);
4882 if (Elt < 0) continue;
4883 Elt &= 0x3; // only 2-bits.
4884 Mask |= Elt << (i * 2);
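// Worked example for getShufflePSHUFHWImmediate (illustrative, not from the
// original source): for the v8i16 mask <0, 1, 2, 3, 7, 7, 4, 4>, only elements
// 4..7 matter; after masking to lane-relative values they are 3, 3, 0, 0,
// giving the immediate 0x0F.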
4891 /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
4892 /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
4893 static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) {
4894 MVT VT = N->getSimpleValueType(0);
4896 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4897 "Unsupported vector type for PSHUFHW");
4899 unsigned NumElts = VT.getVectorNumElements();
4902 for (unsigned l = 0; l != NumElts; l += 8) {
4903 // 8 elements per lane, but we only care about the first 4.
4904 for (unsigned i = 0; i < 4; ++i) {
4905 int Elt = N->getMaskElt(l+i);
4906 if (Elt < 0) continue;
4907 Elt &= 0x3; // only 2-bits
4908 Mask |= Elt << (i * 2);
4915 /// \brief Return the appropriate immediate to shuffle the specified
4916 /// VECTOR_SHUFFLE mask with the PALIGNR (if InterLane is false) or with
4917 /// VALIGN (if InterLane is true) instructions.
4918 static unsigned getShuffleAlignrImmediate(ShuffleVectorSDNode *SVOp,
4920 MVT VT = SVOp->getSimpleValueType(0);
4921 unsigned EltSize = InterLane ? 1 :
4922 VT.getVectorElementType().getSizeInBits() >> 3;
4924 unsigned NumElts = VT.getVectorNumElements();
4925 unsigned NumLanes = VT.is512BitVector() ? 1 : VT.getSizeInBits()/128;
4926 unsigned NumLaneElts = NumElts/NumLanes;
4930 for (i = 0; i != NumElts; ++i) {
4931 Val = SVOp->getMaskElt(i);
4935 if (Val >= (int)NumElts)
4936 Val -= NumElts - NumLaneElts;
4938 assert(Val - i > 0 && "PALIGNR imm should be positive");
4939 return (Val - i) * EltSize;
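// Worked example for getShuffleAlignrImmediate (illustrative, not from the
// original source): for a v8i16 PALIGNR shuffle whose mask starts
// <3, 4, 5, ...>, the first defined element gives Val = 3 at i = 0, so the
// returned byte immediate is (3 - 0) * 2 = 6.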
4942 /// \brief Return the appropriate immediate to shuffle the specified
4943 /// VECTOR_SHUFFLE mask with the PALIGNR instruction.
4944 static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) {
4945 return getShuffleAlignrImmediate(SVOp, false);
4948 /// \brief Return the appropriate immediate to shuffle the specified
4949 /// VECTOR_SHUFFLE mask with the VALIGN instruction.
4950 static unsigned getShuffleVALIGNImmediate(ShuffleVectorSDNode *SVOp) {
4951 return getShuffleAlignrImmediate(SVOp, true);
4955 static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) {
4956 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4957 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4958 llvm_unreachable("Illegal extract subvector for VEXTRACT");
4961 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4963 MVT VecVT = N->getOperand(0).getSimpleValueType();
4964 MVT ElVT = VecVT.getVectorElementType();
4966 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4967 return Index / NumElemsPerChunk;
4970 static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) {
4971 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4972 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4973 llvm_unreachable("Illegal insert subvector for VINSERT");
4976 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4978 MVT VecVT = N->getSimpleValueType(0);
4979 MVT ElVT = VecVT.getVectorElementType();
4981 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4982 return Index / NumElemsPerChunk;
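// Worked example (illustrative, not from the original source): inserting a
// 128-bit subvector into a v8f32 at element index 4 gives NumElemsPerChunk =
// 128/32 = 4 and an immediate of 4/4 = 1, i.e. the upper 128-bit half. The
// same arithmetic applies to getExtractVEXTRACTImmediate above.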
4985 /// getExtractVEXTRACT128Immediate - Return the appropriate immediate
4986 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128
4987 /// and VEXTRACTI128 instructions.
4988 unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) {
4989 return getExtractVEXTRACTImmediate(N, 128);
4992 /// getExtractVEXTRACT256Immediate - Return the appropriate immediate
4993 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF64x4
4994 /// and VEXTRACTI64x4 instructions.
4995 unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) {
4996 return getExtractVEXTRACTImmediate(N, 256);
4999 /// getInsertVINSERT128Immediate - Return the appropriate immediate
5000 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
5001 /// and VINSERTI128 instructions.
5002 unsigned X86::getInsertVINSERT128Immediate(SDNode *N) {
5003 return getInsertVINSERTImmediate(N, 128);
5006 /// getInsertVINSERT256Immediate - Return the appropriate immediate
5007 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF64x4
5008 /// and VINSERTI64x4 instructions.
5009 unsigned X86::getInsertVINSERT256Immediate(SDNode *N) {
5010 return getInsertVINSERTImmediate(N, 256);
5013 /// isZero - Returns true if V is a constant integer zero
5014 static bool isZero(SDValue V) {
5015 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
5016 return C && C->isNullValue();
5019 /// isZeroNode - Returns true if Elt is a constant zero or a floating point constant +0.0.
5021 bool X86::isZeroNode(SDValue Elt) {
5024 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Elt))
5025 return CFP->getValueAPF().isPosZero();
5029 /// ShouldXformToMOVHLPS - Return true if the node should be transformed to
5030 /// match movhlps. The lower half elements should come from upper half of
5031 /// V1 (and in order), and the upper half elements should come from the upper
5032 /// half of V2 (and in order).
5033 static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, MVT VT) {
5034 if (!VT.is128BitVector())
5036 if (VT.getVectorNumElements() != 4)
5038 for (unsigned i = 0, e = 2; i != e; ++i)
5039 if (!isUndefOrEqual(Mask[i], i+2))
5041 for (unsigned i = 2; i != 4; ++i)
5042 if (!isUndefOrEqual(Mask[i], i+4))
5047 /// isScalarLoadToVector - Returns true if the node is a scalar load that
5048 /// is promoted to a vector. It also returns the LoadSDNode by reference if required.
5050 static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = nullptr) {
5051 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
5053 N = N->getOperand(0).getNode();
5054 if (!ISD::isNON_EXTLoad(N))
5057 *LD = cast<LoadSDNode>(N);
5061 // Test whether the given value is a vector value which will be legalized into a load.
5063 static bool WillBeConstantPoolLoad(SDNode *N) {
5064 if (N->getOpcode() != ISD::BUILD_VECTOR)
5067 // Check for any non-constant elements.
5068 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
5069 switch (N->getOperand(i).getNode()->getOpcode()) {
5071 case ISD::ConstantFP:
5078 // Vectors of all-zeros and all-ones are materialized with special
5079 // instructions rather than being loaded.
5080 return !ISD::isBuildVectorAllZeros(N) &&
5081 !ISD::isBuildVectorAllOnes(N);
5084 /// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
5085 /// match movlp{s|d}. The lower half elements should come from lower half of
5086 /// V1 (and in order), and the upper half elements should come from the upper
5087 /// half of V2 (and in order). And since V1 will become the source of the
5088 /// MOVLP, it must be either a vector load or a scalar load to vector.
5089 static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
5090 ArrayRef<int> Mask, MVT VT) {
5091 if (!VT.is128BitVector())
5094 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
5096 // If V2 is a vector load, don't do this transformation; we will try to fold
5097 // the load into a shufps op instead.
5098 if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2))
5101 unsigned NumElems = VT.getVectorNumElements();
5103 if (NumElems != 2 && NumElems != 4)
5105 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
5106 if (!isUndefOrEqual(Mask[i], i))
5108 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
5109 if (!isUndefOrEqual(Mask[i], i+NumElems))
5114 /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
5115 /// to a zero vector.
5116 /// FIXME: move to dag combiner / method on ShuffleVectorSDNode
5117 static bool isZeroShuffle(ShuffleVectorSDNode *N) {
5118 SDValue V1 = N->getOperand(0);
5119 SDValue V2 = N->getOperand(1);
5120 unsigned NumElems = N->getValueType(0).getVectorNumElements();
5121 for (unsigned i = 0; i != NumElems; ++i) {
5122 int Idx = N->getMaskElt(i);
5123 if (Idx >= (int)NumElems) {
5124 unsigned Opc = V2.getOpcode();
5125 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
5127 if (Opc != ISD::BUILD_VECTOR ||
5128 !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
5130 } else if (Idx >= 0) {
5131 unsigned Opc = V1.getOpcode();
5132 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
5134 if (Opc != ISD::BUILD_VECTOR ||
5135 !X86::isZeroNode(V1.getOperand(Idx)))
5142 /// getZeroVector - Returns a vector of specified type with all zero elements.
5144 static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
5145 SelectionDAG &DAG, SDLoc dl) {
5146 assert(VT.isVector() && "Expected a vector type");
5148 // Always build SSE zero vectors as <4 x i32> bitcasted
5149 // to their dest type. This ensures they get CSE'd.
5151 if (VT.is128BitVector()) { // SSE
5152 if (Subtarget->hasSSE2()) { // SSE2
5153 SDValue Cst = DAG.getConstant(0, MVT::i32);
5154 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5156 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5157 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
5159 } else if (VT.is256BitVector()) { // AVX
5160 if (Subtarget->hasInt256()) { // AVX2
5161 SDValue Cst = DAG.getConstant(0, MVT::i32);
5162 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5163 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5165 // 256-bit logic and arithmetic instructions in AVX are all
5166 // floating-point, no support for integer ops. Emit fp zeroed vectors.
5167 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5168 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5169 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops);
5171 } else if (VT.is512BitVector()) { // AVX-512
5172 SDValue Cst = DAG.getConstant(0, MVT::i32);
5173 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
5174 Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5175 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
5176 } else if (VT.getScalarType() == MVT::i1) {
5177 assert(VT.getVectorNumElements() <= 16 && "Unexpected vector type");
5178 SDValue Cst = DAG.getConstant(0, MVT::i1);
5179 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
5180 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
5182 llvm_unreachable("Unexpected vector type");
5184 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
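// Illustrative note (not from the original source): getZeroVector(v4i64) on an
// AVX2 target builds a v8i32 BUILD_VECTOR of zero constants and bitcasts it
// back to v4i64, so all zero vectors of the same width CSE to the same node.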
5187 /// getOnesVector - Returns a vector of specified type with all bits set.
5188 /// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
5189 /// no AVX2 support, use two <4 x i32>s inserted into an <8 x i32> appropriately.
5190 /// Then bitcast to their original type, ensuring they get CSE'd.
5191 static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG,
5193 assert(VT.isVector() && "Expected a vector type");
5195 SDValue Cst = DAG.getConstant(~0U, MVT::i32);
5197 if (VT.is256BitVector()) {
5198 if (HasInt256) { // AVX2
5199 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5200 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5202 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5203 Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
5205 } else if (VT.is128BitVector()) {
5206 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5208 llvm_unreachable("Unexpected vector type");
5210 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5213 /// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
5214 /// that point to V2 point to its first element.
5215 static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) {
5216 for (unsigned i = 0; i != NumElems; ++i) {
5217 if (Mask[i] > (int)NumElems) {
5223 /// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd
5224 /// operation of specified width.
5225 static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
5227 unsigned NumElems = VT.getVectorNumElements();
5228 SmallVector<int, 8> Mask;
5229 Mask.push_back(NumElems);
5230 for (unsigned i = 1; i != NumElems; ++i)
5232 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
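// Illustrative example (not from the original source): for v4f32, getMOVL
// builds the mask <4, 1, 2, 3>, i.e. the lowest element is taken from V2 and
// the remaining elements stay in place from V1, matching MOVSS semantics.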
5235 /// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
5236 static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5238 unsigned NumElems = VT.getVectorNumElements();
5239 SmallVector<int, 8> Mask;
5240 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
5242 Mask.push_back(i + NumElems);
5244 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5247 /// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
5248 static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5250 unsigned NumElems = VT.getVectorNumElements();
5251 SmallVector<int, 8> Mask;
5252 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
5253 Mask.push_back(i + Half);
5254 Mask.push_back(i + NumElems + Half);
5256 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
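// Illustrative example (not from the original source): for v4i32, getUnpackl
// produces the interleaving mask <0, 4, 1, 5> and getUnpackh produces
// <2, 6, 3, 7>, mirroring PUNPCKLDQ/PUNPCKHDQ.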
5259 // PromoteSplati8i16 - i16 and i8 vector types can't be used directly by
5260 // a generic shuffle instruction because the target has no such instructions.
5261 // Generate shuffles which repeat i16 and i8 several times until they can be
5262 // represented by v4f32 and then be manipulated by target supported shuffles.
5263 static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
5264 MVT VT = V.getSimpleValueType();
5265 int NumElems = VT.getVectorNumElements();
5268 while (NumElems > 4) {
5269 if (EltNo < NumElems/2) {
5270 V = getUnpackl(DAG, dl, VT, V, V);
5272 V = getUnpackh(DAG, dl, VT, V, V);
5273 EltNo -= NumElems/2;
5280 /// getLegalSplat - Generate a legal splat with supported x86 shuffles
5281 static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
5282 MVT VT = V.getSimpleValueType();
5285 if (VT.is128BitVector()) {
5286 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
5287 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
5288 V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32),
5290 } else if (VT.is256BitVector()) {
5291 // To use VPERMILPS to splat scalars, the second half of indices must
5292 // refer to the higher part, which is a duplication of the lower one,
5293 // because VPERMILPS can only handle in-lane permutations.
5294 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo,
5295 EltNo+4, EltNo+4, EltNo+4, EltNo+4 };
5297 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V);
5298 V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32),
5301 llvm_unreachable("Vector size not supported");
5303 return DAG.getNode(ISD::BITCAST, dl, VT, V);
5306 /// PromoteSplat - Splat is promoted to target supported vector shuffles.
5307 static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
5308 MVT SrcVT = SV->getSimpleValueType(0);
5309 SDValue V1 = SV->getOperand(0);
5312 int EltNo = SV->getSplatIndex();
5313 int NumElems = SrcVT.getVectorNumElements();
5314 bool Is256BitVec = SrcVT.is256BitVector();
5316 assert(((SrcVT.is128BitVector() && NumElems > 4) || Is256BitVec) &&
5317 "Unknown how to promote splat for type");
5319 // Extract the 128-bit part containing the splat element and update
5320 // the splat element index when it refers to the higher register.
5322 V1 = Extract128BitVector(V1, EltNo, DAG, dl);
5323 if (EltNo >= NumElems/2)
5324 EltNo -= NumElems/2;
5327 // All i16 and i8 vector types can't be used directly by a generic shuffle
5328 // instruction because the target has no such instruction. Generate shuffles
5329 // which repeat i16 and i8 several times until they fit in i32, and then can
5330 // be manipulated by target supported shuffles.
5331 MVT EltVT = SrcVT.getVectorElementType();
5332 if (EltVT == MVT::i8 || EltVT == MVT::i16)
5333 V1 = PromoteSplati8i16(V1, DAG, EltNo);
5335 // Recreate the 256-bit vector and place the same 128-bit vector
5336 // into the low and high part. This is necessary because we want
5337 // to use VPERM* to shuffle the vectors
5339 V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1);
5342 return getLegalSplat(DAG, V1, EltNo);
5345 /// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
5346 /// vector and a zero or undef vector. This produces a shuffle where the low
5347 /// element of V2 is swizzled into the zero/undef vector, landing at element
5348 /// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
5349 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
5351 const X86Subtarget *Subtarget,
5352 SelectionDAG &DAG) {
5353 MVT VT = V2.getSimpleValueType();
5355 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
5356 unsigned NumElems = VT.getVectorNumElements();
5357 SmallVector<int, 16> MaskVec;
5358 for (unsigned i = 0; i != NumElems; ++i)
5359 // If this is the insertion idx, put the low elt of V2 here.
5360 MaskVec.push_back(i == Idx ? NumElems : i);
5361 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]);
5364 /// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
5365 /// target specific opcode. Returns true if the Mask could be calculated. Sets
5366 /// IsUnary to true if it only uses one source. Note that this will set IsUnary for
5367 /// shuffles which use a single input multiple times, and in those cases it will
5368 /// adjust the mask to only have indices within that single input.
5369 static bool getTargetShuffleMask(SDNode *N, MVT VT,
5370 SmallVectorImpl<int> &Mask, bool &IsUnary) {
5371 unsigned NumElems = VT.getVectorNumElements();
5375 bool IsFakeUnary = false;
5376 switch(N->getOpcode()) {
5377 case X86ISD::BLENDI:
5378 ImmN = N->getOperand(N->getNumOperands()-1);
5379 DecodeBLENDMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5382 ImmN = N->getOperand(N->getNumOperands()-1);
5383 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5384 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5386 case X86ISD::UNPCKH:
5387 DecodeUNPCKHMask(VT, Mask);
5388 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5390 case X86ISD::UNPCKL:
5391 DecodeUNPCKLMask(VT, Mask);
5392 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5394 case X86ISD::MOVHLPS:
5395 DecodeMOVHLPSMask(NumElems, Mask);
5396 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5398 case X86ISD::MOVLHPS:
5399 DecodeMOVLHPSMask(NumElems, Mask);
5400 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5402 case X86ISD::PALIGNR:
5403 ImmN = N->getOperand(N->getNumOperands()-1);
5404 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5406 case X86ISD::PSHUFD:
5407 case X86ISD::VPERMILPI:
5408 ImmN = N->getOperand(N->getNumOperands()-1);
5409 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5412 case X86ISD::PSHUFHW:
5413 ImmN = N->getOperand(N->getNumOperands()-1);
5414 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5417 case X86ISD::PSHUFLW:
5418 ImmN = N->getOperand(N->getNumOperands()-1);
5419 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5422 case X86ISD::PSHUFB: {
5424 SDValue MaskNode = N->getOperand(1);
5425 while (MaskNode->getOpcode() == ISD::BITCAST)
5426 MaskNode = MaskNode->getOperand(0);
5428 if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
5429 // If we have a build-vector, then things are easy.
5430 EVT VT = MaskNode.getValueType();
5431 assert(VT.isVector() &&
5432 "Can't produce a non-vector with a build_vector!");
5433 if (!VT.isInteger())
5436 int NumBytesPerElement = VT.getVectorElementType().getSizeInBits() / 8;
5438 SmallVector<uint64_t, 32> RawMask;
5439 for (int i = 0, e = MaskNode->getNumOperands(); i < e; ++i) {
5440 SDValue Op = MaskNode->getOperand(i);
5441 if (Op->getOpcode() == ISD::UNDEF) {
5442 RawMask.push_back((uint64_t)SM_SentinelUndef);
5445 auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
5448 APInt MaskElement = CN->getAPIntValue();
5450 // We now have to decode the element which could be any integer size and
5451 // extract each byte of it.
5452 for (int j = 0; j < NumBytesPerElement; ++j) {
5453 // Note that this is x86 and so always little endian: the low byte is
5454 // the first byte of the mask.
5455 RawMask.push_back(MaskElement.getLoBits(8).getZExtValue());
5456 MaskElement = MaskElement.lshr(8);
5459 DecodePSHUFBMask(RawMask, Mask);
5463 auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
5467 SDValue Ptr = MaskLoad->getBasePtr();
5468 if (Ptr->getOpcode() == X86ISD::Wrapper)
5469 Ptr = Ptr->getOperand(0);
5471 auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
5472 if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
5475 if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
5476 DecodePSHUFBMask(C, Mask);
5484 case X86ISD::VPERMI:
5485 ImmN = N->getOperand(N->getNumOperands()-1);
5486 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5491 DecodeScalarMoveMask(VT, /* IsLoad */ false, Mask);
5493 case X86ISD::VPERM2X128:
5494 ImmN = N->getOperand(N->getNumOperands()-1);
5495 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5496 if (Mask.empty()) return false;
5498 case X86ISD::MOVSLDUP:
5499 DecodeMOVSLDUPMask(VT, Mask);
5502 case X86ISD::MOVSHDUP:
5503 DecodeMOVSHDUPMask(VT, Mask);
5506 case X86ISD::MOVDDUP:
5507 DecodeMOVDDUPMask(VT, Mask);
5510 case X86ISD::MOVLHPD:
5511 case X86ISD::MOVLPD:
5512 case X86ISD::MOVLPS:
5513 // Not yet implemented
5515 default: llvm_unreachable("unknown target shuffle node");
5518 // If we have a fake unary shuffle, the shuffle mask is spread across two
5519 // inputs that are actually the same node. Re-map the mask to always point
5520 // into the first input.
5523 if (M >= (int)Mask.size())
5529 /// getShuffleScalarElt - Returns the scalar element that will make up the ith
5530 /// element of the result of the vector shuffle.
5531 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
5534 return SDValue(); // Limit search depth.
5536 SDValue V = SDValue(N, 0);
5537 EVT VT = V.getValueType();
5538 unsigned Opcode = V.getOpcode();
5540 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
5541 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
5542 int Elt = SV->getMaskElt(Index);
5545 return DAG.getUNDEF(VT.getVectorElementType());
5547 unsigned NumElems = VT.getVectorNumElements();
5548 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
5549 : SV->getOperand(1);
5550 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
5553 // Recurse into target specific vector shuffles to find scalars.
5554 if (isTargetShuffle(Opcode)) {
5555 MVT ShufVT = V.getSimpleValueType();
5556 unsigned NumElems = ShufVT.getVectorNumElements();
5557 SmallVector<int, 16> ShuffleMask;
5560 if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
5563 int Elt = ShuffleMask[Index];
5565 return DAG.getUNDEF(ShufVT.getVectorElementType());
5567 SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
5569 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
5573 // Actual nodes that may contain scalar elements
5574 if (Opcode == ISD::BITCAST) {
5575 V = V.getOperand(0);
5576 EVT SrcVT = V.getValueType();
5577 unsigned NumElems = VT.getVectorNumElements();
5579 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
5583 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
5584 return (Index == 0) ? V.getOperand(0)
5585 : DAG.getUNDEF(VT.getVectorElementType());
5587 if (V.getOpcode() == ISD::BUILD_VECTOR)
5588 return V.getOperand(Index);
5593 /// getNumOfConsecutiveZeros - Return the number of consecutive elements of a
5594 /// vector shuffle operation which are zero. The
5595 /// search can start in two different directions, from left or right.
5596 /// We count undefs as zeros until PreferredNum is reached.
5597 static unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp,
5598 unsigned NumElems, bool ZerosFromLeft,
5600 unsigned PreferredNum = -1U) {
5601 unsigned NumZeros = 0;
5602 for (unsigned i = 0; i != NumElems; ++i) {
5603 unsigned Index = ZerosFromLeft ? i : NumElems - i - 1;
5604 SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
5608 if (X86::isZeroNode(Elt))
5610 else if (Elt.getOpcode() == ISD::UNDEF) // Undef as zero up to PreferredNum.
5611 NumZeros = std::min(NumZeros + 1, PreferredNum);
5619 /// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
5620 /// correspond consecutively to elements from one of the vector operands,
5621 /// starting from its index OpIdx. Also sets OpNum to the source vector operand used.
5623 bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
5624 unsigned MaskI, unsigned MaskE, unsigned OpIdx,
5625 unsigned NumElems, unsigned &OpNum) {
5626 bool SeenV1 = false;
5627 bool SeenV2 = false;
5629 for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
5630 int Idx = SVOp->getMaskElt(i);
5631 // Ignore undef indices
5635 if (Idx < (int)NumElems)
5640 // Only accept consecutive elements from the same vector
5641 if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
5645 OpNum = SeenV1 ? 0 : 1;
5649 /// isVectorShiftRight - Returns true if the shuffle can be implemented as a
5650 /// logical right shift of a vector.
5651 static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5652 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5654 SVOp->getSimpleValueType(0).getVectorNumElements();
5655 unsigned NumZeros = getNumOfConsecutiveZeros(
5656 SVOp, NumElems, false /* check zeros from right */, DAG,
5657 SVOp->getMaskElt(0));
5663 // Considering the elements in the mask that are not consecutive zeros,
5664 // check if they consecutively come from only one of the source vectors.
5666 //   e.g. V1 = {X, A, B, C}, with a zero in the last result element:
5668 //   vector_shuffle V1, V2 <1, 2, 3, X>
5670 if (!isShuffleMaskConsecutive(SVOp,
5671 0, // Mask Start Index
5672 NumElems-NumZeros, // Mask End Index(exclusive)
5673 NumZeros, // Where to start looking in the src vector
5674 NumElems, // Number of elements in vector
5675 OpSrc)) // Which source operand ?
5680 ShVal = SVOp->getOperand(OpSrc);
5684 /// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
5685 /// logical left shift of a vector.
5686 static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5687 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5689 SVOp->getSimpleValueType(0).getVectorNumElements();
5690 unsigned NumZeros = getNumOfConsecutiveZeros(
5691 SVOp, NumElems, true /* check zeros from left */, DAG,
5692 NumElems - SVOp->getMaskElt(NumElems - 1) - 1);
5698 // Considering the elements in the mask that are not consecutive zeros,
5699 // check if they consecutively come from only one of the source vectors.
5701 //   e.g. V2 = {A, B, X, X}, with leading zeros in the result:
5703 //   vector_shuffle V1, V2 <X, X, 4, 5>
5705 if (!isShuffleMaskConsecutive(SVOp,
5706 NumZeros, // Mask Start Index
5707 NumElems, // Mask End Index(exclusive)
5708 0, // Where to start looking in the src vector
5709 NumElems, // Number of elements in vector
5710 OpSrc)) // Which source operand ?
5715 ShVal = SVOp->getOperand(OpSrc);
5719 /// isVectorShift - Returns true if the shuffle can be implemented as a
5720 /// logical left or right shift of a vector.
5721 static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5722 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5723 // Although the logic below supports any bitwidth size, there are no
5724 // shift instructions which handle more than 128-bit vectors.
5725 if (!SVOp->getSimpleValueType(0).is128BitVector())
5728 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
5729 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt))
5735 /// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
5737 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
5738 unsigned NumNonZero, unsigned NumZero,
5740 const X86Subtarget* Subtarget,
5741 const TargetLowering &TLI) {
5748 for (unsigned i = 0; i < 16; ++i) {
5749 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
5750 if (ThisIsNonZero && First) {
5752 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5754 V = DAG.getUNDEF(MVT::v8i16);
5759 SDValue ThisElt, LastElt;
5760 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
5761 if (LastIsNonZero) {
5762 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
5763 MVT::i16, Op.getOperand(i-1));
5765 if (ThisIsNonZero) {
5766 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
5767 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
5768 ThisElt, DAG.getConstant(8, MVT::i8));
5770 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
5774 if (ThisElt.getNode())
5775 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
5776 DAG.getIntPtrConstant(i/2));
5780 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
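// Illustrative note (not from the original source): the v16i8 lowering above
// packs each pair of adjacent bytes into one i16 element (the even-indexed
// byte zero-extended into the low byte, the odd-indexed byte shifted left by
// 8 and ORed in), inserts that i16 into a v8i16, and finally bitcasts the
// v8i16 back to v16i8.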
5783 /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
5785 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
5786 unsigned NumNonZero, unsigned NumZero,
5788 const X86Subtarget* Subtarget,
5789 const TargetLowering &TLI) {
5796 for (unsigned i = 0; i < 8; ++i) {
5797 bool isNonZero = (NonZeros & (1 << i)) != 0;
5801 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5803 V = DAG.getUNDEF(MVT::v8i16);
5806 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
5807 MVT::v8i16, V, Op.getOperand(i),
5808 DAG.getIntPtrConstant(i));
5815 /// LowerBuildVectorv4x32 - Custom lower build_vector of v4i32 or v4f32.
5816 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
5817 const X86Subtarget *Subtarget,
5818 const TargetLowering &TLI) {
5819 // Find all zeroable elements.
5821 for (int i=0; i < 4; ++i) {
5822 SDValue Elt = Op->getOperand(i);
5823 Zeroable[i] = (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt));
5825 assert(std::count_if(&Zeroable[0], &Zeroable[4],
5826 [](bool M) { return !M; }) > 1 &&
5827 "We expect at least two non-zero elements!");
5829 // We only know how to deal with build_vector nodes where elements are either
5830 // zeroable or extract_vector_elt with constant index.
5831 SDValue FirstNonZero;
5832 unsigned FirstNonZeroIdx;
5833 for (unsigned i=0; i < 4; ++i) {
5836 SDValue Elt = Op->getOperand(i);
5837 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5838 !isa<ConstantSDNode>(Elt.getOperand(1)))
5840 // Make sure that this node is extracting from a 128-bit vector.
5841 MVT VT = Elt.getOperand(0).getSimpleValueType();
5842 if (!VT.is128BitVector())
5844 if (!FirstNonZero.getNode()) {
5846 FirstNonZeroIdx = i;
5850 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
5851 SDValue V1 = FirstNonZero.getOperand(0);
5852 MVT VT = V1.getSimpleValueType();
5854 // See if this build_vector can be lowered as a blend with zero.
5856 unsigned EltMaskIdx, EltIdx;
5858 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
5859 if (Zeroable[EltIdx]) {
5860 // The zero vector will be on the right hand side.
5861 Mask[EltIdx] = EltIdx+4;
5865 Elt = Op->getOperand(EltIdx);
5866 // By construction, Elt is an EXTRACT_VECTOR_ELT with constant index.
5867 EltMaskIdx = cast<ConstantSDNode>(Elt.getOperand(1))->getZExtValue();
5868 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
5870 Mask[EltIdx] = EltIdx;
5874 // Let the shuffle legalizer deal with blend operations.
5875 SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
5876 if (V1.getSimpleValueType() != VT)
5877 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), VT, V1);
5878 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, &Mask[0]);
5881 // See if we can lower this build_vector to an INSERTPS.
5882 if (!Subtarget->hasSSE41())
5885 SDValue V2 = Elt.getOperand(0);
5886 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
5889 bool CanFold = true;
5890 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
5894 SDValue Current = Op->getOperand(i);
5895 SDValue SrcVector = Current->getOperand(0);
5898 CanFold = SrcVector == V1 &&
5899 cast<ConstantSDNode>(Current.getOperand(1))->getZExtValue() == i;
5905 assert(V1.getNode() && "Expected at least two non-zero elements!");
5906 if (V1.getSimpleValueType() != MVT::v4f32)
5907 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), MVT::v4f32, V1);
5908 if (V2.getSimpleValueType() != MVT::v4f32)
5909 V2 = DAG.getNode(ISD::BITCAST, SDLoc(V2), MVT::v4f32, V2);
5911 // Ok, we can emit an INSERTPS instruction.
5913 for (int i = 0; i < 4; ++i)
5917 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
5918 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
5919 SDValue Result = DAG.getNode(X86ISD::INSERTPS, SDLoc(Op), MVT::v4f32, V1, V2,
5920 DAG.getIntPtrConstant(InsertPSMask));
5921 return DAG.getNode(ISD::BITCAST, SDLoc(Op), VT, Result);
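// Illustrative note (not from the original source): in the INSERTPS immediate
// built above, bits [7:6] select the source element of V2, bits [5:4] select
// the destination slot in V1, and bits [3:0] form the zero mask. For example,
// EltMaskIdx = 2 and EltIdx = 0 with no zeroed lanes yields 0x80.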
5924 /// Return a vector logical shift node.
5925 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
5926 unsigned NumBits, SelectionDAG &DAG,
5927 const TargetLowering &TLI, SDLoc dl) {
5928 assert(VT.is128BitVector() && "Unknown type for VShift");
5929 MVT ShVT = MVT::v2i64;
5930 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
5931 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
5932 MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(SrcOp.getValueType());
5933 SDValue ShiftVal = DAG.getConstant(NumBits, ScalarShiftTy);
5934 return DAG.getNode(ISD::BITCAST, dl, VT,
5935 DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
5939 LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
5941 // Check if the scalar load can be widened into a vector load. And if
5942 // the address is "base + cst" see if the cst can be "absorbed" into
5943 // the shuffle mask.
5944 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
5945 SDValue Ptr = LD->getBasePtr();
5946 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
5948 EVT PVT = LD->getValueType(0);
5949 if (PVT != MVT::i32 && PVT != MVT::f32)
5954 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
5955 FI = FINode->getIndex();
5957 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
5958 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
5959 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5960 Offset = Ptr.getConstantOperandVal(1);
5961 Ptr = Ptr.getOperand(0);
5966 // FIXME: 256-bit vector instructions don't require a strict alignment,
5967 // improve this code to support it better.
5968 unsigned RequiredAlign = VT.getSizeInBits()/8;
5969 SDValue Chain = LD->getChain();
5970 // Make sure the stack object alignment is at least 16 or 32.
5971 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
5972 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
5973 if (MFI->isFixedObjectIndex(FI)) {
5974 // Can't change the alignment. FIXME: It's possible to compute
5975 // the exact stack offset and reference FI + adjust offset instead.
5976 // If someone *really* cares about this. That's the way to implement it.
5979 MFI->setObjectAlignment(FI, RequiredAlign);
5983 // (Offset % 16 or 32) must be a multiple of 4. The address is then
5984 // Ptr + (Offset & ~15).
5987 if ((Offset % RequiredAlign) & 3)
5989 int64_t StartOffset = Offset & ~(RequiredAlign-1);
5991 Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(),
5992 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType()));
5994 int EltNo = (Offset - StartOffset) >> 2;
5995 unsigned NumElems = VT.getVectorNumElements();
5997 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
5998 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
5999 LD->getPointerInfo().getWithOffset(StartOffset),
6000 false, false, false, 0);
6002 SmallVector<int, 8> Mask;
6003 for (unsigned i = 0; i != NumElems; ++i)
6004 Mask.push_back(EltNo);
6006 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
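// Worked example for LowerAsSplatVectorLoad (illustrative, not from the
// original source): for a scalar f32 load from a stack slot with Offset = 20
// and RequiredAlign = 16, StartOffset is 20 & ~15 = 16, EltNo is
// (20 - 16) >> 2 = 1, and the result is a v4f32 load at Ptr + 16 splatted
// with the mask <1, 1, 1, 1>.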
6012 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
6013 /// elements can be replaced by a single large load which has the same value as
6014 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
6016 /// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
6018 /// FIXME: we'd also like to handle the case where the last elements are zero
6019 /// rather than undef via VZEXT_LOAD, but we do not detect that case today.
6020 /// There's even a handy isZeroNode for that purpose.
6021 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
6022 SDLoc &DL, SelectionDAG &DAG,
6023 bool isAfterLegalize) {
6024 unsigned NumElems = Elts.size();
6026 LoadSDNode *LDBase = nullptr;
6027 unsigned LastLoadedElt = -1U;
6029 // For each element in the initializer, see if we've found a load or an undef.
6030 // If we don't find an initial load element, or later load elements are
6031 // non-consecutive, bail out.
6032 for (unsigned i = 0; i < NumElems; ++i) {
6033 SDValue Elt = Elts[i];
6034 // Look through a bitcast.
6035 if (Elt.getNode() && Elt.getOpcode() == ISD::BITCAST)
6036 Elt = Elt.getOperand(0);
6037 if (!Elt.getNode() ||
6038 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
6041 if (Elt.getNode()->getOpcode() == ISD::UNDEF)
6043 LDBase = cast<LoadSDNode>(Elt.getNode());
6047 if (Elt.getOpcode() == ISD::UNDEF)
6050 LoadSDNode *LD = cast<LoadSDNode>(Elt);
6051 EVT LdVT = Elt.getValueType();
6052 // Each loaded element must be the correct fractional portion of the
6053 // requested vector load.
6054 if (LdVT.getSizeInBits() != VT.getSizeInBits() / NumElems)
6056 if (!DAG.isConsecutiveLoad(LD, LDBase, LdVT.getSizeInBits() / 8, i))
6061 // If we have found an entire vector of loads and undefs, then return a large
6062 // load of the entire vector width starting at the base pointer. If we found
6063 // consecutive loads for the low half, generate a vzext_load node.
6064 if (LastLoadedElt == NumElems - 1) {
6065 assert(LDBase && "Did not find base load for merging consecutive loads");
6066 EVT EltVT = LDBase->getValueType(0);
6067 // Ensure that the input vector size for the merged loads matches the
6068 // cumulative size of the input elements.
6069 if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems)
6072 if (isAfterLegalize &&
6073 !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
6076 SDValue NewLd = SDValue();
6078 NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
6079 LDBase->getPointerInfo(), LDBase->isVolatile(),
6080 LDBase->isNonTemporal(), LDBase->isInvariant(),
6081 LDBase->getAlignment());
6083 if (LDBase->hasAnyUseOfValue(1)) {
6084 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6086 SDValue(NewLd.getNode(), 1));
6087 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6088 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6089 SDValue(NewLd.getNode(), 1));
6095 // TODO: The code below fires only for loading the low v2i32 / v2f32
6096 // of a v4i32 / v4f32. It's probably worth generalizing.
6097 EVT EltVT = VT.getVectorElementType();
6098 if (NumElems == 4 && LastLoadedElt == 1 && (EltVT.getSizeInBits() == 32) &&
6099 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
6100 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
6101 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
6103 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::i64,
6104 LDBase->getPointerInfo(),
6105 LDBase->getAlignment(),
6106 false/*isVolatile*/, true/*ReadMem*/,
6109 // Make sure the newly-created LOAD is in the same position as LDBase in
6110 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and
6111 // update uses of LDBase's output chain to use the TokenFactor.
6112 if (LDBase->hasAnyUseOfValue(1)) {
6113 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6114 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1));
6115 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6116 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6117 SDValue(ResNode.getNode(), 1));
6120 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
6125 /// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
6126 /// to generate a splat value for the following cases:
6127 /// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
6128 /// 2. A splat shuffle which uses a scalar_to_vector node which comes from
6129 /// a scalar load, or a constant.
6130 /// The VBROADCAST node is returned when a pattern is found,
6131 /// or SDValue() otherwise.
6132 static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
6133 SelectionDAG &DAG) {
6134 // VBROADCAST requires AVX.
6135 // TODO: Splats could be generated for non-AVX CPUs using SSE
6136 // instructions, but there's less potential gain for only 128-bit vectors.
6137 if (!Subtarget->hasAVX())
6140 MVT VT = Op.getSimpleValueType();
6143 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
6144 "Unsupported vector type for broadcast.");
6149 switch (Op.getOpcode()) {
6151 // Unknown pattern found.
6154 case ISD::BUILD_VECTOR: {
6155 auto *BVOp = cast<BuildVectorSDNode>(Op.getNode());
6156 BitVector UndefElements;
6157 SDValue Splat = BVOp->getSplatValue(&UndefElements);
6159 // We need a splat of a single value to use broadcast, and it doesn't
6160 // make any sense if the value is only in one element of the vector.
6161 if (!Splat || (VT.getVectorNumElements() - UndefElements.count()) <= 1)
6165 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6166 Ld.getOpcode() == ISD::ConstantFP);
6168 // Make sure that all of the users of a non-constant load are from the
6169 // BUILD_VECTOR node.
6170 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
6175 case ISD::VECTOR_SHUFFLE: {
6176 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
6178 // Shuffles must have a splat mask where the first element is the one being broadcast (mask element 0 must be 0).
6180 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0)
6183 SDValue Sc = Op.getOperand(0);
6184 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
6185 Sc.getOpcode() != ISD::BUILD_VECTOR) {
6187 if (!Subtarget->hasInt256())
6190 // Use the register form of the broadcast instruction available on AVX2.
6191 if (VT.getSizeInBits() >= 256)
6192 Sc = Extract128BitVector(Sc, 0, DAG, dl);
6193 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
6196 Ld = Sc.getOperand(0);
6197 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6198 Ld.getOpcode() == ISD::ConstantFP);
6200 // The scalar_to_vector node and the suspected
6201 // load node must have exactly one user.
6202 // Constants may have multiple users.
6204 // AVX-512 has register version of the broadcast
6205 bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() &&
6206 Ld.getValueType().getSizeInBits() >= 32;
6207 if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
6214 unsigned ScalarSize = Ld.getValueType().getSizeInBits();
6215 bool IsGE256 = (VT.getSizeInBits() >= 256);
6217 // When optimizing for size, generate up to 5 extra bytes for a broadcast
6218 // instruction to save 8 or more bytes of constant pool data.
6219 // TODO: If multiple splats are generated to load the same constant,
6220 // it may be detrimental to overall size. There needs to be a way to detect
6221 // that condition to know if this is truly a size win.
6222 const Function *F = DAG.getMachineFunction().getFunction();
6223 bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize);
6225 // Handle broadcasting a single constant scalar from the constant pool
6227 // On Sandybridge (no AVX2), it is still better to load a constant vector
6228 // from the constant pool and not to broadcast it from a scalar.
6229 // But override that restriction when optimizing for size.
6230 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
6231 if (ConstSplatVal && (Subtarget->hasAVX2() || OptForSize)) {
6232 EVT CVT = Ld.getValueType();
6233 assert(!CVT.isVector() && "Must not broadcast a vector type");
6235 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
6236 // For size optimization, also splat v2f64 and v2i64, and for size opt
6237 // with AVX2, also splat i8 and i16.
6238 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
6239 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6240 (OptForSize && (ScalarSize == 64 || Subtarget->hasAVX2()))) {
6241 const Constant *C = nullptr;
6242 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
6243 C = CI->getConstantIntValue();
6244 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
6245 C = CF->getConstantFPValue();
6247 assert(C && "Invalid constant type");
6249 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6250 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
6251 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
6252 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP,
6253 MachinePointerInfo::getConstantPool(),
6254 false, false, false, Alignment);
6256 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6260 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
6262 // Handle AVX2 in-register broadcasts.
6263 if (!IsLoad && Subtarget->hasInt256() &&
6264 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
6265 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6267 // The scalar source must be a normal load.
6271 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6272 (Subtarget->hasVLX() && ScalarSize == 64))
6273 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6275 // The integer check is needed for the 64-bit element in a 128-bit vector case,
6276 // so this doesn't match f64, since there is no vbroadcastsd xmm instruction.
6277 if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) {
6278 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
6279 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6282 // Unsupported broadcast.
6286 /// \brief For an EXTRACT_VECTOR_ELT with a constant index return the real
6287 /// underlying vector and index.
6289 /// Modifies \p ExtractedFromVec to the real vector and returns the real index.
6291 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
6293 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
6294 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
6297 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
6299 // (extract_vector_elt (v8f32 %vreg1), Constant<6>)
6301 // (extract_vector_elt (vector_shuffle<2,u,u,u>
6302 // (extract_subvector (v8f32 %vreg0), Constant<4>),
6305 // In this case the vector is the extract_subvector expression and the index
6306 // is 2, as specified by the shuffle.
6307 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
6308 SDValue ShuffleVec = SVOp->getOperand(0);
6309 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
6310 assert(ShuffleVecVT.getVectorElementType() ==
6311 ExtractedFromVec.getSimpleValueType().getVectorElementType());
6313 int ShuffleIdx = SVOp->getMaskElt(Idx);
6314 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
6315 ExtractedFromVec = ShuffleVec;
6321 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
6322 MVT VT = Op.getSimpleValueType();
6324 // Skip if insert_vec_elt is not supported.
6325 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6326 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
6330 unsigned NumElems = Op.getNumOperands();
6334 SmallVector<unsigned, 4> InsertIndices;
6335 SmallVector<int, 8> Mask(NumElems, -1);
6337 for (unsigned i = 0; i != NumElems; ++i) {
6338 unsigned Opc = Op.getOperand(i).getOpcode();
6340 if (Opc == ISD::UNDEF)
6343 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
6344 // Quit if more than one element needs inserting.
6345 if (InsertIndices.size() > 1)
6348 InsertIndices.push_back(i);
6352 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
6353 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
6354 // Quit if non-constant index.
6355 if (!isa<ConstantSDNode>(ExtIdx))
6357 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
6359 // Quit if extracted from vector of different type.
6360 if (ExtractedFromVec.getValueType() != VT)
6363 if (!VecIn1.getNode())
6364 VecIn1 = ExtractedFromVec;
6365 else if (VecIn1 != ExtractedFromVec) {
6366 if (!VecIn2.getNode())
6367 VecIn2 = ExtractedFromVec;
6368 else if (VecIn2 != ExtractedFromVec)
6369 // Quit if more than 2 vectors to shuffle
6373 if (ExtractedFromVec == VecIn1)
6375 else if (ExtractedFromVec == VecIn2)
6376 Mask[i] = Idx + NumElems;
6379 if (!VecIn1.getNode())
6382 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
6383 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]);
6384 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
6385 unsigned Idx = InsertIndices[i];
6386 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
6387 DAG.getIntPtrConstant(Idx));
6393 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
6395 X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
6397 MVT VT = Op.getSimpleValueType();
6398 assert((VT.getVectorElementType() == MVT::i1) && (VT.getSizeInBits() <= 16) &&
6399 "Unexpected type in LowerBUILD_VECTORvXi1!");
6402 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6403 SDValue Cst = DAG.getTargetConstant(0, MVT::i1);
6404 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6405 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6408 if (ISD::isBuildVectorAllOnes(Op.getNode())) {
6409 SDValue Cst = DAG.getTargetConstant(1, MVT::i1);
6410 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6411 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6414 bool AllContants = true;
6415 uint64_t Immediate = 0;
6416 int NonConstIdx = -1;
6417 bool IsSplat = true;
6418 unsigned NumNonConsts = 0;
6419 unsigned NumConsts = 0;
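// Walk the operands, collecting the constant bits in Immediate (bit 'idx' is
// set when operand 'idx' is a non-zero constant) and noting any non-constant
// operands. For example, a v8i1 build_vector of <1,0,1,1,undef,0,0,1> gives
// Immediate == 0x8D.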
6420 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
6421 SDValue In = Op.getOperand(idx);
6422 if (In.getOpcode() == ISD::UNDEF)
6424 if (!isa<ConstantSDNode>(In)) {
6425 AllContants = false;
6430 if (cast<ConstantSDNode>(In)->getZExtValue())
6431 Immediate |= (1ULL << idx);
6433 if (In != Op.getOperand(0))
6438 SDValue FullMask = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1,
6439 DAG.getConstant(Immediate, MVT::i16));
6440 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, FullMask,
6441 DAG.getIntPtrConstant(0));
6444 if (NumNonConsts == 1 && NonConstIdx != 0) {
6447 SDValue VecAsImm = DAG.getConstant(Immediate,
6448 MVT::getIntegerVT(VT.getSizeInBits()));
6449 DstVec = DAG.getNode(ISD::BITCAST, dl, VT, VecAsImm);
6452 DstVec = DAG.getUNDEF(VT);
6453 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
6454 Op.getOperand(NonConstIdx),
6455 DAG.getIntPtrConstant(NonConstIdx));
6457 if (!IsSplat && (NonConstIdx != 0))
6458 llvm_unreachable("Unsupported BUILD_VECTOR operation");
6459 MVT SelectVT = (VT == MVT::v16i1)? MVT::i16 : MVT::i8;
6462 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6463 DAG.getConstant(-1, SelectVT),
6464 DAG.getConstant(0, SelectVT));
6466 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6467 DAG.getConstant((Immediate | 1), SelectVT),
6468 DAG.getConstant(Immediate, SelectVT));
6469 return DAG.getNode(ISD::BITCAST, dl, VT, Select);
6472 /// \brief Return true if \p N implements a horizontal binop and return the
6473 /// operands for the horizontal binop into V0 and V1.
6475 /// This is a helper function of PerformBUILD_VECTORCombine.
6476 /// This function checks whether the input build_vector \p N implements a
6477 /// horizontal operation. Parameter \p Opcode defines the kind of horizontal
6478 /// operation to match.
6479 /// For example, if \p Opcode is equal to ISD::ADD, then this function
6480 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
6481 /// is equal to ISD::SUB, then this function checks if this is a horizontal
6484 /// This function only analyzes elements of \p N whose indices are
6485 /// in range [BaseIdx, LastIdx).
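///
/// For example, with \p Opcode == ISD::FADD, BaseIdx == 0 and LastIdx == 4,
/// the v4f32 build_vector
///   (fadd (extract_vector_elt A, 0), (extract_vector_elt A, 1)),
///   (fadd (extract_vector_elt A, 2), (extract_vector_elt A, 3)),
///   (fadd (extract_vector_elt B, 0), (extract_vector_elt B, 1)),
///   (fadd (extract_vector_elt B, 2), (extract_vector_elt B, 3))
/// matches with V0 == A and V1 == B, i.e. a horizontal FADD of A and B.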
6486 static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
6488 unsigned BaseIdx, unsigned LastIdx,
6489 SDValue &V0, SDValue &V1) {
6490 EVT VT = N->getValueType(0);
6492 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
6493 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
6494 "Invalid Vector in input!");
6496 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
6497 bool CanFold = true;
6498 unsigned ExpectedVExtractIdx = BaseIdx;
6499 unsigned NumElts = LastIdx - BaseIdx;
6500 V0 = DAG.getUNDEF(VT);
6501 V1 = DAG.getUNDEF(VT);
6503 // Check if N implements a horizontal binop.
6504 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
6505 SDValue Op = N->getOperand(i + BaseIdx);
6508 if (Op->getOpcode() == ISD::UNDEF) {
6509 // Update the expected vector extract index.
6510 if (i * 2 == NumElts)
6511 ExpectedVExtractIdx = BaseIdx;
6512 ExpectedVExtractIdx += 2;
6516 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
6521 SDValue Op0 = Op.getOperand(0);
6522 SDValue Op1 = Op.getOperand(1);
6524 // Try to match the following pattern:
6525 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
6526 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6527 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6528 Op0.getOperand(0) == Op1.getOperand(0) &&
6529 isa<ConstantSDNode>(Op0.getOperand(1)) &&
6530 isa<ConstantSDNode>(Op1.getOperand(1)));
6534 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6535 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
6537 if (i * 2 < NumElts) {
6538 if (V0.getOpcode() == ISD::UNDEF)
6539 V0 = Op0.getOperand(0);
6541 if (V1.getOpcode() == ISD::UNDEF)
6542 V1 = Op0.getOperand(0);
6543 if (i * 2 == NumElts)
6544 ExpectedVExtractIdx = BaseIdx;
6547 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
6548 if (I0 == ExpectedVExtractIdx)
6549 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
6550 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
6551 // Try to match the following dag sequence:
6552 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
6553 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
6557 ExpectedVExtractIdx += 2;
6563 /// \brief Emit a sequence of two 128-bit horizontal add/sub followed by
6564 /// a concat_vector.
6566 /// This is a helper function of PerformBUILD_VECTORCombine.
6567 /// This function expects two 256-bit vectors called V0 and V1.
6568 /// First, each vector is split into two separate 128-bit vectors.
6569 /// Then, the resulting 128-bit vectors are used to implement two
6570 /// horizontal binary operations.
6572 /// The kind of horizontal binary operation is defined by \p X86Opcode.
6574 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed to the two
6575 /// new horizontal binops.
6576 /// When Mode is set, the first horizontal binop dag node takes as input
6577 /// the lower 128-bit of V0 and the upper 128-bit of V0. The second
6578 /// horizontal binop dag node takes as input the lower 128-bit of V1
6579 /// and the upper 128-bit of V1.
6581 /// HADD V0_LO, V0_HI
6582 /// HADD V1_LO, V1_HI
6584 /// Otherwise, the first horizontal binop dag node takes as input the lower
6585 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
6586 /// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
6588 /// HADD V0_LO, V1_LO
6589 /// HADD V0_HI, V1_HI
6591 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
6592 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
6593 /// the upper 128-bits of the result.
6594 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
6595 SDLoc DL, SelectionDAG &DAG,
6596 unsigned X86Opcode, bool Mode,
6597 bool isUndefLO, bool isUndefHI) {
6598 EVT VT = V0.getValueType();
6599 assert(VT.is256BitVector() && VT == V1.getValueType() &&
6600 "Invalid nodes in input!");
6602 unsigned NumElts = VT.getVectorNumElements();
6603 SDValue V0_LO = Extract128BitVector(V0, 0, DAG, DL);
6604 SDValue V0_HI = Extract128BitVector(V0, NumElts/2, DAG, DL);
6605 SDValue V1_LO = Extract128BitVector(V1, 0, DAG, DL);
6606 SDValue V1_HI = Extract128BitVector(V1, NumElts/2, DAG, DL);
6607 EVT NewVT = V0_LO.getValueType();
6609 SDValue LO = DAG.getUNDEF(NewVT);
6610 SDValue HI = DAG.getUNDEF(NewVT);
6613 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6614 if (!isUndefLO && V0->getOpcode() != ISD::UNDEF)
6615 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
6616 if (!isUndefHI && V1->getOpcode() != ISD::UNDEF)
6617 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
6619 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6620 if (!isUndefLO && (V0_LO->getOpcode() != ISD::UNDEF ||
6621 V1_LO->getOpcode() != ISD::UNDEF))
6622 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
6624 if (!isUndefHI && (V0_HI->getOpcode() != ISD::UNDEF ||
6625 V1_HI->getOpcode() != ISD::UNDEF))
6626 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
6629 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
6632 /// \brief Try to fold a build_vector that performs an 'addsub' into the
6633 /// sequence of 'vadd + vsub + blendi'.
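///
/// For example, the v4f32 build_vector
///   (fsub (extract_vector_elt A, 0), (extract_vector_elt B, 0)),
///   (fadd (extract_vector_elt A, 1), (extract_vector_elt B, 1)),
///   (fsub (extract_vector_elt A, 2), (extract_vector_elt B, 2)),
///   (fadd (extract_vector_elt A, 3), (extract_vector_elt B, 3))
/// is matched as (X86ISD::ADDSUB A, B).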
6634 static SDValue matchAddSub(const BuildVectorSDNode *BV, SelectionDAG &DAG,
6635 const X86Subtarget *Subtarget) {
6637 EVT VT = BV->getValueType(0);
6638 unsigned NumElts = VT.getVectorNumElements();
6639 SDValue InVec0 = DAG.getUNDEF(VT);
6640 SDValue InVec1 = DAG.getUNDEF(VT);
6642 assert((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
6643 VT == MVT::v2f64) && "build_vector with an invalid type found!");
6645 // Odd-numbered elements in the input build vector are obtained from
6646 // adding two integer/float elements.
6647 // Even-numbered elements in the input build vector are obtained from
6648 // subtracting two integer/float elements.
6649 unsigned ExpectedOpcode = ISD::FSUB;
6650 unsigned NextExpectedOpcode = ISD::FADD;
6651 bool AddFound = false;
6652 bool SubFound = false;
6654 for (unsigned i = 0, e = NumElts; i != e; ++i) {
6655 SDValue Op = BV->getOperand(i);
6657 // Skip 'undef' values.
6658 unsigned Opcode = Op.getOpcode();
6659 if (Opcode == ISD::UNDEF) {
6660 std::swap(ExpectedOpcode, NextExpectedOpcode);
6664 // Early exit if we found an unexpected opcode.
6665 if (Opcode != ExpectedOpcode)
6668 SDValue Op0 = Op.getOperand(0);
6669 SDValue Op1 = Op.getOperand(1);
6671 // Try to match the following pattern:
6672 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
6673 // Early exit if we cannot match that sequence.
6674 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6675 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6676 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
6677 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
6678 Op0.getOperand(1) != Op1.getOperand(1))
6681 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6685 // We found a valid add/sub node. Update the information accordingly.
6691 // Update InVec0 and InVec1.
6692 if (InVec0.getOpcode() == ISD::UNDEF)
6693 InVec0 = Op0.getOperand(0);
6694 if (InVec1.getOpcode() == ISD::UNDEF)
6695 InVec1 = Op1.getOperand(0);
6697 // Make sure that the operands of each add/sub node always
6698 // come from the same pair of vectors.
6699 if (InVec0 != Op0.getOperand(0)) {
6700 if (ExpectedOpcode == ISD::FSUB)
6703 // FADD is commutable. Try to commute the operands
6704 // and then test again.
6705 std::swap(Op0, Op1);
6706 if (InVec0 != Op0.getOperand(0))
6710 if (InVec1 != Op1.getOperand(0))
6713 // Update the pair of expected opcodes.
6714 std::swap(ExpectedOpcode, NextExpectedOpcode);
6717 // Don't try to fold this build_vector into an ADDSUB if the inputs are undef.
6718 if (AddFound && SubFound && InVec0.getOpcode() != ISD::UNDEF &&
6719 InVec1.getOpcode() != ISD::UNDEF)
6720 return DAG.getNode(X86ISD::ADDSUB, DL, VT, InVec0, InVec1);
6725 static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
6726 const X86Subtarget *Subtarget) {
6728 EVT VT = N->getValueType(0);
6729 unsigned NumElts = VT.getVectorNumElements();
6730 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);
6731 SDValue InVec0, InVec1;
6733 // Try to match an ADDSUB.
6734 if ((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
6735 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
6736 SDValue Value = matchAddSub(BV, DAG, Subtarget);
6737 if (Value.getNode())
6741 // Try to match horizontal ADD/SUB.
6742 unsigned NumUndefsLO = 0;
6743 unsigned NumUndefsHI = 0;
6744 unsigned Half = NumElts/2;
6746 // Count the number of UNDEF operands in the input build_vector.
6747 for (unsigned i = 0, e = Half; i != e; ++i)
6748 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6751 for (unsigned i = Half, e = NumElts; i != e; ++i)
6752 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6755 // Early exit if this is either a build_vector of all UNDEFs or all the
6756 // operands but one are UNDEF.
6757 if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
6760 if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget->hasSSE3()) {
6761 // Try to match an SSE3 float HADD/HSUB.
6762 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6763 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6765 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6766 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6767 } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) {
6768 // Try to match an SSSE3 integer HADD/HSUB.
6769 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6770 return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
6772 if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6773 return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
6776 if (!Subtarget->hasAVX())
6779 if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
6780 // Try to match an AVX horizontal add/sub of packed single/double
6781 // precision floating point values from 256-bit vectors.
6782 SDValue InVec2, InVec3;
6783 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
6784 isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
6785 ((InVec0.getOpcode() == ISD::UNDEF ||
6786 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6787 ((InVec1.getOpcode() == ISD::UNDEF ||
6788 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6789 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6791 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
6792 isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
6793 ((InVec0.getOpcode() == ISD::UNDEF ||
6794 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6795 ((InVec1.getOpcode() == ISD::UNDEF ||
6796 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6797 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6798 } else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
6799 // Try to match an AVX2 horizontal add/sub of signed integers.
6800 SDValue InVec2, InVec3;
6802 bool CanFold = true;
6804 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
6805 isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
6806 ((InVec0.getOpcode() == ISD::UNDEF ||
6807 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6808 ((InVec1.getOpcode() == ISD::UNDEF ||
6809 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6810 X86Opcode = X86ISD::HADD;
6811 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
6812 isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
6813 ((InVec0.getOpcode() == ISD::UNDEF ||
6814 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6815 ((InVec1.getOpcode() == ISD::UNDEF ||
6816 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6817 X86Opcode = X86ISD::HSUB;
6822 // Fold this build_vector into a single horizontal add/sub.
6823 // Do this only if the target has AVX2.
6824 if (Subtarget->hasAVX2())
6825 return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
6827 // Do not try to expand this build_vector into a pair of horizontal
6828 // add/sub if we can emit a pair of scalar add/sub.
6829 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6832 // Convert this build_vector into a pair of horizontal binop followed by
6834 bool isUndefLO = NumUndefsLO == Half;
6835 bool isUndefHI = NumUndefsHI == Half;
6836 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
6837 isUndefLO, isUndefHI);
6841 if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
6842 VT == MVT::v16i16) && Subtarget->hasAVX()) {
6844 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6845 X86Opcode = X86ISD::HADD;
6846 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6847 X86Opcode = X86ISD::HSUB;
6848 else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6849 X86Opcode = X86ISD::FHADD;
6850 else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6851 X86Opcode = X86ISD::FHSUB;
6855 // Don't try to expand this build_vector into a pair of horizontal add/sub
6856 // if we can simply emit a pair of scalar add/sub.
6857 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6860 // Convert this build_vector into two horizontal add/sub followed by
6862 bool isUndefLO = NumUndefsLO == Half;
6863 bool isUndefHI = NumUndefsHI == Half;
6864 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
6865 isUndefLO, isUndefHI);
6872 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
6875 MVT VT = Op.getSimpleValueType();
6876 MVT ExtVT = VT.getVectorElementType();
6877 unsigned NumElems = Op.getNumOperands();
6879 // Lower predicate (vXi1) vectors with the dedicated routine.
6880 if (VT.getScalarType() == MVT::i1 && Subtarget->hasAVX512())
6881 return LowerBUILD_VECTORvXi1(Op, DAG);
6883 // Vectors containing all zeros can be matched by pxor and xorps later
6884 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6885 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
6886 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
6887 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
6890 return getZeroVector(VT, Subtarget, DAG, dl);
6893 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
6894 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
6895 // vpcmpeqd on 256-bit vectors.
6896 if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
6897 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256()))
6900 if (!VT.is512BitVector())
6901 return getOnesVector(VT, Subtarget->hasInt256(), DAG, dl);
6904 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
6905 if (Broadcast.getNode())
6908 unsigned EVTBits = ExtVT.getSizeInBits();
6910 unsigned NumZero = 0;
6911 unsigned NumNonZero = 0;
6912 unsigned NonZeros = 0;
6913 bool IsAllConstants = true;
6914 SmallSet<SDValue, 8> Values;
6915 for (unsigned i = 0; i < NumElems; ++i) {
6916 SDValue Elt = Op.getOperand(i);
6917 if (Elt.getOpcode() == ISD::UNDEF)
6920 if (Elt.getOpcode() != ISD::Constant &&
6921 Elt.getOpcode() != ISD::ConstantFP)
6922 IsAllConstants = false;
6923 if (X86::isZeroNode(Elt))
6926 NonZeros |= (1 << i);
6931 // All-undef vector. Return an UNDEF; all-zero vectors were handled above.
6932 if (NumNonZero == 0)
6933 return DAG.getUNDEF(VT);
6935 // Special case for a single non-zero, non-undef element.
6936 if (NumNonZero == 1) {
6937 unsigned Idx = countTrailingZeros(NonZeros);
6938 SDValue Item = Op.getOperand(Idx);
6940 // If this is an insertion of an i64 value on x86-32, and if the top bits of
6941 // the value are obviously zero, truncate the value to i32 and do the
6942 // insertion that way. Only do this if the value is non-constant or if the
6943 // value is a constant being inserted into element 0. It is cheaper to do
6944 // a constant pool load than it is to do a movd + shuffle.
6945 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
6946 (!IsAllConstants || Idx == 0)) {
6947 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
6949 assert(VT == MVT::v2i64 && "Expected an SSE value type!");
6950 EVT VecVT = MVT::v4i32;
6951 unsigned VecElts = 4;
6953 // Truncate the value (which may itself be a constant) to i32, and
6954 // convert it to a vector with movd (S2V+shuffle to zero extend).
6955 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
6956 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
6958 // If using the new shuffle lowering, just directly insert this.
6959 if (ExperimentalVectorShuffleLowering)
6961 ISD::BITCAST, dl, VT,
6962 getShuffleVectorZeroOrUndef(Item, Idx * 2, true, Subtarget, DAG));
6964 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6966 // Now we have our 32-bit value zero extended in the low element of
6967 // a vector. If Idx != 0, swizzle it into place.
6969 SmallVector<int, 4> Mask;
6970 Mask.push_back(Idx);
6971 for (unsigned i = 1; i != VecElts; ++i)
6973 Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT),
6976 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
6980 // If we have a constant or non-constant insertion into the low element of
6981 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
6982 // the rest of the elements. This will be matched as movd/movq/movss/movsd
6983 // depending on what the source datatype is.
6986 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6988 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
6989 (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
6990 if (VT.is256BitVector() || VT.is512BitVector()) {
6991 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
6992 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
6993 Item, DAG.getIntPtrConstant(0));
6995 assert(VT.is128BitVector() && "Expected an SSE value type!");
6996 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6997 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
6998 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7001 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
7002 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
7003 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
7004 if (VT.is256BitVector()) {
7005 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
7006 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
7008 assert(VT.is128BitVector() && "Expected an SSE value type!");
7009 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7011 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
7015 // Is it a vector logical left shift?
7016 if (NumElems == 2 && Idx == 1 &&
7017 X86::isZeroNode(Op.getOperand(0)) &&
7018 !X86::isZeroNode(Op.getOperand(1))) {
7019 unsigned NumBits = VT.getSizeInBits();
7020 return getVShift(true, VT,
7021 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
7022 VT, Op.getOperand(1)),
7023 NumBits/2, DAG, *this, dl);
7026 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
7029 // Otherwise, if this is a vector with i32 or f32 elements, and the element
7030 // is a non-constant being inserted into an element other than the low one,
7031 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
7032 // movd/movss) to move this into the low element, then shuffle it into
7034 if (EVTBits == 32) {
7035 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7037 // If using the new shuffle lowering, just directly insert this.
7038 if (ExperimentalVectorShuffleLowering)
7039 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
7041 // Turn it into a shuffle of zero and zero-extended scalar to vector.
7042 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG);
7043 SmallVector<int, 8> MaskVec;
7044 for (unsigned i = 0; i != NumElems; ++i)
7045 MaskVec.push_back(i == Idx ? 0 : 1);
7046 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
7050 // Splat is obviously ok. Let legalizer expand it to a shuffle.
7051 if (Values.size() == 1) {
7052 if (EVTBits == 32) {
7053 // Instead of a shuffle like this:
7054 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
7055 // Check if it's possible to issue this instead:
7056 // shuffle (vload ptr), undef, <1, 1, 1, 1>
7057 unsigned Idx = countTrailingZeros(NonZeros);
7058 SDValue Item = Op.getOperand(Idx);
7059 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
7060 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
7065 // A vector full of immediates; various special cases are already
7066 // handled, so this is best done with a single constant-pool load.
7070 // For AVX-length vectors, see if we can use a vector load to get all of the
7071 // elements, otherwise build the individual 128-bit pieces and use
7072 // shuffles to put them in place.
7073 if (VT.is256BitVector() || VT.is512BitVector()) {
7074 SmallVector<SDValue, 64> V;
7075 for (unsigned i = 0; i != NumElems; ++i)
7076 V.push_back(Op.getOperand(i));
7078 // Check for a build vector of consecutive loads.
7079 if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
7082 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
7084 // Build both the lower and upper subvector.
7085 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7086 makeArrayRef(&V[0], NumElems/2));
7087 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7088 makeArrayRef(&V[NumElems / 2], NumElems/2));
7090 // Recreate the wider vector with the lower and upper part.
7091 if (VT.is256BitVector())
7092 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7093 return Concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7096 // Let legalizer expand 2-wide build_vectors.
7097 if (EVTBits == 64) {
7098 if (NumNonZero == 1) {
7099 // One half is zero or undef.
7100 unsigned Idx = countTrailingZeros(NonZeros);
7101 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
7102 Op.getOperand(Idx));
7103 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
7108 // If element VT is < 32 bits, convert it to inserts into a zero vector.
7109 if (EVTBits == 8 && NumElems == 16) {
7110 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
7112 if (V.getNode()) return V;
7115 if (EVTBits == 16 && NumElems == 8) {
7116 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
7118 if (V.getNode()) return V;
7121 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
7122 if (EVTBits == 32 && NumElems == 4) {
7123 SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget, *this);
7128 // If element VT is == 32 bits, turn it into a number of shuffles.
7129 SmallVector<SDValue, 8> V(NumElems);
7130 if (NumElems == 4 && NumZero > 0) {
7131 for (unsigned i = 0; i < 4; ++i) {
7132 bool isZero = !(NonZeros & (1 << i));
7134 V[i] = getZeroVector(VT, Subtarget, DAG, dl);
7136 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7139 for (unsigned i = 0; i < 2; ++i) {
7140 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
7143 V[i] = V[i*2]; // Must be a zero vector.
7146 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
7149 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
7152 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
7157 bool Reverse1 = (NonZeros & 0x3) == 2;
7158 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
7162 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
7163 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
7165 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
7168 if (Values.size() > 1 && VT.is128BitVector()) {
7169 // Check for a build vector of consecutive loads.
7170 for (unsigned i = 0; i < NumElems; ++i)
7171 V[i] = Op.getOperand(i);
7173 // Check for elements which are consecutive loads.
7174 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false);
7178 // Check for a build vector from mostly shuffle plus few inserting.
7179 SDValue Sh = buildFromShuffleMostly(Op, DAG);
7183 // For SSE 4.1, use insertps to put the high elements into the low element.
7184 if (Subtarget->hasSSE41()) {
7186 if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
7187 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
7189 Result = DAG.getUNDEF(VT);
7191 for (unsigned i = 1; i < NumElems; ++i) {
7192 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
7193 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
7194 Op.getOperand(i), DAG.getIntPtrConstant(i));
7199 // Otherwise, expand into a number of unpckl*; start by extending each of
7200 // our (non-undef) elements to the full vector width with the element in the
7201 // bottom slot of the vector (which generates no code for SSE).
7202 for (unsigned i = 0; i < NumElems; ++i) {
7203 if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
7204 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7206 V[i] = DAG.getUNDEF(VT);
7209 // Next, we iteratively mix elements, e.g. for v4f32:
7210 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
7211 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
7212 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
7213 unsigned EltStride = NumElems >> 1;
7214 while (EltStride != 0) {
7215 for (unsigned i = 0; i < EltStride; ++i) {
7216 // If V[i+EltStride] is undef and this is the first round of mixing,
7217 // then it is safe to just drop this shuffle: V[i] is already in the
7218 // right place, the one element (since it's the first round) being
7219 // inserted as undef can be dropped. This isn't safe for successive
7220 // rounds because they will permute elements within both vectors.
7221 if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
7222 EltStride == NumElems/2)
7225 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
7234 // LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction
7235 // to create 256-bit vectors from two other 128-bit ones.
7236 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7238 MVT ResVT = Op.getSimpleValueType();
7240 assert((ResVT.is256BitVector() ||
7241 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
7243 SDValue V1 = Op.getOperand(0);
7244 SDValue V2 = Op.getOperand(1);
7245 unsigned NumElems = ResVT.getVectorNumElements();
7246 if(ResVT.is256BitVector())
7247 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7249 if (Op.getNumOperands() == 4) {
7250 MVT HalfVT = MVT::getVectorVT(ResVT.getScalarType(),
7251 ResVT.getVectorNumElements()/2);
7252 SDValue V3 = Op.getOperand(2);
7253 SDValue V4 = Op.getOperand(3);
7254 return Concat256BitVectors(Concat128BitVectors(V1, V2, HalfVT, NumElems/2, DAG, dl),
7255 Concat128BitVectors(V3, V4, HalfVT, NumElems/2, DAG, dl), ResVT, NumElems, DAG, dl);
7257 return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7260 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7261 MVT LLVM_ATTRIBUTE_UNUSED VT = Op.getSimpleValueType();
7262 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
7263 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
7264 Op.getNumOperands() == 4)));
7266 // AVX can use the vinsertf128 instruction to create 256-bit vectors
7267 // from two other 128-bit ones.
7269 // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
7270 return LowerAVXCONCAT_VECTORS(Op, DAG);
7274 //===----------------------------------------------------------------------===//
7275 // Vector shuffle lowering
7277 // This is an experimental code path for lowering vector shuffles on x86. It is
7278 // designed to handle arbitrary vector shuffles and blends, gracefully
7279 // degrading performance as necessary. It works hard to recognize idiomatic
7280 // shuffles and lower them to optimal instruction patterns without leaving
7281 // a framework that allows reasonably efficient handling of all vector shuffle
7283 //===----------------------------------------------------------------------===//
7285 /// \brief Tiny helper function to identify a no-op mask.
7287 /// This is a somewhat boring predicate function. It checks whether the mask
7288 /// array input, which is assumed to be a single-input shuffle mask of the kind
7289 /// used by the X86 shuffle instructions (not a fully general
7290 /// ShuffleVectorSDNode mask), requires any shuffles to occur. Both undef and an
7291 /// in-place shuffle are 'no-ops'.
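///
/// For example, the 4-element masks <0, -1, 2, 3> and <-1, -1, -1, -1> are
/// no-ops, while <1, 0, 2, 3> is not.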
7292 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
7293 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7294 if (Mask[i] != -1 && Mask[i] != i)
7299 /// \brief Helper function to classify a mask as a single-input mask.
7301 /// This isn't a generic single-input test because in the vector shuffle
7302 /// lowering we canonicalize single inputs to be the first input operand. This
7303 /// means we can more quickly test for a single input by only checking whether
7304 /// an input from the second operand exists. We also assume that the size of
7305 /// the mask corresponds to the size of the input vectors, which isn't true in
7306 /// the fully general case.
7307 static bool isSingleInputShuffleMask(ArrayRef<int> Mask) {
7309 if (M >= (int)Mask.size())
7314 /// \brief Test whether there are elements crossing 128-bit lanes in this
7317 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
7318 /// and we routinely test for these.
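///
/// For example, the v8f32 mask <0, 1, 2, 3, 7, 6, 5, 4> stays within the two
/// 128-bit lanes, while <0, 1, 2, 3, 3, 2, 1, 0> crosses lanes because the
/// upper four result elements are taken from the lower lane.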
7319 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
7320 int LaneSize = 128 / VT.getScalarSizeInBits();
7321 int Size = Mask.size();
7322 for (int i = 0; i < Size; ++i)
7323 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
7328 /// \brief Test whether a shuffle mask is equivalent within each 128-bit lane.
7330 /// This checks a shuffle mask to see if it is performing the same
7331 /// 128-bit lane-relative shuffle in each 128-bit lane. This trivially implies
7332 /// that it is also not lane-crossing. It may however involve a blend from the
7333 /// same lane of a second vector.
7335 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
7336 /// non-trivial to compute in the face of undef lanes. The representation is
7337 /// *not* suitable for use with existing 128-bit shuffles as it will contain
7338 /// entries from both V1 and V2 inputs to the wider mask.
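///
/// For example, the v8i32 mask <0, 9, 2, 11, 4, 13, 6, 15> performs the same
/// shuffle in both 128-bit lanes, so \p RepeatedMask is populated with
/// <0, 9, 2, 11> (even elements from V1, odd elements from V2).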
7340 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
7341 SmallVectorImpl<int> &RepeatedMask) {
7342 int LaneSize = 128 / VT.getScalarSizeInBits();
7343 RepeatedMask.resize(LaneSize, -1);
7344 int Size = Mask.size();
7345 for (int i = 0; i < Size; ++i) {
7348 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
7349 // This entry crosses lanes, so there is no way to model this shuffle.
7352 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
7353 if (RepeatedMask[i % LaneSize] == -1)
7354 // This is the first non-undef entry in this slot of a 128-bit lane.
7355 RepeatedMask[i % LaneSize] =
7356 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + Size;
7357 else if (RepeatedMask[i % LaneSize] + (i / LaneSize) * LaneSize != Mask[i])
7358 // Found a mismatch with the repeated mask.
7364 /// \brief Base case helper for testing a single mask element.
7365 static bool isShuffleEquivalentImpl(SDValue V1, SDValue V2,
7366 BuildVectorSDNode *BV1,
7367 BuildVectorSDNode *BV2, ArrayRef<int> Mask,
7369 int Size = Mask.size();
7370 if (Mask[i] != -1 && Mask[i] != Arg) {
7371 auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
7372 auto *ArgsBV = Arg < Size ? BV1 : BV2;
7373 if (!MaskBV || !ArgsBV ||
7374 MaskBV->getOperand(Mask[i] % Size) != ArgsBV->getOperand(Arg % Size))
7380 /// \brief Recursive helper to peel off and test each mask element.
7381 template <typename... Ts>
7382 static bool isShuffleEquivalentImpl(SDValue V1, SDValue V2,
7383 BuildVectorSDNode *BV1,
7384 BuildVectorSDNode *BV2, ArrayRef<int> Mask,
7385 int i, int Arg, Ts... Args) {
7386 if (!isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, i, Arg))
7389 return isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, i + 1, Args...);
7392 /// \brief Checks whether a shuffle mask is equivalent to an explicit list of
7395 /// This is a fast way to test a shuffle mask against a fixed pattern:
7397 /// if (isShuffleEquivalent(Mask, 3, 2, 1, 0)) { ... }
7399 /// It returns true if the mask is exactly as wide as the argument list, and
7400 /// each element of the mask is either -1 (signifying undef) or the value given
7401 /// in the argument.
7402 template <typename... Ts>
7403 static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
7405 if (Mask.size() != sizeof...(Args))
7408 // If the values are build vectors, we can look through them to find
7409 // equivalent inputs that make the shuffles equivalent.
7410 auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
7411 auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
7413 // Recursively peel off arguments and test them against the mask.
7414 return isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, 0, Args...);
7417 /// \brief Get a 4-lane 8-bit shuffle immediate for a mask.
7419 /// This helper function produces an 8-bit shuffle immediate corresponding to
7420 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
7421 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
7424 /// NB: We rely heavily on "undef" masks preserving the input lane.
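///
/// For example, the mask <1, 0, 3, 2> produces the immediate
/// (1 << 0) | (0 << 2) | (3 << 4) | (2 << 6) == 0xB1, the classic
/// "swap adjacent elements" pshufd immediate.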
7425 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask,
7426 SelectionDAG &DAG) {
7427 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
7428 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
7429 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
7430 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
7431 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
7434 Imm |= (Mask[0] == -1 ? 0 : Mask[0]) << 0;
7435 Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2;
7436 Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4;
7437 Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6;
7438 return DAG.getConstant(Imm, MVT::i8);
7441 /// \brief Try to emit a blend instruction for a shuffle.
7443 /// This doesn't do any checks for the availability of instructions for blending
7444 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
7445 /// be matched in the backend with the type given. What it does check for is
7446 /// that the shuffle mask is in fact a blend.
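///
/// For example, for the v4f32 mask <0, 5, 2, 7> elements 1 and 3 come from V2,
/// so the computed blend mask is 0b1010, which is emitted directly as the
/// X86ISD::BLENDI immediate for the floating point types.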
7447 static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
7448 SDValue V2, ArrayRef<int> Mask,
7449 const X86Subtarget *Subtarget,
7450 SelectionDAG &DAG) {
7452 unsigned BlendMask = 0;
7453 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7454 if (Mask[i] >= Size) {
7455 if (Mask[i] != i + Size)
7456 return SDValue(); // Shuffled V2 input!
7457 BlendMask |= 1u << i;
7460 if (Mask[i] >= 0 && Mask[i] != i)
7461 return SDValue(); // Shuffled V1 input!
7463 switch (VT.SimpleTy) {
7468 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
7469 DAG.getConstant(BlendMask, MVT::i8));
7473 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7477 // If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
7478 // that instruction.
7479 if (Subtarget->hasAVX2()) {
7480 // Scale the blend by the number of 32-bit dwords per element.
7481 int Scale = VT.getScalarSizeInBits() / 32;
7483 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7484 if (Mask[i] >= Size)
7485 for (int j = 0; j < Scale; ++j)
7486 BlendMask |= 1u << (i * Scale + j);
7488 MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
7489 V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
7490 V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
7491 return DAG.getNode(ISD::BITCAST, DL, VT,
7492 DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
7493 DAG.getConstant(BlendMask, MVT::i8)));
7497 // For integer shuffles we need to expand the mask and cast the inputs to
7498 // v8i16s prior to blending.
7499 int Scale = 8 / VT.getVectorNumElements();
7501 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7502 if (Mask[i] >= Size)
7503 for (int j = 0; j < Scale; ++j)
7504 BlendMask |= 1u << (i * Scale + j);
7506 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
7507 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
7508 return DAG.getNode(ISD::BITCAST, DL, VT,
7509 DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
7510 DAG.getConstant(BlendMask, MVT::i8)));
7514 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7515 SmallVector<int, 8> RepeatedMask;
7516 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
7517 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
7518 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
7520 for (int i = 0; i < 8; ++i)
7521 if (RepeatedMask[i] >= 16)
7522 BlendMask |= 1u << i;
7523 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
7524 DAG.getConstant(BlendMask, MVT::i8));
7529 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7530 // Scale the blend by the number of bytes per element.
7531 int Scale = VT.getScalarSizeInBits() / 8;
7532 assert(Mask.size() * Scale == 32 && "Not a 256-bit vector!");
7534 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
7535 // mix of LLVM's code generator and the x86 backend. We tell the code
7536 // generator that boolean values in the elements of an x86 vector register
7537 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
7538 // mapping a select to operand #1, and 'false' mapping to operand #2. The
7539 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
7540 // of the element (the remaining are ignored) and 0 in that high bit would
7541 // mean operand #1 while 1 in the high bit would mean operand #2. So while
7542 // the LLVM model for boolean values in vector elements gets the relevant
7543 // bit set, it is set backwards and overconstrained relative to x86's
7545 SDValue VSELECTMask[32];
7546 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7547 for (int j = 0; j < Scale; ++j)
7548 VSELECTMask[Scale * i + j] =
7549 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
7550 : DAG.getConstant(Mask[i] < Size ? -1 : 0, MVT::i8);
7552 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1);
7553 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V2);
7555 ISD::BITCAST, DL, VT,
7556 DAG.getNode(ISD::VSELECT, DL, MVT::v32i8,
7557 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, VSELECTMask),
7562 llvm_unreachable("Not a supported integer vector type!");
7566 /// \brief Try to lower as a blend of elements from two inputs followed by
7567 /// a single-input permutation.
7569 /// This matches the pattern where we can blend elements from two inputs and
7570 /// then reduce the shuffle to a single-input permutation.
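///
/// For example, the v4i32 mask <2, 5, 0, 7> is handled as the blend
/// <0, 5, 2, 7> (each requested element taken in its original position from
/// whichever input supplies it) followed by the single-input permutation
/// <2, 1, 0, 3> of the blended vector.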
7571 static SDValue lowerVectorShuffleAsBlendAndPermute(SDLoc DL, MVT VT, SDValue V1,
7574 SelectionDAG &DAG) {
7575 // We build up the blend mask while checking whether a blend is a viable way
7576 // to reduce the shuffle.
7577 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7578 SmallVector<int, 32> PermuteMask(Mask.size(), -1);
7580 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7584 assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
7586 if (BlendMask[Mask[i] % Size] == -1)
7587 BlendMask[Mask[i] % Size] = Mask[i];
7588 else if (BlendMask[Mask[i] % Size] != Mask[i])
7589 return SDValue(); // Can't blend in the needed input!
7591 PermuteMask[i] = Mask[i] % Size;
7594 SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7595 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
7598 /// \brief Generic routine to decompose a shuffle and blend into independent
7599 /// blends and permutes.
7601 /// This matches the extremely common pattern for handling combined
7602 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
7603 /// operations. It will try to pick the best arrangement of shuffles and
7605 static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
7609 SelectionDAG &DAG) {
7610 // Shuffle the input elements into the desired positions in V1 and V2 and
7611 // blend them together.
7612 SmallVector<int, 32> V1Mask(Mask.size(), -1);
7613 SmallVector<int, 32> V2Mask(Mask.size(), -1);
7614 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7615 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7616 if (Mask[i] >= 0 && Mask[i] < Size) {
7617 V1Mask[i] = Mask[i];
7619 } else if (Mask[i] >= Size) {
7620 V2Mask[i] = Mask[i] - Size;
7621 BlendMask[i] = i + Size;
7624 // Try to lower with the simpler initial blend strategy unless one of the
7625 // input shuffles would be a no-op. We prefer to shuffle inputs as the
7626 // shuffle may be able to fold with a load or other benefit. However, when
7627 // we'll have to do 2x as many shuffles in order to achieve this, blending
7628 // first is a better strategy.
7629 if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask))
7630 if (SDValue BlendPerm =
7631 lowerVectorShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask, DAG))
7634 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
7635 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
7636 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7639 /// \brief Try to lower a vector shuffle as a byte rotation.
7641 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
7642 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
7643 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
7644 /// try to generically lower a vector shuffle through such a pattern. It
7645 /// does not check for the profitability of lowering either as PALIGNR or
7646 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
7647 /// This matches shuffle vectors that look like:
7649 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
7651 /// Essentially it concatenates V1 and V2, shifts right by some number of
7652 /// elements, and takes the low elements as the result. Note that while this is
7653 /// specified as a *right shift* because x86 is little-endian, it is a *left
7654 /// rotate* of the vector lanes.
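///
/// For example, the v8i16 mask [11, 12, 13, 14, 15, 0, 1, 2] above has a
/// rotation of 3 elements; with SSSE3 it is emitted as a single palignr with
/// byte immediate 6 (3 elements * 2 bytes per element).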
7655 static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
7658 const X86Subtarget *Subtarget,
7659 SelectionDAG &DAG) {
7660 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7662 int NumElts = Mask.size();
7663 int NumLanes = VT.getSizeInBits() / 128;
7664 int NumLaneElts = NumElts / NumLanes;
7666 // We need to detect various ways of spelling a rotation:
7667 // [11, 12, 13, 14, 15, 0, 1, 2]
7668 // [-1, 12, 13, 14, -1, -1, 1, -1]
7669 // [-1, -1, -1, -1, -1, -1, 1, 2]
7670 // [ 3, 4, 5, 6, 7, 8, 9, 10]
7671 // [-1, 4, 5, 6, -1, -1, 9, -1]
7672 // [-1, 4, 5, 6, -1, -1, -1, -1]
7675 for (int l = 0; l < NumElts; l += NumLaneElts) {
7676 for (int i = 0; i < NumLaneElts; ++i) {
7677 if (Mask[l + i] == -1)
7679 assert(Mask[l + i] >= 0 && "Only -1 is a valid negative mask element!");
7681 // Get the mod-Size index and lane correct it.
7682 int LaneIdx = (Mask[l + i] % NumElts) - l;
7683 // Make sure it was in this lane.
7684 if (LaneIdx < 0 || LaneIdx >= NumLaneElts)
7687 // Determine where a rotated vector would have started.
7688 int StartIdx = i - LaneIdx;
7690 // The identity rotation isn't interesting, stop.
7693 // If we found the tail of a vector the rotation must be the missing
7694 // front. If we found the head of a vector, it must be how much of the
7696 int CandidateRotation = StartIdx < 0 ? -StartIdx : NumLaneElts - StartIdx;
7699 Rotation = CandidateRotation;
7700 else if (Rotation != CandidateRotation)
7701 // The rotations don't match, so we can't match this mask.
7704 // Compute which value this mask is pointing at.
7705 SDValue MaskV = Mask[l + i] < NumElts ? V1 : V2;
7707 // Compute which of the two target values this index should be assigned
7708 // to. This reflects whether the high elements are remaining or the low
7709 // elements are remaining.
7710 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
7712 // Either set up this value if we've not encountered it before, or check
7713 // that it remains consistent.
7716 else if (TargetV != MaskV)
7717 // This may be a rotation, but it pulls from the inputs in some
7718 // unsupported interleaving.
7723 // Check that we successfully analyzed the mask, and normalize the results.
7724 assert(Rotation != 0 && "Failed to locate a viable rotation!");
7725 assert((Lo || Hi) && "Failed to find a rotated input vector!");
7731 // The actual rotate instruction rotates bytes, so we need to scale the
7732 // rotation based on how many bytes are in the vector lane.
7733 int Scale = 16 / NumLaneElts;
7735 // SSSE3 targets can use the palignr instruction.
7736 if (Subtarget->hasSSSE3()) {
7737 // Cast the inputs to i8 vector of correct length to match PALIGNR.
7738 MVT AlignVT = MVT::getVectorVT(MVT::i8, 16 * NumLanes);
7739 Lo = DAG.getNode(ISD::BITCAST, DL, AlignVT, Lo);
7740 Hi = DAG.getNode(ISD::BITCAST, DL, AlignVT, Hi);
7742 return DAG.getNode(ISD::BITCAST, DL, VT,
7743 DAG.getNode(X86ISD::PALIGNR, DL, AlignVT, Hi, Lo,
7744 DAG.getConstant(Rotation * Scale, MVT::i8)));
7747 assert(VT.getSizeInBits() == 128 &&
7748 "Rotate-based lowering only supports 128-bit lowering!");
7749 assert(Mask.size() <= 16 &&
7750 "Can shuffle at most 16 bytes in a 128-bit vector!");
7752 // Default SSE2 implementation
7753 int LoByteShift = 16 - Rotation * Scale;
7754 int HiByteShift = Rotation * Scale;
7756 // Cast the inputs to v2i64 to match PSLLDQ/PSRLDQ.
7757 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Lo);
7758 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Hi);
7760 SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo,
7761 DAG.getConstant(8 * LoByteShift, MVT::i8));
7762 SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi,
7763 DAG.getConstant(8 * HiByteShift, MVT::i8));
7764 return DAG.getNode(ISD::BITCAST, DL, VT,
7765 DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift));
7768 /// \brief Compute whether each element of a shuffle is zeroable.
7770 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
7771 /// Either it is an undef element in the shuffle mask, the element of the input
7772 /// referenced is undef, or the element of the input referenced is known to be
7773 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
7774 /// as many lanes with this technique as possible to simplify the remaining
7776 static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
7777 SDValue V1, SDValue V2) {
7778 SmallBitVector Zeroable(Mask.size(), false);
7780 while (V1.getOpcode() == ISD::BITCAST)
7781 V1 = V1->getOperand(0);
7782 while (V2.getOpcode() == ISD::BITCAST)
7783 V2 = V2->getOperand(0);
7785 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
7786 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
7788 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7790 // Handle the easy cases.
7791 if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
7796 // If this is an index into a build_vector node (which has the same number
7797 // of elements), dig out the input value and use it.
7798 SDValue V = M < Size ? V1 : V2;
7799 if (V.getOpcode() != ISD::BUILD_VECTOR || Size != (int)V.getNumOperands())
7802 SDValue Input = V.getOperand(M % Size);
7803 // The UNDEF opcode check really should be dead code here, but not quite
7804 // worth asserting on (it isn't invalid, just unexpected).
7805 if (Input.getOpcode() == ISD::UNDEF || X86::isZeroNode(Input))
7812 /// \brief Try to emit a bitmask instruction for a shuffle.
7814 /// This handles cases where we can model a blend exactly as a bitmask due to
7815 /// one of the inputs being zeroable.
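///
/// For example, if V2 is all zeros, the v4i32 mask <0, 5, 2, 7> keeps elements
/// 0 and 2 of V1 and zeroes the rest, so it can be emitted as an AND of V1
/// with the constant vector <-1, 0, -1, 0>.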
7816 static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1,
7817 SDValue V2, ArrayRef<int> Mask,
7818 SelectionDAG &DAG) {
7819 MVT EltVT = VT.getScalarType();
7820 int NumEltBits = EltVT.getSizeInBits();
7821 MVT IntEltVT = MVT::getIntegerVT(NumEltBits);
7822 SDValue Zero = DAG.getConstant(0, IntEltVT);
7823 SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), IntEltVT);
7824 if (EltVT.isFloatingPoint()) {
7825 Zero = DAG.getNode(ISD::BITCAST, DL, EltVT, Zero);
7826 AllOnes = DAG.getNode(ISD::BITCAST, DL, EltVT, AllOnes);
7828 SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
7829 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7831 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7834 if (Mask[i] % Size != i)
7835 return SDValue(); // Not a blend.
7837 V = Mask[i] < Size ? V1 : V2;
7838 else if (V != (Mask[i] < Size ? V1 : V2))
7839 return SDValue(); // Can only let one input through the mask.
7841 VMaskOps[i] = AllOnes;
7844 return SDValue(); // No non-zeroable elements!
7846 SDValue VMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, VMaskOps);
7847 V = DAG.getNode(VT.isFloatingPoint()
7848 ? (unsigned) X86ISD::FAND : (unsigned) ISD::AND,
7853 /// \brief Try to lower a vector shuffle as a byte shift (shifts in zeros).
7855 /// Attempts to match a shuffle mask against the PSRLDQ and PSLLDQ
7856 /// byte-shift instructions. The mask must consist of a shifted sequential
7857 /// shuffle from one of the input vectors and zeroable elements for the
7858 /// remaining 'shifted in' elements.
7859 static SDValue lowerVectorShuffleAsByteShift(SDLoc DL, MVT VT, SDValue V1,
7860 SDValue V2, ArrayRef<int> Mask,
7861 SelectionDAG &DAG) {
7862 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7864 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7866 int NumElts = VT.getVectorNumElements();
7867 int NumLanes = VT.getSizeInBits() / 128;
7868 int NumLaneElts = NumElts / NumLanes;
7869 int Scale = 16 / NumLaneElts;
7870 MVT ShiftVT = MVT::getVectorVT(MVT::i64, 2 * NumLanes);
7872 // PSLLDQ : (little-endian) left byte shift
7873 // [ zz, 0, 1, 2, 3, 4, 5, 6]
7874 // [ zz, zz, -1, -1, 2, 3, 4, -1]
7875 // [ zz, zz, zz, zz, zz, zz, -1, 1]
7876 // PSRLDQ : (little-endian) right byte shift
7877 // [ 5, 6, 7, zz, zz, zz, zz, zz]
7878 // [ -1, 5, 6, 7, zz, zz, zz, zz]
7879 // [ 1, 2, -1, -1, -1, -1, zz, zz]
7880 auto MatchByteShift = [&](int Shift) -> SDValue {
7881 bool MatchLeft = true, MatchRight = true;
7882 for (int l = 0; l < NumElts; l += NumLaneElts) {
7883 for (int i = 0; i < Shift; ++i)
7884 MatchLeft &= Zeroable[l + i];
7885 for (int i = NumLaneElts - Shift; i < NumLaneElts; ++i)
7886 MatchRight &= Zeroable[l + i];
7888 if (!(MatchLeft || MatchRight))
7891 bool MatchV1 = true, MatchV2 = true;
7892 for (int l = 0; l < NumElts; l += NumLaneElts) {
7893 unsigned Pos = MatchLeft ? Shift + l : l;
7894 unsigned Low = MatchLeft ? l : Shift + l;
7895 unsigned Len = NumLaneElts - Shift;
7896 MatchV1 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low);
7897 MatchV2 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low + NumElts);
7899 if (!(MatchV1 || MatchV2))
7902 int ByteShift = Shift * Scale;
7903 unsigned Op = MatchRight ? X86ISD::VSRLDQ : X86ISD::VSHLDQ;
7904 SDValue V = MatchV1 ? V1 : V2;
7905 V = DAG.getNode(ISD::BITCAST, DL, ShiftVT, V);
7906 V = DAG.getNode(Op, DL, ShiftVT, V,
7907 DAG.getConstant(ByteShift * 8, MVT::i8));
7908 return DAG.getNode(ISD::BITCAST, DL, VT, V);
7911 for (int Shift = 1; Shift < NumLaneElts; ++Shift)
7912 if (SDValue S = MatchByteShift(Shift))
7919 /// \brief Try to lower a vector shuffle as a bit shift (shifts in zeros).
7921 /// Attempts to match a shuffle mask against the PSRL(W/D/Q) and PSLL(W/D/Q)
7922 /// SSE2 and AVX2 logical bit-shift instructions. The function matches
7923 /// elements from one of the input vectors shuffled to the left or right
7924 /// with zeroable elements 'shifted in'.
7925 static SDValue lowerVectorShuffleAsBitShift(SDLoc DL, MVT VT, SDValue V1,
7926 SDValue V2, ArrayRef<int> Mask,
7927 SelectionDAG &DAG) {
7928 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7930 int Size = Mask.size();
7931 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
7933 // PSRL : (little-endian) right bit shift.
7936 // PSLL : (little-endian) left bit shift.
7938 // [ -1, 4, zz, -1 ]
7939 auto MatchBitShift = [&](int Shift, int Scale) -> SDValue {
7940 MVT ShiftSVT = MVT::getIntegerVT(VT.getScalarSizeInBits() * Scale);
7941 MVT ShiftVT = MVT::getVectorVT(ShiftSVT, Size / Scale);
7942 assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
7943 "Illegal integer vector type");
7945 bool MatchLeft = true, MatchRight = true;
7946 for (int i = 0; i != Size; i += Scale) {
7947 for (int j = 0; j != Shift; ++j) {
7948 MatchLeft &= Zeroable[i + j];
7950 for (int j = Scale - Shift; j != Scale; ++j) {
7951 MatchRight &= Zeroable[i + j];
7954 if (!(MatchLeft || MatchRight))
7957 bool MatchV1 = true, MatchV2 = true;
7958 for (int i = 0; i != Size; i += Scale) {
7959 unsigned Pos = MatchLeft ? i + Shift : i;
7960 unsigned Low = MatchLeft ? i : i + Shift;
7961 unsigned Len = Scale - Shift;
7962 MatchV1 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low);
      MatchV2 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low + Size);
    }
    if (!(MatchV1 || MatchV2))
      return SDValue();
7968 // Cast the inputs to ShiftVT to match VSRLI/VSHLI and back again.
7969 unsigned OpCode = MatchLeft ? X86ISD::VSHLI : X86ISD::VSRLI;
7970 int ShiftAmt = Shift * VT.getScalarSizeInBits();
7971 SDValue V = MatchV1 ? V1 : V2;
7972 V = DAG.getNode(ISD::BITCAST, DL, ShiftVT, V);
7973 V = DAG.getNode(OpCode, DL, ShiftVT, V, DAG.getConstant(ShiftAmt, MVT::i8));
    return DAG.getNode(ISD::BITCAST, DL, VT, V);
  };
7977 // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
7978 // keep doubling the size of the integer elements up to that. We can
7979 // then shift the elements of the integer vector by whole multiples of
7980 // their width within the elements of the larger integer vector. Test each
7981 // multiple to see if we can find a match with the moved element indices
7982 // and that the shifted in elements are all zeroable.
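  // Editor's note (illustrative, not in the original source): for a v4i32
  // shuffle with mask [zz, 0, zz, 2], Scale == 2 views the vector as v2i64
  // and Shift == 1 matches the left-shift case, so we emit VSHLI (PSLLQ) on
  // v2i64 with an immediate of 1 * 32 == 32 bits and bitcast back to v4i32.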
7983 for (int Scale = 2; Scale * VT.getScalarSizeInBits() <= 64; Scale *= 2)
7984 for (int Shift = 1; Shift != Scale; ++Shift)
      if (SDValue BitShift = MatchBitShift(Shift, Scale))
        return BitShift;

  // No match - this isn't a bit shift.
  return SDValue();
}
7992 /// \brief Lower a vector shuffle as a zero or any extension.
7994 /// Given a specific number of elements, element bit width, and extension
7995 /// stride, produce either a zero or any extension based on the available
7996 /// features of the subtarget.
7997 static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
7998 SDLoc DL, MVT VT, int Scale, bool AnyExt, SDValue InputV,
7999 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8000 assert(Scale > 1 && "Need a scale to extend.");
8001 int NumElements = VT.getVectorNumElements();
8002 int EltBits = VT.getScalarSizeInBits();
8003 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
8004 "Only 8, 16, and 32 bit elements can be extended.");
8005 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
8007 // Found a valid zext mask! Try various lowering strategies based on the
8008 // input type and available ISA extensions.
8009 if (Subtarget->hasSSE41()) {
8010 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
8011 NumElements / Scale);
8012 return DAG.getNode(ISD::BITCAST, DL, VT,
                       DAG.getNode(X86ISD::VZEXT, DL, ExtVT, InputV));
  }
8016 // For any extends we can cheat for larger element sizes and use shuffle
8017 // instructions that can fold with a load and/or copy.
8018 if (AnyExt && EltBits == 32) {
    int PSHUFDMask[4] = {0, -1, 1, -1};
    return DAG.getNode(
        ISD::BITCAST, DL, VT,
        DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
                    DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
                    getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
  }
8026 if (AnyExt && EltBits == 16 && Scale > 2) {
8027 int PSHUFDMask[4] = {0, -1, 0, -1};
8028 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
8029 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
8030 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG));
    int PSHUFHWMask[4] = {1, -1, -1, -1};
    return DAG.getNode(
        ISD::BITCAST, DL, VT,
        DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16,
                    DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, InputV),
                    getV4X86ShuffleImm8ForMask(PSHUFHWMask, DAG)));
  }
8039 // If this would require more than 2 unpack instructions to expand, use
8040 // pshufb when available. We can only use more than 2 unpack instructions
8041 // when zero extending i8 elements which also makes it easier to use pshufb.
8042 if (Scale > 4 && EltBits == 8 && Subtarget->hasSSSE3()) {
8043 assert(NumElements == 16 && "Unexpected byte vector width!");
8044 SDValue PSHUFBMask[16];
    for (int i = 0; i < 16; ++i)
      PSHUFBMask[i] =
          DAG.getConstant((i % Scale == 0) ? i / Scale : 0x80, MVT::i8);
8048 InputV = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, InputV);
8049 return DAG.getNode(ISD::BITCAST, DL, VT,
8050 DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
8051 DAG.getNode(ISD::BUILD_VECTOR, DL,
                                           MVT::v16i8, PSHUFBMask)));
  }
  // Otherwise emit a sequence of unpacks.
  do {
    MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
    SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
                         : getZeroVector(InputVT, Subtarget, DAG, DL);
    InputV = DAG.getNode(ISD::BITCAST, DL, InputVT, InputV);
    InputV = DAG.getNode(X86ISD::UNPCKL, DL, InputVT, InputV, Ext);
    Scale /= 2;
    EltBits *= 2;
    NumElements /= 2;
  } while (Scale > 1);
  return DAG.getNode(ISD::BITCAST, DL, VT, InputV);
}
8069 /// \brief Try to lower a vector shuffle as a zero extension on any microarch.
8071 /// This routine will try to do everything in its power to cleverly lower
8072 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
8073 /// check for the profitability of this lowering, it tries to aggressively
8074 /// match this pattern. It will use all of the micro-architectural details it
8075 /// can to emit an efficient lowering. It handles both blends with all-zero
8076 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
8077 /// masking out later).
8079 /// The reason we have dedicated lowering for zext-style shuffles is that they
8080 /// are both incredibly common and often quite performance sensitive.
8081 static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
8082 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8083 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8084 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8086 int Bits = VT.getSizeInBits();
8087 int NumElements = VT.getVectorNumElements();
8088 assert(VT.getScalarSizeInBits() <= 32 &&
8089 "Exceeds 32-bit integer zero extension limit");
8090 assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
  // Define a helper function to check a particular ext-scale and lower to it if
  // valid.
  auto Lower = [&](int Scale) -> SDValue {
    SDValue InputV;
    bool AnyExt = true;
    for (int i = 0; i < NumElements; ++i) {
      if (Mask[i] == -1)
        continue; // Valid anywhere but doesn't tell us anything.
      if (i % Scale != 0) {
        // Each of the extended elements needs to be zeroable.
        if (!Zeroable[i])
          return SDValue();

        // We no longer are in the anyext case.
        AnyExt = false;
        continue;
      }

      // Each of the base elements needs to be consecutive indices into the
      // same input vector.
      SDValue V = Mask[i] < NumElements ? V1 : V2;
      if (!InputV)
        InputV = V;
      else if (InputV != V)
        return SDValue(); // Flip-flopping inputs.

      if (Mask[i] % NumElements != i / Scale)
        return SDValue(); // Non-consecutive strided elements.
    }

    // If we fail to find an input, we have a zero-shuffle which should always
    // have already been handled.
    // FIXME: Maybe handle this here in case during blending we end up with one?
    if (!InputV)
      return SDValue();

    return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
        DL, VT, Scale, AnyExt, InputV, Subtarget, DAG);
  };
8132 // The widest scale possible for extending is to a 64-bit integer.
8133 assert(Bits % 64 == 0 &&
8134 "The number of bits in a vector must be divisible by 64 on x86!");
8135 int NumExtElements = Bits / 64;
  // Each iteration, try extending the elements half as much, but into twice as
  // many elements.
8139 for (; NumExtElements < NumElements; NumExtElements *= 2) {
8140 assert(NumElements % NumExtElements == 0 &&
8141 "The input vector size must be divisible by the extended size.");
    if (SDValue V = Lower(NumElements / NumExtElements))
      return V;
  }
  // General extends failed, but 128-bit vectors may be able to use MOVQ.
  if (Bits != 128)
    return SDValue();

  // Returns one of the source operands if the shuffle can be reduced to a
  // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
  auto CanZExtLowHalf = [&]() {
    for (int i = NumElements / 2; i != NumElements; ++i)
      if (!Zeroable[i])
        return SDValue();
    if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
      return V1;
    if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
      return V2;
    return SDValue();
  };

  if (SDValue V = CanZExtLowHalf()) {
    V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V);
    V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
    return DAG.getNode(ISD::BITCAST, DL, VT, V);
  }

  // No viable ext lowering found.
  return SDValue();
}
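// Editor's note (illustrative, not in the original source): for a v8i16
// shuffle with mask [0, zz, 1, zz, 2, zz, 3, zz], Lower(2) above succeeds
// with InputV == V1 and AnyExt == false, so the low four i16 elements are
// zero-extended to i32 (PMOVZXWD on SSE4.1, or an UNPCKLWD against a zero
// vector otherwise).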
8173 /// \brief Try to get a scalar value for a specific element of a vector.
8175 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
8176 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
8177 SelectionDAG &DAG) {
8178 MVT VT = V.getSimpleValueType();
8179 MVT EltVT = VT.getVectorElementType();
8180 while (V.getOpcode() == ISD::BITCAST)
8181 V = V.getOperand(0);
  // If the bitcasts shift the element size, we can't extract an equivalent
  // element from it.
  MVT NewVT = V.getSimpleValueType();
  if (!NewVT.isVector() ||
      NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
    return SDValue();

  if (V.getOpcode() == ISD::BUILD_VECTOR ||
      (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR))
    return DAG.getNode(ISD::BITCAST, SDLoc(V), EltVT, V.getOperand(Idx));

  return SDValue();
}
8195 /// \brief Helper to test for a load that can be folded with x86 shuffles.
8197 /// This is particularly important because the set of instructions varies
8198 /// significantly based on whether the operand is a load or not.
8199 static bool isShuffleFoldableLoad(SDValue V) {
8200 while (V.getOpcode() == ISD::BITCAST)
8201 V = V.getOperand(0);
  return ISD::isNON_EXTLoad(V.getNode());
}
8206 /// \brief Try to lower insertion of a single element into a zero vector.
8208 /// This is a common pattern that we have especially efficient patterns to lower
8209 /// across all subtarget feature sets.
8210 static SDValue lowerVectorShuffleAsElementInsertion(
8211 MVT VT, SDLoc DL, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8212 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
  SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
  MVT ExtVT = VT;
  MVT EltVT = VT.getVectorElementType();
8217 int V2Index = std::find_if(Mask.begin(), Mask.end(),
                             [&Mask](int M) { return M >= (int)Mask.size(); }) -
                Mask.begin();
8220 bool IsV1Zeroable = true;
8221 for (int i = 0, Size = Mask.size(); i < Size; ++i)
8222 if (i != V2Index && !Zeroable[i]) {
      IsV1Zeroable = false;
      break;
    }
8227 // Check for a single input from a SCALAR_TO_VECTOR node.
8228 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
8229 // all the smarts here sunk into that routine. However, the current
8230 // lowering of BUILD_VECTOR makes that nearly impossible until the old
8231 // vector shuffle lowering is dead.
8232 if (SDValue V2S = getScalarValueForVectorElement(
8233 V2, Mask[V2Index] - Mask.size(), DAG)) {
8234 // We need to zext the scalar if it is smaller than an i32.
8235 V2S = DAG.getNode(ISD::BITCAST, DL, EltVT, V2S);
8236 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
      // Using zext to expand a narrow element won't work for non-zero
      // elements.
      if (!IsV1Zeroable)
        return SDValue();

      // Zero-extend directly to i32.
      ExtVT = MVT::v4i32;
      V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
    }
    V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
8247 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
8248 EltVT == MVT::i16) {
8249 // Either not inserting from the low element of the input or the input
    // element size is too small to use VZEXT_MOVL to clear the high bits.
    return SDValue();
  }
8254 if (!IsV1Zeroable) {
8255 // If V1 can't be treated as a zero vector we have fewer options to lower
8256 // this. We can't support integer vectors or non-zero targets cheaply, and
8257 // the V1 elements can't be permuted in any way.
8258 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
    if (!VT.isFloatingPoint() || V2Index != 0)
      return SDValue();
8261 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
8262 V1Mask[V2Index] = -1;
    if (!isNoopShuffleMask(V1Mask))
      return SDValue();
8265 // This is essentially a special case blend operation, but if we have
8266 // general purpose blend operations, they are always faster. Bail and let
8267 // the rest of the lowering handle these as blends.
    if (Subtarget->hasSSE41())
      return SDValue();
8271 // Otherwise, use MOVSD or MOVSS.
8272 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
8273 "Only two types of floating point element types to handle!");
    return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
                       ExtVT, V1, V2);
  }
8278 // This lowering only works for the low element with floating point vectors.
  if (VT.isFloatingPoint() && V2Index != 0)
    return SDValue();

  V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
  if (ExtVT != VT)
    V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8287 // If we have 4 or fewer lanes we can cheaply shuffle the element into
8288 // the desired position. Otherwise it is more efficient to do a vector
8289 // shift left. We know that we can do a vector shift left because all
8290 // the inputs are zero.
8291 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
8292 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
8293 V2Shuffle[V2Index] = 0;
    V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
  } else {
    V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V2);
    V2 = DAG.getNode(
        X86ISD::VSHLDQ, DL, MVT::v2i64, V2,
        DAG.getConstant(
            V2Index * EltVT.getSizeInBits(),
            DAG.getTargetLoweringInfo().getScalarShiftAmountTy(MVT::v2i64)));
    V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
  }
  return V2;
}
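// Editor's note (illustrative, not in the original source): in the routine
// above, a v4i32 shuffle with mask [4, zz, zz, zz] whose V2 operand is a
// SCALAR_TO_VECTOR node is handled with a single VZEXT_MOVL (movd): the
// scalar lands in element 0 and the remaining lanes are zeroed.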
8308 /// \brief Try to lower broadcast of a single element.
8310 /// For convenience, this code also bundles all of the subtarget feature set
8311 /// filtering. While a little annoying to re-dispatch on type here, there isn't
8312 /// a convenient way to factor it out.
static SDValue lowerVectorShuffleAsBroadcast(MVT VT, SDLoc DL, SDValue V,
                                             ArrayRef<int> Mask,
                                             const X86Subtarget *Subtarget,
                                             SelectionDAG &DAG) {
  if (!Subtarget->hasAVX())
    return SDValue();
  if (VT.isInteger() && !Subtarget->hasAVX2())
    return SDValue();
8322 // Check that the mask is a broadcast.
  int BroadcastIdx = -1;
  for (int M : Mask)
    if (M >= 0 && BroadcastIdx == -1)
      BroadcastIdx = M;
    else if (M >= 0 && M != BroadcastIdx)
      return SDValue();

  assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
                                            "a sorted mask where the broadcast "
                                            "comes from V1.");
8334 // Go up the chain of (vector) values to try and find a scalar load that
8335 // we can combine with the broadcast.
  for (;;) {
    switch (V.getOpcode()) {
    case ISD::CONCAT_VECTORS: {
      int OperandSize = Mask.size() / V.getNumOperands();
      V = V.getOperand(BroadcastIdx / OperandSize);
      BroadcastIdx %= OperandSize;
      continue;
    }

    case ISD::INSERT_SUBVECTOR: {
      SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
      auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
      if (!ConstantIdx)
        break;

      int BeginIdx = (int)ConstantIdx->getZExtValue();
      int EndIdx =
          BeginIdx + (int)VInner.getValueType().getVectorNumElements();
      if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
        BroadcastIdx -= BeginIdx;
        V = VInner;
      } else {
        V = VOuter;
      }
      continue;
    }
    }
    break;
  }
  // Check if this is a broadcast of a scalar. We special case lowering
  // for scalars so that we can more effectively fold with loads.
  if (V.getOpcode() == ISD::BUILD_VECTOR ||
      (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
    V = V.getOperand(BroadcastIdx);

    // If the scalar isn't a load we can't broadcast from it in AVX1, only with
    // AVX2.
    if (!Subtarget->hasAVX2() && !isShuffleFoldableLoad(V))
      return SDValue();
  } else if (BroadcastIdx != 0 || !Subtarget->hasAVX2()) {
    // We can't broadcast from a vector register w/o AVX2, and we can only
    // broadcast from the zero-element of a vector register.
    return SDValue();
  }

  return DAG.getNode(X86ISD::VBROADCAST, DL, VT, V);
}
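// Editor's note (illustrative, not in the original source): in the routine
// above, a v4f32 shuffle of a BUILD_VECTOR with mask [1, 1, 1, 1] has
// BroadcastIdx == 1, V becomes the scalar feeding element 1, and a single
// VBROADCASTSS is emitted; on AVX1 this only fires when that scalar is a load.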
8385 // Check for whether we can use INSERTPS to perform the shuffle. We only use
8386 // INSERTPS when the V1 elements are already in the correct locations
8387 // because otherwise we can just always use two SHUFPS instructions which
8388 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
8389 // perform INSERTPS if a single V1 element is out of place and all V2
8390 // elements are zeroable.
static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
                                            ArrayRef<int> Mask,
                                            SelectionDAG &DAG) {
8394 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8395 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8396 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8397 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
  SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);

  unsigned ZMask = 0;
  int V1DstIndex = -1;
8403 int V2DstIndex = -1;
8404 bool V1UsedInPlace = false;
  for (int i = 0; i < 4; ++i) {
    // Synthesize a zero mask from the zeroable elements (includes undefs).
    if (Zeroable[i]) {
      ZMask |= 1 << i;
      continue;
    }

    // Flag if we use any V1 inputs in place.
    if (Mask[i] == i) {
      V1UsedInPlace = true;
      continue;
    }

    // We can only insert a single non-zeroable element.
    if (V1DstIndex != -1 || V2DstIndex != -1)
      return SDValue();

    if (Mask[i] < 4) {
      // V1 input out of place for insertion.
      V1DstIndex = i;
    } else {
      // V2 input for insertion.
      V2DstIndex = i;
    }
  }
8432 // Don't bother if we have no (non-zeroable) element for insertion.
8433 if (V1DstIndex == -1 && V2DstIndex == -1)
8436 // Determine element insertion src/dst indices. The src index is from the
8437 // start of the inserted vector, not the start of the concatenated vector.
8438 unsigned V2SrcIndex = 0;
8439 if (V1DstIndex != -1) {
8440 // If we have a V1 input out of place, we use V1 as the V2 element insertion
8441 // and don't use the original V2 at all.
    V2SrcIndex = Mask[V1DstIndex];
    V2DstIndex = V1DstIndex;
    V2 = V1;
  } else {
    V2SrcIndex = Mask[V2DstIndex] - 4;
  }
8449 // If no V1 inputs are used in place, then the result is created only from
8450 // the zero mask and the V2 insertion - so remove V1 dependency.
  if (!V1UsedInPlace)
    V1 = DAG.getUNDEF(MVT::v4f32);
8454 unsigned InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
8455 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
  // Insert the V2 element into the desired position.
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
                     DAG.getConstant(InsertPSMask, MVT::i8));
}
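// Editor's note (illustrative, not in the original source): the INSERTPS
// immediate above packs [src(2 bits) | dst(2 bits) | zero-mask(4 bits)]. For
// a v4f32 mask of [0, 1, 6, zz] we get V2SrcIndex == 2, V2DstIndex == 2 and
// ZMask == 0b1000, so InsertPSMask == (2 << 6) | (2 << 4) | 0x8 == 0xA8.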
8463 /// \brief Handle lowering of 2-lane 64-bit floating point shuffles.
8465 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
8466 /// support for floating point shuffles but not integer shuffles. These
8467 /// instructions will incur a domain crossing penalty on some chips though so
/// it is better to avoid lowering through this for integer vectors where
/// possible.
8470 static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8471 const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
  assert(Op.getSimpleValueType() == MVT::v2f64 && "Bad shuffle type!");
8475 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8476 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8477 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8478 ArrayRef<int> Mask = SVOp->getMask();
8479 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8481 if (isSingleInputShuffleMask(Mask)) {
8482 // Use low duplicate instructions for masks that match their pattern.
8483 if (Subtarget->hasSSE3())
8484 if (isShuffleEquivalent(V1, V2, Mask, 0, 0))
8485 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, V1);
8487 // Straight shuffle of a single input vector. Simulate this by using the
    // single input as both of the "inputs" to this instruction.
8489 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
8491 if (Subtarget->hasAVX()) {
8492 // If we have AVX, we can use VPERMILPS which will allow folding a load
8493 // into the shuffle.
8494 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
                         DAG.getConstant(SHUFPDMask, MVT::i8));
    }

    return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1,
                       DAG.getConstant(SHUFPDMask, MVT::i8));
  }
8501 assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
8502 assert(Mask[1] >= 2 && "Non-canonicalized blend!");
8504 // If we have a single input, insert that into V1 if we can do so cheaply.
8505 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8506 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
            MVT::v2f64, DL, V1, V2, Mask, Subtarget, DAG))
      return Insertion;
8509 // Try inverting the insertion since for v2 masks it is easy to do and we
8510 // can't reliably sort the mask one way or the other.
8511 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8512 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8513 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
            MVT::v2f64, DL, V2, V1, InverseMask, Subtarget, DAG))
      return Insertion;
  }
8518 // Try to use one of the special instruction patterns to handle two common
8519 // blend patterns if a zero-blend above didn't work.
  if (isShuffleEquivalent(V1, V2, Mask, 0, 3) ||
      isShuffleEquivalent(V1, V2, Mask, 1, 3))
    if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
      // We can either use a special instruction to load over the low double or
      // to move just the low double.
      return DAG.getNode(
          isShuffleFoldableLoad(V1S) ? X86ISD::MOVLPD : X86ISD::MOVSD,
          DL, MVT::v2f64, V2,
          DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
8529 if (Subtarget->hasSSE41())
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;
8534 // Use dedicated unpack instructions for masks that match their pattern.
8535 if (isShuffleEquivalent(V1, V2, Mask, 0, 2))
8536 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2f64, V1, V2);
8537 if (isShuffleEquivalent(V1, V2, Mask, 1, 3))
8538 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2f64, V1, V2);
8540 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
8541 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V2,
                     DAG.getConstant(SHUFPDMask, MVT::i8));
}
8545 /// \brief Handle lowering of 2-lane 64-bit integer shuffles.
8547 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
8548 /// the integer unit to minimize domain crossing penalties. However, for blends
8549 /// it falls back to the floating point shuffle operation with appropriate bit
8551 static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8552 const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
8555 assert(Op.getSimpleValueType() == MVT::v2i64 && "Bad shuffle type!");
8556 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8557 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8558 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8559 ArrayRef<int> Mask = SVOp->getMask();
8560 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8562 if (isSingleInputShuffleMask(Mask)) {
8563 // Check for being able to broadcast a single element.
8564 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v2i64, DL, V1,
                                                          Mask, Subtarget, DAG))
      return Broadcast;
8568 // Straight shuffle of a single input vector. For everything from SSE2
8569 // onward this has a single fast instruction with no scary immediates.
8570 // We have to map the mask as it is actually a v4i32 shuffle instruction.
8571 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V1);
8572 int WidenedMask[4] = {
8573 std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
        std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
    return DAG.getNode(
        ISD::BITCAST, DL, MVT::v2i64,
        DAG.getNode(X86ISD::PSHUFD, SDLoc(Op), MVT::v4i32, V1,
                    getV4X86ShuffleImm8ForMask(WidenedMask, DAG)));
  }
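  // Editor's note (illustrative, not in the original source): in the
  // single-input path above, a v2i64 mask of [1, 0] widens to the v4i32 mask
  // [2, 3, 0, 1], which getV4X86ShuffleImm8ForMask encodes as 0x4E, i.e. a
  // single PSHUFD that swaps the two 64-bit halves.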
8581 // Try to use byte shift instructions.
8582 if (SDValue Shift = lowerVectorShuffleAsByteShift(
          DL, MVT::v2i64, V1, V2, Mask, DAG))
    return Shift;
  // If we have a single input from V2 insert that into V1 if we can do so
  // cheaply.
8588 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8589 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
            MVT::v2i64, DL, V1, V2, Mask, Subtarget, DAG))
      return Insertion;
8592 // Try inverting the insertion since for v2 masks it is easy to do and we
8593 // can't reliably sort the mask one way or the other.
8594 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8595 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8596 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
            MVT::v2i64, DL, V2, V1, InverseMask, Subtarget, DAG))
      return Insertion;
  }
8601 // We have different paths for blend lowering, but they all must use the
8602 // *exact* same predicate.
8603 bool IsBlendSupported = Subtarget->hasSSE41();
8604 if (IsBlendSupported)
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;
8609 // Use dedicated unpack instructions for masks that match their pattern.
8610 if (isShuffleEquivalent(V1, V2, Mask, 0, 2))
8611 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, V1, V2);
8612 if (isShuffleEquivalent(V1, V2, Mask, 1, 3))
8613 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2i64, V1, V2);
8615 // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
8617 if (Subtarget->hasSSSE3())
8618 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
            DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
      return Rotate;
8622 // If we have direct support for blends, we should lower by decomposing into
8623 // a permute. That will be faster than the domain cross.
8624 if (IsBlendSupported)
    return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2,
                                                      Mask, DAG);
8628 // We implement this with SHUFPD which is pretty lame because it will likely
8629 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
8630 // However, all the alternatives are still more cycles and newer chips don't
8631 // have this problem. It would be really nice if x86 had better shuffles here.
8632 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V1);
8633 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V2);
8634 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
                     DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
}
8638 /// \brief Test whether this can be lowered with a single SHUFPS instruction.
8640 /// This is used to disable more specialized lowerings when the shufps lowering
8641 /// will happen to be efficient.
8642 static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
8643 // This routine only handles 128-bit shufps.
8644 assert(Mask.size() == 4 && "Unsupported mask size!");
8646 // To lower with a single SHUFPS we need to have the low half and high half
8647 // each requiring a single input.
  if (Mask[0] != -1 && Mask[1] != -1 && (Mask[0] < 4) != (Mask[1] < 4))
    return false;
  if (Mask[2] != -1 && Mask[3] != -1 && (Mask[2] < 4) != (Mask[3] < 4))
    return false;

  return true;
}
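// Editor's note (illustrative, not in the original source): [0, 3, 5, 6] is a
// single-SHUFPS mask (the low half reads only V1 and the high half reads only
// V2), whereas [0, 4, 1, 5] is not, because each half mixes both inputs.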
8656 /// \brief Lower a vector shuffle using the SHUFPS instruction.
8658 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
/// It makes no assumptions about whether this is the *best* lowering, it simply
/// uses it.
8661 static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
8662 ArrayRef<int> Mask, SDValue V1,
8663 SDValue V2, SelectionDAG &DAG) {
8664 SDValue LowV = V1, HighV = V2;
8665 int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
  int NumV2Elements =
      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8670 if (NumV2Elements == 1) {
    int V2Index =
        std::find_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; }) -
        Mask.begin();

    // Compute the index adjacent to V2Index and in the same half by toggling
    // the low bit.
8677 int V2AdjIndex = V2Index ^ 1;
8679 if (Mask[V2AdjIndex] == -1) {
8680 // Handles all the cases where we have a single V2 element and an undef.
8681 // This will only ever happen in the high lanes because we commute the
      // vector otherwise.
      if (V2Index < 2)
        std::swap(LowV, HighV);
      NewMask[V2Index] -= 4;
    } else {
      // Handle the case where the V2 element ends up adjacent to a V1 element.
8688 // To make this work, blend them together as the first step.
8689 int V1Index = V2AdjIndex;
8690 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
8691 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
8692 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8694 // Now proceed to reconstruct the final blend as we have the necessary
      // high or low half formed.
      if (V2Index < 2) {
        LowV = V2;
        HighV = V1;
      } else {
        HighV = V2;
      }
      NewMask[V1Index] = 2; // We put the V1 element in V2[2].
      NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
    }
8705 } else if (NumV2Elements == 2) {
8706 if (Mask[0] < 4 && Mask[1] < 4) {
      // Handle the easy case where we have V1 in the low lanes and V2 in the
      // high lanes.
      NewMask[2] -= 4;
      NewMask[3] -= 4;
    } else if (Mask[2] < 4 && Mask[3] < 4) {
      // We also handle the reversed case because this utility may get called
      // when we detect a SHUFPS pattern but can't easily commute the shuffle to
      // arrange things in the right direction.
      NewMask[0] -= 4;
      NewMask[1] -= 4;
      HighV = V1;
      LowV = V2;
    } else {
8720 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
8721 // trying to place elements directly, just blend them and set up the final
8722 // shuffle to place them.
      // The first two blend mask elements are for V1, the second two are for
      // V2.
8726 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
8727 Mask[2] < 4 ? Mask[2] : Mask[3],
8728 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
8729 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
8730 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
8731 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
      // Now we do a normal shuffle of V1 by giving V1 as both operands to
      // the shuffle.
      LowV = HighV = V1;
8736 NewMask[0] = Mask[0] < 4 ? 0 : 2;
8737 NewMask[1] = Mask[0] < 4 ? 2 : 0;
8738 NewMask[2] = Mask[2] < 4 ? 1 : 3;
      NewMask[3] = Mask[2] < 4 ? 3 : 1;
    }
  }

  return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
                     getV4X86ShuffleImm8ForMask(NewMask, DAG));
}
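// Editor's note (illustrative, not in the original source): for the mixed
// mask [0, 5, 2, 7] in the routine above, BlendMask becomes {0, 2, 1, 3} and
// the first SHUFPS produces [V1[0], V1[2], V2[1], V2[3]]; the final SHUFPS of
// that blend with itself then uses NewMask {0, 2, 1, 3} to recover
// [V1[0], V2[1], V1[2], V2[3]].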
8746 /// \brief Lower 4-lane 32-bit floating point shuffles.
8748 /// Uses instructions exclusively from the floating point unit to minimize
8749 /// domain crossing penalties, as these are sufficient to implement all v4f32
8751 static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8752 const X86Subtarget *Subtarget,
                                        SelectionDAG &DAG) {
  SDLoc DL(Op);
8755 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8756 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8757 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8758 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8759 ArrayRef<int> Mask = SVOp->getMask();
8760 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
  int NumV2Elements =
      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8765 if (NumV2Elements == 0) {
8766 // Check for being able to broadcast a single element.
8767 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f32, DL, V1,
                                                          Mask, Subtarget, DAG))
      return Broadcast;
8771 // Use even/odd duplicate instructions for masks that match their pattern.
8772 if (Subtarget->hasSSE3()) {
8773 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2))
8774 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
8775 if (isShuffleEquivalent(V1, V2, Mask, 1, 1, 3, 3))
        return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
    }
8779 if (Subtarget->hasAVX()) {
8780 // If we have AVX, we can use VPERMILPS which will allow folding a load
8781 // into the shuffle.
8782 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
                         getV4X86ShuffleImm8ForMask(Mask, DAG));
    }
8786 // Otherwise, use a straight shuffle of a single input vector. We pass the
8787 // input vector to both operands to simulate this with a SHUFPS.
8788 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DAG));
  }
8792 // There are special ways we can lower some single-element blends. However, we
8793 // have custom ways we can lower more complex single-element blends below that
8794 // we defer to if both this and BLENDPS fail to match, so restrict this to
  // when the V2 input is targeting element 0 of the mask -- that is the fast
  // case here.
8797 if (NumV2Elements == 1 && Mask[0] >= 4)
8798 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4f32, DL, V1, V2,
                                                         Mask, Subtarget, DAG))
      return V;
8802 if (Subtarget->hasSSE41()) {
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;

    // Use INSERTPS if we can complete the shuffle efficiently.
    if (SDValue V = lowerVectorShuffleAsInsertPS(Op, V1, V2, Mask, DAG))
      return V;
8811 if (!isSingleSHUFPSMask(Mask))
8812 if (SDValue BlendPerm = lowerVectorShuffleAsBlendAndPermute(
              DL, MVT::v4f32, V1, V2, Mask, DAG))
        return BlendPerm;
  }
8817 // Use dedicated unpack instructions for masks that match their pattern.
8818 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 1, 5))
8819 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V1, V2);
8820 if (isShuffleEquivalent(V1, V2, Mask, 2, 6, 3, 7))
8821 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V1, V2);
8823 // Otherwise fall back to a SHUFPS lowering strategy.
  return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
}
8827 /// \brief Lower 4-lane i32 vector shuffles.
8829 /// We try to handle these with integer-domain shuffles where we can, but for
8830 /// blends we use the floating point domain blend instructions.
8831 static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8832 const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
8835 assert(Op.getSimpleValueType() == MVT::v4i32 && "Bad shuffle type!");
8836 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8837 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8838 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8839 ArrayRef<int> Mask = SVOp->getMask();
8840 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8842 // Whenever we can lower this as a zext, that instruction is strictly faster
8843 // than any alternative. It also allows us to fold memory operands into the
8844 // shuffle in many cases.
8845 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2,
                                                         Mask, Subtarget, DAG))
    return ZExt;

  int NumV2Elements =
      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8852 if (NumV2Elements == 0) {
8853 // Check for being able to broadcast a single element.
8854 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i32, DL, V1,
                                                          Mask, Subtarget, DAG))
      return Broadcast;
8858 // Straight shuffle of a single input vector. For everything from SSE2
8859 // onward this has a single fast instruction with no scary immediates.
8860 // We coerce the shuffle pattern to be compatible with UNPCK instructions
8861 // but we aren't actually going to use the UNPCK instruction because doing
8862 // so prevents folding a load into this instruction or making a copy.
8863 const int UnpackLoMask[] = {0, 0, 1, 1};
8864 const int UnpackHiMask[] = {2, 2, 3, 3};
8865 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 1, 1))
8866 Mask = UnpackLoMask;
8867 else if (isShuffleEquivalent(V1, V2, Mask, 2, 2, 3, 3))
8868 Mask = UnpackHiMask;
8870 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DAG));
  }
8874 // Try to use bit shift instructions.
8875 if (SDValue Shift = lowerVectorShuffleAsBitShift(
          DL, MVT::v4i32, V1, V2, Mask, DAG))
    return Shift;
8879 // Try to use byte shift instructions.
8880 if (SDValue Shift = lowerVectorShuffleAsByteShift(
          DL, MVT::v4i32, V1, V2, Mask, DAG))
    return Shift;
8884 // There are special ways we can lower some single-element blends.
8885 if (NumV2Elements == 1)
8886 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4i32, DL, V1, V2,
                                                         Mask, Subtarget, DAG))
      return V;
8890 // We have different paths for blend lowering, but they all must use the
8891 // *exact* same predicate.
8892 bool IsBlendSupported = Subtarget->hasSSE41();
8893 if (IsBlendSupported)
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;
8898 if (SDValue Masked =
          lowerVectorShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask, DAG))
    return Masked;
8902 // Use dedicated unpack instructions for masks that match their pattern.
8903 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 1, 5))
8904 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V1, V2);
8905 if (isShuffleEquivalent(V1, V2, Mask, 2, 6, 3, 7))
8906 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V1, V2);
8908 // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
8910 if (Subtarget->hasSSSE3())
8911 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
            DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
      return Rotate;
8915 // If we have direct support for blends, we should lower by decomposing into
8916 // a permute. That will be faster than the domain cross.
8917 if (IsBlendSupported)
    return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2,
                                                      Mask, DAG);
8921 // We implement this with SHUFPS because it can blend from two vectors.
8922 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
  // up the inputs, bypassing domain shift penalties that we would incur if we
  // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
  // needed.
  return DAG.getNode(ISD::BITCAST, DL, MVT::v4i32,
                     DAG.getVectorShuffle(
                         MVT::v4f32, DL,
                         DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V1),
                         DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V2), Mask));
}
8933 /// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
8934 /// shuffle lowering, and the most complex part.
8936 /// The lowering strategy is to try to form pairs of input lanes which are
8937 /// targeted at the same half of the final vector, and then use a dword shuffle
8938 /// to place them onto the right half, and finally unpack the paired lanes into
8939 /// their final position.
8941 /// The exact breakdown of how to form these dword pairs and align them on the
8942 /// correct sides is really tricky. See the comments within the function for
8943 /// more of the details.
8944 static SDValue lowerV8I16SingleInputVectorShuffle(
8945 SDLoc DL, SDValue V, MutableArrayRef<int> Mask,
8946 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8947 assert(V.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
8948 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
8949 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
8951 SmallVector<int, 4> LoInputs;
8952 std::copy_if(LoMask.begin(), LoMask.end(), std::back_inserter(LoInputs),
8953 [](int M) { return M >= 0; });
8954 std::sort(LoInputs.begin(), LoInputs.end());
8955 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
8956 SmallVector<int, 4> HiInputs;
8957 std::copy_if(HiMask.begin(), HiMask.end(), std::back_inserter(HiInputs),
8958 [](int M) { return M >= 0; });
8959 std::sort(HiInputs.begin(), HiInputs.end());
8960 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
  int NumLToL =
      std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
8963 int NumHToL = LoInputs.size() - NumLToL;
  int NumLToH =
      std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
8966 int NumHToH = HiInputs.size() - NumLToH;
8967 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
8968 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
8969 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
8970 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
8972 // Check for being able to broadcast a single element.
8973 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i16, DL, V,
                                                        Mask, Subtarget, DAG))
    return Broadcast;
8977 // Try to use bit shift instructions.
8978 if (SDValue Shift = lowerVectorShuffleAsBitShift(
          DL, MVT::v8i16, V, V, Mask, DAG))
    return Shift;
8982 // Try to use byte shift instructions.
8983 if (SDValue Shift = lowerVectorShuffleAsByteShift(
          DL, MVT::v8i16, V, V, Mask, DAG))
    return Shift;
8987 // Use dedicated unpack instructions for masks that match their pattern.
8988 if (isShuffleEquivalent(V, V, Mask, 0, 0, 1, 1, 2, 2, 3, 3))
8989 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V, V);
8990 if (isShuffleEquivalent(V, V, Mask, 4, 4, 5, 5, 6, 6, 7, 7))
8991 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V, V);
8993 // Try to use byte rotation instructions.
8994 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
          DL, MVT::v8i16, V, V, Mask, Subtarget, DAG))
    return Rotate;
8998 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
8999 // such inputs we can swap two of the dwords across the half mark and end up
9000 // with <=2 inputs to each half in each half. Once there, we can fall through
9001 // to the generic code below. For example:
9003 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
9004 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
9006 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
9007 // and an existing 2-into-2 on the other half. In this case we may have to
9008 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
9009 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
9010 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
9011 // because any other situation (including a 3-into-1 or 1-into-3 in the other
9012 // half than the one we target for fixing) will be fixed when we re-enter this
9013 // path. We will also combine away any sequence of PSHUFD instructions that
9014 // result into a single instruction. Here is an example of the tricky case:
9016 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
9017 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
9019 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
9021 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
9022 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
9024 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
9025 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
9027 // The result is fine to be handled by the generic logic.
9028 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
9029 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
9030 int AOffset, int BOffset) {
9031 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
9032 "Must call this with A having 3 or 1 inputs from the A half.");
9033 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
9034 "Must call this with B having 1 or 3 inputs from the B half.");
9035 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
9036 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
9038 // Compute the index of dword with only one word among the three inputs in
9039 // a half by taking the sum of the half with three inputs and subtracting
    // the sum of the actual three inputs. The difference is the remaining
    // slot.
    int ADWord, BDWord;
    int &TripleDWord = AToAInputs.size() == 3 ? ADWord : BDWord;
9044 int &OneInputDWord = AToAInputs.size() == 3 ? BDWord : ADWord;
9045 int TripleInputOffset = AToAInputs.size() == 3 ? AOffset : BOffset;
9046 ArrayRef<int> TripleInputs = AToAInputs.size() == 3 ? AToAInputs : BToAInputs;
9047 int OneInput = AToAInputs.size() == 3 ? BToAInputs[0] : AToAInputs[0];
9048 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
9049 int TripleNonInputIdx =
9050 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
9051 TripleDWord = TripleNonInputIdx / 2;
    // We use xor with one to compute the adjacent DWord to whichever one the
    // OneInput is in.
9055 OneInputDWord = (OneInput / 2) ^ 1;
9057 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
9058 // and BToA inputs. If there is also such a problem with the BToB and AToB
9059 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
9060 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
9061 // is essential that we don't *create* a 3<-1 as then we might oscillate.
9062 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
      // Compute how many inputs will be flipped by swapping these DWords. We
      // need to balance this to ensure we don't form a 3-1 shuffle in the
      // other half.
9067 int NumFlippedAToBInputs =
9068 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
9069 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
9070 int NumFlippedBToBInputs =
9071 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
9072 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
9073 if ((NumFlippedAToBInputs == 1 &&
9074 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
9075 (NumFlippedBToBInputs == 1 &&
9076 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
9077 // We choose whether to fix the A half or B half based on whether that
9078 // half has zero flipped inputs. At zero, we may not be able to fix it
9079 // with that half. We also bias towards fixing the B half because that
9080 // will more commonly be the high half, and we have to bias one way.
9081 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
9082 ArrayRef<int> Inputs) {
9083 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
9084 bool IsFixIdxInput = std::find(Inputs.begin(), Inputs.end(),
9085 PinnedIdx ^ 1) != Inputs.end();
9086 // Determine whether the free index is in the flipped dword or the
9087 // unflipped dword based on where the pinned index is. We use this bit
9088 // in an xor to conditionally select the adjacent dword.
9089 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
9090 bool IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9091 FixFreeIdx) != Inputs.end();
          if (IsFixIdxInput == IsFixFreeIdxInput)
            FixFreeIdx += 1;
          IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9095 FixFreeIdx) != Inputs.end();
9096 assert(IsFixIdxInput != IsFixFreeIdxInput &&
9097 "We need to be changing the number of flipped inputs!");
9098 int PSHUFHalfMask[] = {0, 1, 2, 3};
9099 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
          V = DAG.getNode(FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
                          MVT::v8i16, V,
                          getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DAG));

          for (int &M : Mask)
            if (M != -1 && M == FixIdx)
              M = FixFreeIdx;
            else if (M != -1 && M == FixFreeIdx)
              M = FixIdx;
        };
9110 if (NumFlippedBToBInputs != 0) {
        int BPinnedIdx =
            BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
        FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
      } else {
        assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
        int APinnedIdx =
            AToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
        FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
      }
      }
    }
9123 int PSHUFDMask[] = {0, 1, 2, 3};
9124 PSHUFDMask[ADWord] = BDWord;
9125 PSHUFDMask[BDWord] = ADWord;
9126 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9127 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9128 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9129 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
    // Adjust the mask to match the new locations of A and B.
    for (int &M : Mask)
      if (M != -1 && M / 2 == ADWord)
9134 M = 2 * BDWord + M % 2;
9135 else if (M != -1 && M/2 == BDWord)
9136 M = 2 * ADWord + M % 2;
9138 // Recurse back into this routine to re-compute state now that this isn't
9139 // a 3 and 1 problem.
    return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
                                Mask);
  };
9143 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
9144 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
9145 else if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
9146 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
9148 // At this point there are at most two inputs to the low and high halves from
9149 // each half. That means the inputs can always be grouped into dwords and
9150 // those dwords can then be moved to the correct half with a dword shuffle.
9151 // We use at most one low and one high word shuffle to collect these paired
9152 // inputs into dwords, and finally a dword shuffle to place them.
9153 int PSHUFLMask[4] = {-1, -1, -1, -1};
9154 int PSHUFHMask[4] = {-1, -1, -1, -1};
9155 int PSHUFDMask[4] = {-1, -1, -1, -1};
9157 // First fix the masks for all the inputs that are staying in their
  // original halves. This will then dictate the targets of the cross-half
  // shuffles.
9160 auto fixInPlaceInputs =
9161 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
9162 MutableArrayRef<int> SourceHalfMask,
9163 MutableArrayRef<int> HalfMask, int HalfOffset) {
    if (InPlaceInputs.empty())
      return;
    if (InPlaceInputs.size() == 1) {
9167 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9168 InPlaceInputs[0] - HalfOffset;
      PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
      return;
    }
    if (IncomingInputs.empty()) {
9173 // Just fix all of the in place inputs.
9174 for (int Input : InPlaceInputs) {
9175 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
        PSHUFDMask[Input / 2] = Input / 2;
      }
      return;
    }

    assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
9182 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9183 InPlaceInputs[0] - HalfOffset;
9184 // Put the second input next to the first so that they are packed into
9185 // a dword. We find the adjacent index by toggling the low bit.
9186 int AdjIndex = InPlaceInputs[0] ^ 1;
9187 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
9188 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
    PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
  };
  fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
9192 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
9194 // Now gather the cross-half inputs and place them into a free dword of
9195 // their target half.
9196 // FIXME: This operation could almost certainly be simplified dramatically to
9197 // look more like the 3-1 fixing operation.
9198 auto moveInputsToRightHalf = [&PSHUFDMask](
9199 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
9200 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
      MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
      int DestOffset) {
9203 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
      return SourceHalfMask[Word] != -1 && SourceHalfMask[Word] != Word;
    };
    auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
                                               int Word) {
9208 int LowWord = Word & ~1;
9209 int HighWord = Word | 1;
9210 return isWordClobbered(SourceHalfMask, LowWord) ||
             isWordClobbered(SourceHalfMask, HighWord);
    };

    if (IncomingInputs.empty())
      return;
9217 if (ExistingInputs.empty()) {
9218 // Map any dwords with inputs from them into the right half.
9219 for (int Input : IncomingInputs) {
9220 // If the source half mask maps over the inputs, turn those into
9221 // swaps and use the swapped lane.
9222 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
9223 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] == -1) {
9224 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
9225 Input - SourceOffset;
9226 // We have to swap the uses in our half mask in one sweep.
9227 for (int &M : HalfMask)
              if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
                M = Input;
              else if (M == Input)
                M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
          } else {
            assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
9234 Input - SourceOffset &&
                       "Previous placement doesn't match!");
          }

          // Note that this correctly re-maps both when we do a swap and when
9238 // we observe the other side of the swap above. We rely on that to
9239 // avoid swapping the members of the input list directly.
          Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
        }

        // Map the input's dword into the correct half.
        if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] == -1)
          PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
        else
          assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
                     Input / 2 &&
                 "Previous placement doesn't match!");
      }
9252 // And just directly shift any other-half mask elements to be same-half
9253 // as we will have mirrored the dword containing the element into the
9254 // same position within that half.
9255 for (int &M : HalfMask)
9256 if (M >= SourceOffset && M < SourceOffset + 4) {
9257 M = M - SourceOffset + DestOffset;
          assert(M >= 0 && "This should never wrap below zero!");
        }

      return;
    }

    // Ensure we have the input in a viable dword of its current half. This
9264 // is particularly tricky because the original position may be clobbered
9265 // by inputs being moved and *staying* in that half.
9266 if (IncomingInputs.size() == 1) {
9267 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9268 int InputFixed = std::find(std::begin(SourceHalfMask),
9269 std::end(SourceHalfMask), -1) -
9270 std::begin(SourceHalfMask) + SourceOffset;
9271 SourceHalfMask[InputFixed - SourceOffset] =
9272 IncomingInputs[0] - SourceOffset;
        std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
                     InputFixed);
        IncomingInputs[0] = InputFixed;
      }
9277 } else if (IncomingInputs.size() == 2) {
9278 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
9279 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9280 // We have two non-adjacent or clobbered inputs we need to extract from
9281 // the source half. To do this, we need to map them into some adjacent
9282 // dword slot in the source mask.
9283 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
9284 IncomingInputs[1] - SourceOffset};
9286 // If there is a free slot in the source half mask adjacent to one of
9287 // the inputs, place the other input in it. We use (Index XOR 1) to
9288 // compute an adjacent index.
9289 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
9290 SourceHalfMask[InputsFixed[0] ^ 1] == -1) {
9291 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
9292 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9293 InputsFixed[1] = InputsFixed[0] ^ 1;
9294 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
9295 SourceHalfMask[InputsFixed[1] ^ 1] == -1) {
9296 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
9297 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
9298 InputsFixed[0] = InputsFixed[1] ^ 1;
9299 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] == -1 &&
9300 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] == -1) {
          // The two inputs are in the same DWord but it is clobbered and the
          // adjacent DWord isn't used at all. Move both inputs to the free
          // slot.
          SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
9305 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
9306 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
          InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
        } else {
          // The only way we hit this point is if there is no clobbering
9310 // (because there are no off-half inputs to this half) and there is no
9311 // free slot adjacent to one of the inputs. In this case, we have to
9312 // swap an input with a non-input.
9313 for (int i = 0; i < 4; ++i)
9314 assert((SourceHalfMask[i] == -1 || SourceHalfMask[i] == i) &&
9315 "We can't handle any clobbers here!");
9316 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
9317 "Cannot have adjacent inputs here!");
9319 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9320 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
9322 // We also have to update the final source mask in this case because
9323 // it may need to undo the above swap.
9324 for (int &M : FinalSourceHalfMask)
9325 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
9326 M = InputsFixed[1] + SourceOffset;
9327 else if (M == InputsFixed[1] + SourceOffset)
9328 M = (InputsFixed[0] ^ 1) + SourceOffset;
          InputsFixed[1] = InputsFixed[0] ^ 1;
        }

        // Point everything at the fixed inputs.
9334 for (int &M : HalfMask)
9335 if (M == IncomingInputs[0])
9336 M = InputsFixed[0] + SourceOffset;
9337 else if (M == IncomingInputs[1])
9338 M = InputsFixed[1] + SourceOffset;
9340 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
        IncomingInputs[1] = InputsFixed[1] + SourceOffset;
      }
    } else {
      llvm_unreachable("Unhandled input size!");
    }
9347 // Now hoist the DWord down to the right half.
9348 int FreeDWord = (PSHUFDMask[DestOffset / 2] == -1 ? 0 : 1) + DestOffset / 2;
9349 assert(PSHUFDMask[FreeDWord] == -1 && "DWord not free");
9350 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
9351 for (int &M : HalfMask)
      for (int Input : IncomingInputs)
        if (M == Input)
          M = FreeDWord * 2 + Input % 2;
  };
9356 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
9357 /*SourceOffset*/ 4, /*DestOffset*/ 0);
9358 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
9359 /*SourceOffset*/ 0, /*DestOffset*/ 4);
  // Now enact all the shuffles we've computed to move the inputs into their
  // target halves.
9363 if (!isNoopShuffleMask(PSHUFLMask))
9364 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9365 getV4X86ShuffleImm8ForMask(PSHUFLMask, DAG));
9366 if (!isNoopShuffleMask(PSHUFHMask))
9367 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9368 getV4X86ShuffleImm8ForMask(PSHUFHMask, DAG));
9369 if (!isNoopShuffleMask(PSHUFDMask))
9370 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9371 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9372 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9373 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9375 // At this point, each half should contain all its inputs, and we can then
9376 // just shuffle them into their final position.
9377 assert(std::count_if(LoMask.begin(), LoMask.end(),
9378 [](int M) { return M >= 4; }) == 0 &&
9379 "Failed to lift all the high half inputs to the low mask!");
9380 assert(std::count_if(HiMask.begin(), HiMask.end(),
9381 [](int M) { return M >= 0 && M < 4; }) == 0 &&
9382 "Failed to lift all the low half inputs to the high mask!");
9384 // Do a half shuffle for the low mask.
9385 if (!isNoopShuffleMask(LoMask))
9386 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9387 getV4X86ShuffleImm8ForMask(LoMask, DAG));
9389 // Do a half shuffle with the high mask after shifting its values down.
9390 for (int &M : HiMask)
9393 if (!isNoopShuffleMask(HiMask))
9394 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9395 getV4X86ShuffleImm8ForMask(HiMask, DAG));
9400 /// \brief Detect whether the mask pattern should be lowered through
9403 /// This essentially tests whether viewing the mask as an interleaving of two
9404 /// sub-sequences reduces the cross-input traffic of a blend operation. If so,
9405 /// lowering it through interleaving is a significantly better strategy.
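/// As an illustrative (non-exhaustive) example: for the v8i16 mask
/// <0, 8, 1, 9, 6, 14, 7, 15>, every even output slot reads from V1 and every
/// odd slot reads from V2, so an unpack-style interleaving incurs no
/// cross-input traffic, whereas splitting into low and high halves would
/// force two elements from the "wrong" input into each half.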
9406 static bool shouldLowerAsInterleaving(ArrayRef<int> Mask) {
9407 int NumEvenInputs[2] = {0, 0};
9408 int NumOddInputs[2] = {0, 0};
9409 int NumLoInputs[2] = {0, 0};
9410 int NumHiInputs[2] = {0, 0};
9411 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
9415 int InputIdx = Mask[i] >= Size;
9418 ++NumLoInputs[InputIdx];
9420 ++NumHiInputs[InputIdx];
9423 ++NumEvenInputs[InputIdx];
9425 ++NumOddInputs[InputIdx];
9428 // The minimum number of cross-input results for both the interleaved and
9429 // split cases. If interleaving results in fewer cross-input results, return
9431 int InterleavedCrosses = std::min(NumEvenInputs[1] + NumOddInputs[0],
9432 NumEvenInputs[0] + NumOddInputs[1]);
9433 int SplitCrosses = std::min(NumLoInputs[1] + NumHiInputs[0],
9434 NumLoInputs[0] + NumHiInputs[1]);
9435 return InterleavedCrosses < SplitCrosses;
9438 /// \brief Blend two v8i16 vectors using a naive unpack strategy.
9440 /// This strategy only works when the inputs from each vector fit into a single
9441 /// half of that vector, and generally there are not so many inputs as to leave
9442 /// the in-place shuffles required highly constrained (and thus expensive). It
9443 /// shifts all the inputs into a single side of both input vectors and then
9444 /// uses an unpack to interleave these inputs in a single vector. At that
9445 /// point, we will fall back on the generic single input shuffle lowering.
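/// As a sketch of the easy case (illustrative only): for the mask
/// <0, 8, 1, 9, u, u, u, u> there are two low inputs from each vector and no
/// high inputs, so no pre-shuffles are needed; the low unpack of V1 and V2
/// already produces the desired interleaving and the munged single-input mask
/// collapses to the identity.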
9446 static SDValue lowerV8I16BasicBlendVectorShuffle(SDLoc DL, SDValue V1,
9448 MutableArrayRef<int> Mask,
9449 const X86Subtarget *Subtarget,
9450 SelectionDAG &DAG) {
9451 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9452 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9453 SmallVector<int, 3> LoV1Inputs, HiV1Inputs, LoV2Inputs, HiV2Inputs;
9454 for (int i = 0; i < 8; ++i)
9455 if (Mask[i] >= 0 && Mask[i] < 4)
9456 LoV1Inputs.push_back(i);
9457 else if (Mask[i] >= 4 && Mask[i] < 8)
9458 HiV1Inputs.push_back(i);
9459 else if (Mask[i] >= 8 && Mask[i] < 12)
9460 LoV2Inputs.push_back(i);
9461 else if (Mask[i] >= 12)
9462 HiV2Inputs.push_back(i);
9464 int NumV1Inputs = LoV1Inputs.size() + HiV1Inputs.size();
9465 int NumV2Inputs = LoV2Inputs.size() + HiV2Inputs.size();
9468 assert(NumV1Inputs > 0 && NumV1Inputs <= 3 && "At most 3 inputs supported");
9469 assert(NumV2Inputs > 0 && NumV2Inputs <= 3 && "At most 3 inputs supported");
9470 assert(NumV1Inputs + NumV2Inputs <= 4 && "At most 4 combined inputs");
9472 bool MergeFromLo = LoV1Inputs.size() + LoV2Inputs.size() >=
9473 HiV1Inputs.size() + HiV2Inputs.size();
9475 auto moveInputsToHalf = [&](SDValue V, ArrayRef<int> LoInputs,
9476 ArrayRef<int> HiInputs, bool MoveToLo,
9478 ArrayRef<int> GoodInputs = MoveToLo ? LoInputs : HiInputs;
9479 ArrayRef<int> BadInputs = MoveToLo ? HiInputs : LoInputs;
9480 if (BadInputs.empty())
9483 int MoveMask[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9484 int MoveOffset = MoveToLo ? 0 : 4;
9486 if (GoodInputs.empty()) {
9487 for (int BadInput : BadInputs) {
9488 MoveMask[Mask[BadInput] % 4 + MoveOffset] = Mask[BadInput] - MaskOffset;
9489 Mask[BadInput] = Mask[BadInput] % 4 + MoveOffset + MaskOffset;
9492 if (GoodInputs.size() == 2) {
9493 // If the low inputs are spread across two dwords, pack them into
9495 MoveMask[MoveOffset] = Mask[GoodInputs[0]] - MaskOffset;
9496 MoveMask[MoveOffset + 1] = Mask[GoodInputs[1]] - MaskOffset;
9497 Mask[GoodInputs[0]] = MoveOffset + MaskOffset;
9498 Mask[GoodInputs[1]] = MoveOffset + 1 + MaskOffset;
9500 // Otherwise pin the good inputs.
9501 for (int GoodInput : GoodInputs)
9502 MoveMask[Mask[GoodInput] - MaskOffset] = Mask[GoodInput] - MaskOffset;
9505 if (BadInputs.size() == 2) {
9506 // If we have two bad inputs then there may be either one or two good
9507 // inputs fixed in place. Find a fixed input, and then find the *other*
9508 // two adjacent indices by using modular arithmetic.
9510 std::find_if(std::begin(MoveMask) + MoveOffset, std::end(MoveMask),
9511 [](int M) { return M >= 0; }) -
9512 std::begin(MoveMask);
9514 ((((GoodMaskIdx - MoveOffset) & ~1) + 2) % 4) + MoveOffset;
9515 assert(MoveMask[MoveMaskIdx] == -1 && "Expected empty slot");
9516 assert(MoveMask[MoveMaskIdx + 1] == -1 && "Expected empty slot");
9517 MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
9518 MoveMask[MoveMaskIdx + 1] = Mask[BadInputs[1]] - MaskOffset;
9519 Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
9520 Mask[BadInputs[1]] = MoveMaskIdx + 1 + MaskOffset;
9522 assert(BadInputs.size() == 1 && "All sizes handled");
9523 int MoveMaskIdx = std::find(std::begin(MoveMask) + MoveOffset,
9524 std::end(MoveMask), -1) -
9525 std::begin(MoveMask);
9526 MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
9527 Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
9531 return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
9534 V1 = moveInputsToHalf(V1, LoV1Inputs, HiV1Inputs, MergeFromLo,
9536 V2 = moveInputsToHalf(V2, LoV2Inputs, HiV2Inputs, MergeFromLo,
9539 // FIXME: Select an interleaving of the merge of V1 and V2 that minimizes
9540 // cross-half traffic in the final shuffle.
9542 // Munge the mask to be a single-input mask after the unpack merges the
9546 M = 2 * (M % 4) + (M / 8);
9548 return DAG.getVectorShuffle(
9549 MVT::v8i16, DL, DAG.getNode(MergeFromLo ? X86ISD::UNPCKL : X86ISD::UNPCKH,
9550 DL, MVT::v8i16, V1, V2),
9551 DAG.getUNDEF(MVT::v8i16), Mask);
9554 /// \brief Generic lowering of 8-lane i16 shuffles.
9556 /// This handles both single-input shuffles and combined shuffle/blends with
9557 /// two inputs. The single input shuffles are immediately delegated to
9558 /// a dedicated lowering routine.
9560 /// The blends are lowered in one of three fundamental ways. If there are few
9561 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
9562 /// of the input is significantly cheaper when lowered as an interleaving of
9563 /// the two inputs, try to interleave them. Otherwise, blend the low and high
9564 /// halves of the inputs separately (making them have relatively few inputs)
9565 /// and then concatenate them.
9566 static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9567 const X86Subtarget *Subtarget,
9568 SelectionDAG &DAG) {
9570 assert(Op.getSimpleValueType() == MVT::v8i16 && "Bad shuffle type!");
9571 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9572 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9573 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9574 ArrayRef<int> OrigMask = SVOp->getMask();
9575 int MaskStorage[8] = {OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9576 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7]};
9577 MutableArrayRef<int> Mask(MaskStorage);
9579 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
9581 // Whenever we can lower this as a zext, that instruction is strictly faster
9582 // than any alternative.
9583 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9584 DL, MVT::v8i16, V1, V2, OrigMask, Subtarget, DAG))
9587 auto isV1 = [](int M) { return M >= 0 && M < 8; };
9588 auto isV2 = [](int M) { return M >= 8; };
9590 int NumV1Inputs = std::count_if(Mask.begin(), Mask.end(), isV1);
9591 int NumV2Inputs = std::count_if(Mask.begin(), Mask.end(), isV2);
9593 if (NumV2Inputs == 0)
9594 return lowerV8I16SingleInputVectorShuffle(DL, V1, Mask, Subtarget, DAG);
9596 assert(NumV1Inputs > 0 && "All single-input shuffles should be canonicalized "
9597 "to be V1-input shuffles.");
9599 // Try to use bit shift instructions.
9600 if (SDValue Shift = lowerVectorShuffleAsBitShift(
9601 DL, MVT::v8i16, V1, V2, Mask, DAG))
9604 // Try to use byte shift instructions.
9605 if (SDValue Shift = lowerVectorShuffleAsByteShift(
9606 DL, MVT::v8i16, V1, V2, Mask, DAG))
9609 // There are special ways we can lower some single-element blends.
9610 if (NumV2Inputs == 1)
9611 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v8i16, DL, V1, V2,
9612 Mask, Subtarget, DAG))
9615 // We have different paths for blend lowering, but they all must use the
9616 // *exact* same predicate.
9617 bool IsBlendSupported = Subtarget->hasSSE41();
9618 if (IsBlendSupported)
9619 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
9623 if (SDValue Masked =
9624 lowerVectorShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask, DAG))
9627 // Use dedicated unpack instructions for masks that match their pattern.
9628 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 2, 10, 3, 11))
9629 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V1, V2);
9630 if (isShuffleEquivalent(V1, V2, Mask, 4, 12, 5, 13, 6, 14, 7, 15))
9631 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V1, V2);
9633 // Try to use byte rotation instructions.
9634 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9635 DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
9638 if (NumV1Inputs + NumV2Inputs <= 4)
9639 return lowerV8I16BasicBlendVectorShuffle(DL, V1, V2, Mask, Subtarget, DAG);
9641 // Check whether an interleaving lowering is likely to be more efficient.
9642 // This isn't perfect but it is a strong heuristic that tends to work well on
9643 // the kinds of shuffles that show up in practice.
9645 // FIXME: Handle 1x, 2x, and 4x interleaving.
9646 if (shouldLowerAsInterleaving(Mask)) {
9647 // FIXME: Figure out whether we should pack these into the low or high
9650 int EMask[8], OMask[8];
9651 for (int i = 0; i < 4; ++i) {
9652 EMask[i] = Mask[2*i];
9653 OMask[i] = Mask[2*i + 1];
9658 SDValue Evens = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, EMask);
9659 SDValue Odds = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, OMask);
9661 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, Evens, Odds);
9664 // If we have direct support for blends, we should lower by decomposing into
9666 if (IsBlendSupported)
9667 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
9670 int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9671 int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9673 for (int i = 0; i < 4; ++i) {
9674 LoBlendMask[i] = Mask[i];
9675 HiBlendMask[i] = Mask[i + 4];
9678 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
9679 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
9680 LoV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, LoV);
9681 HiV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, HiV);
9683 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9684 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, LoV, HiV));
9687 /// \brief Check whether a compaction lowering can be done by dropping even
9688 /// elements and compute how many times even elements must be dropped.
9690 /// This handles shuffles which take every Nth element where N is a power of
9691 /// two. Example shuffle masks:
9693 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
9694 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
9695 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
9696 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
9697 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
9698 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
9700 /// Any of these lanes can of course be undef.
9702 /// This routine only supports N <= 3.
9703 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
9706 /// \returns N above, or the number of times even elements must be dropped if
9707 /// there is such a number. Otherwise returns zero.
9708 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
9709 // Figure out whether we're looping over two inputs or just one.
9710 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9712 // The modulus for the shuffle vector entries is based on whether this is
9713 // a single input or not.
9714 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
9715 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
9716 "We should only be called with masks with a power-of-2 size!");
9718 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
9720 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
9721 // and 2^3 simultaneously. This is because we may have ambiguity with
9722 // partially undef inputs.
9723 bool ViableForN[3] = {true, true, true};
9725 for (int i = 0, e = Mask.size(); i < e; ++i) {
9726 // Ignore undef lanes; we'll optimistically collapse them to the pattern we
9731 bool IsAnyViable = false;
9732 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9733 if (ViableForN[j]) {
9736 // The shuffle mask must be equal to (i * 2^N) % M.
9737 if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
9740 ViableForN[j] = false;
9742 // Early exit if we exhaust the possible powers of two.
9747 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9751 // Return 0 as there is no viable power of two.
9755 /// \brief Generic lowering of v16i8 shuffles.
9757 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
9758 /// detect any complexity reducing interleaving. If that doesn't help, it uses
9759 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
9760 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them
9762 static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9763 const X86Subtarget *Subtarget,
9764 SelectionDAG &DAG) {
9766 assert(Op.getSimpleValueType() == MVT::v16i8 && "Bad shuffle type!");
9767 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9768 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9769 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9770 ArrayRef<int> OrigMask = SVOp->getMask();
9771 assert(OrigMask.size() == 16 && "Unexpected mask size for v16 shuffle!");
9773 // Try to use bit shift instructions.
9774 if (SDValue Shift = lowerVectorShuffleAsBitShift(
9775 DL, MVT::v16i8, V1, V2, OrigMask, DAG))
9778 // Try to use byte shift instructions.
9779 if (SDValue Shift = lowerVectorShuffleAsByteShift(
9780 DL, MVT::v16i8, V1, V2, OrigMask, DAG))
9783 // Try to use byte rotation instructions.
9784 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9785 DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
9788 // Try to use a zext lowering.
9789 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9790 DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
9793 int MaskStorage[16] = {
9794 OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9795 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7],
9796 OrigMask[8], OrigMask[9], OrigMask[10], OrigMask[11],
9797 OrigMask[12], OrigMask[13], OrigMask[14], OrigMask[15]};
9798 MutableArrayRef<int> Mask(MaskStorage);
9799 MutableArrayRef<int> LoMask = Mask.slice(0, 8);
9800 MutableArrayRef<int> HiMask = Mask.slice(8, 8);
9803 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 16; });
9805 // For single-input shuffles, there are some nicer lowering tricks we can use.
9806 if (NumV2Elements == 0) {
9807 // Check for being able to broadcast a single element.
9808 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i8, DL, V1,
9809 Mask, Subtarget, DAG))
9812 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
9813 // Notably, this handles splat and partial-splat shuffles more efficiently.
9814 // However, it only makes sense if the pre-duplication shuffle simplifies
9815 // things significantly. Currently, this means we need to be able to
9816 // express the pre-duplication shuffle as an i16 shuffle.
9818 // FIXME: We should check for other patterns which can be widened into an
9819 // i16 shuffle as well.
9820 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
9821 for (int i = 0; i < 16; i += 2)
9822 if (Mask[i] != -1 && Mask[i + 1] != -1 && Mask[i] != Mask[i + 1])
9827 auto tryToWidenViaDuplication = [&]() -> SDValue {
9828 if (!canWidenViaDuplication(Mask))
9830 SmallVector<int, 4> LoInputs;
9831 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(LoInputs),
9832 [](int M) { return M >= 0 && M < 8; });
9833 std::sort(LoInputs.begin(), LoInputs.end());
9834 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
9836 SmallVector<int, 4> HiInputs;
9837 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(HiInputs),
9838 [](int M) { return M >= 8; });
9839 std::sort(HiInputs.begin(), HiInputs.end());
9840 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
9843 bool TargetLo = LoInputs.size() >= HiInputs.size();
9844 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
9845 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
9847 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9848 SmallDenseMap<int, int, 8> LaneMap;
9849 for (int I : InPlaceInputs) {
9850 PreDupI16Shuffle[I/2] = I/2;
9853 int j = TargetLo ? 0 : 4, je = j + 4;
9854 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
9855 // Check if j is already a shuffle of this input. This happens when
9856 // there are two adjacent bytes after we move the low one.
9857 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
9858 // If we haven't yet mapped the input, search for a slot into which
9860 while (j < je && PreDupI16Shuffle[j] != -1)
9864 // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
9867 // Map this input with the i16 shuffle.
9868 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
9871 // Update the lane map based on the mapping we ended up with.
9872 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
9875 ISD::BITCAST, DL, MVT::v16i8,
9876 DAG.getVectorShuffle(MVT::v8i16, DL,
9877 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9878 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
9880 // Unpack the bytes to form the i16s that will be shuffled into place.
9881 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9882 MVT::v16i8, V1, V1);
9884 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9885 for (int i = 0; i < 16; ++i)
9886 if (Mask[i] != -1) {
9887 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
9888 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
9889 if (PostDupI16Shuffle[i / 2] == -1)
9890 PostDupI16Shuffle[i / 2] = MappedMask;
9892 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
9893 "Conflicting entrties in the original shuffle!");
9896 ISD::BITCAST, DL, MVT::v16i8,
9897 DAG.getVectorShuffle(MVT::v8i16, DL,
9898 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9899 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
9901 if (SDValue V = tryToWidenViaDuplication())
9905 // Check whether an interleaving lowering is likely to be more efficient.
9906 // This isn't perfect but it is a strong heuristic that tends to work well on
9907 // the kinds of shuffles that show up in practice.
9909 // FIXME: We need to handle other interleaving widths (i16, i32, ...).
9910 if (shouldLowerAsInterleaving(Mask)) {
9911 int NumLoHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
9912 return (M >= 0 && M < 8) || (M >= 16 && M < 24);
9914 int NumHiHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
9915 return (M >= 8 && M < 16) || M >= 24;
9917 int EMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
9918 -1, -1, -1, -1, -1, -1, -1, -1};
9919 int OMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
9920 -1, -1, -1, -1, -1, -1, -1, -1};
9921 bool UnpackLo = NumLoHalf >= NumHiHalf;
9922 MutableArrayRef<int> TargetEMask(UnpackLo ? EMask : EMask + 8, 8);
9923 MutableArrayRef<int> TargetOMask(UnpackLo ? OMask : OMask + 8, 8);
9924 for (int i = 0; i < 8; ++i) {
9925 TargetEMask[i] = Mask[2 * i];
9926 TargetOMask[i] = Mask[2 * i + 1];
9929 SDValue Evens = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, EMask);
9930 SDValue Odds = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, OMask);
9932 return DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9933 MVT::v16i8, Evens, Odds);
9936 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
9937 // with PSHUFB. It is important to do this before we attempt to generate any
9938 // blends but after all of the single-input lowerings. If the single input
9939 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
9940 // want to preserve that and we can DAG combine any longer sequences into
9941 // a PSHUFB in the end. But once we start blending from multiple inputs,
9942 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
9943 // and there are *very* few patterns that would actually be faster than the
9944 // PSHUFB approach because of its ability to zero lanes.
9946 // FIXME: The only exceptions to the above are blends which are exact
9947 // interleavings with direct instructions supporting them. We currently don't
9948 // handle those well here.
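// Summary of the PSHUFB blend built below (descriptive only): for each
// result byte we emit one control byte per input, using the source index
// when the byte comes from that input and 0x80 (which PSHUFB treats as
// "write zero") otherwise. Each input is shuffled with its control vector
// and the two zero-filled results are then OR'ed together.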
9949 if (Subtarget->hasSSSE3()) {
9952 bool V1InUse = false;
9953 bool V2InUse = false;
9954 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
9956 for (int i = 0; i < 16; ++i) {
9957 if (Mask[i] == -1) {
9958 V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
9960 const int ZeroMask = 0x80;
9961 int V1Idx = (Mask[i] < 16 ? Mask[i] : ZeroMask);
9962 int V2Idx = (Mask[i] < 16 ? ZeroMask : Mask[i] - 16);
9964 V1Idx = V2Idx = ZeroMask;
9965 V1Mask[i] = DAG.getConstant(V1Idx, MVT::i8);
9966 V2Mask[i] = DAG.getConstant(V2Idx, MVT::i8);
9967 V1InUse |= (ZeroMask != V1Idx);
9968 V2InUse |= (ZeroMask != V2Idx);
9973 V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V1,
9974 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
9976 V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V2,
9977 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));
9979 // If we need shuffled inputs from both, blend the two.
9980 if (V1InUse && V2InUse)
9981 return DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
9983 return V1; // Single inputs are easy.
9985 return V2; // Single inputs are easy.
9986 // Shuffling to a zeroable vector.
9987 return getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
9990 // There are special ways we can lower some single-element blends.
9991 if (NumV2Elements == 1)
9992 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v16i8, DL, V1, V2,
9993 Mask, Subtarget, DAG))
9996 // Check whether a compaction lowering can be done. This handles shuffles
9997 // which take every Nth element for some even N. See the helper function for
10000 // We special case these as they can be particularly efficiently handled with
10001 // the PACKUSWB instruction on x86 and they show up in common patterns of
10002 // rearranging bytes to truncate wide elements.
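// For example (illustrative): truncating <8 x i16> elements to bytes in a
// single input yields the N = 1 single-input mask shown above,
// <0, 2, 4, ..., 14, 0, 2, ..., 14>, for which the helper returns 1 and a
// single PACKUSWB of the masked input with itself suffices.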
10003 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask)) {
10004 // NumEvenDrops is the power of two stride of the elements. Another way of
10005 // thinking about it is that we need to drop the even elements this many
10006 // times to get the original input.
10007 bool IsSingleInput = isSingleInputShuffleMask(Mask);
10009 // First we need to zero all the dropped bytes.
10010 assert(NumEvenDrops <= 3 &&
10011 "No support for dropping even elements more than 3 times.");
10012 // We use the mask type to pick which bytes are preserved based on how many
10013 // elements are dropped.
10014 MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
10015 SDValue ByteClearMask =
10016 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8,
10017 DAG.getConstant(0xFF, MaskVTs[NumEvenDrops - 1]));
10018 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
10019 if (!IsSingleInput)
10020 V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
10022 // Now pack things back together.
10023 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
10024 V2 = IsSingleInput ? V1 : DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
10025 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
10026 for (int i = 1; i < NumEvenDrops; ++i) {
10027 Result = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, Result);
10028 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
10034 int V1LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10035 int V1HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10036 int V2LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10037 int V2HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10039 auto buildBlendMasks = [](MutableArrayRef<int> HalfMask,
10040 MutableArrayRef<int> V1HalfBlendMask,
10041 MutableArrayRef<int> V2HalfBlendMask) {
10042 for (int i = 0; i < 8; ++i)
10043 if (HalfMask[i] >= 0 && HalfMask[i] < 16) {
10044 V1HalfBlendMask[i] = HalfMask[i];
10046 } else if (HalfMask[i] >= 16) {
10047 V2HalfBlendMask[i] = HalfMask[i] - 16;
10048 HalfMask[i] = i + 8;
10051 buildBlendMasks(LoMask, V1LoBlendMask, V2LoBlendMask);
10052 buildBlendMasks(HiMask, V1HiBlendMask, V2HiBlendMask);
10054 SDValue Zero = getZeroVector(MVT::v8i16, Subtarget, DAG, DL);
10056 auto buildLoAndHiV8s = [&](SDValue V, MutableArrayRef<int> LoBlendMask,
10057 MutableArrayRef<int> HiBlendMask) {
10059 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
10060 // them out and avoid using UNPCK{L,H} to extract the elements of V as
10062 if (std::none_of(LoBlendMask.begin(), LoBlendMask.end(),
10063 [](int M) { return M >= 0 && M % 2 == 1; }) &&
10064 std::none_of(HiBlendMask.begin(), HiBlendMask.end(),
10065 [](int M) { return M >= 0 && M % 2 == 1; })) {
10066 // Use a mask to drop the high bytes.
10067 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
10068 V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, V1,
10069 DAG.getConstant(0x00FF, MVT::v8i16));
10071 // This will be a single vector shuffle instead of a blend so nuke V2.
10072 V2 = DAG.getUNDEF(MVT::v8i16);
10074 // Squash the masks to point directly into V1.
10075 for (int &M : LoBlendMask)
10078 for (int &M : HiBlendMask)
10082 // Otherwise just unpack the low half of V into V1 and the high half into
10083 // V2 so that we can blend them as i16s.
10084 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
10085 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
10086 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
10087 DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
10090 SDValue BlendedLo = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
10091 SDValue BlendedHi = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
10092 return std::make_pair(BlendedLo, BlendedHi);
10094 SDValue V1Lo, V1Hi, V2Lo, V2Hi;
10095 std::tie(V1Lo, V1Hi) = buildLoAndHiV8s(V1, V1LoBlendMask, V1HiBlendMask);
10096 std::tie(V2Lo, V2Hi) = buildLoAndHiV8s(V2, V2LoBlendMask, V2HiBlendMask);
10098 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Lo, V2Lo, LoMask);
10099 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Hi, V2Hi, HiMask);
10101 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
10104 /// \brief Dispatching routine to lower various 128-bit x86 vector shuffles.
10106 /// This routine breaks down the specific type of 128-bit shuffle and
10107 /// dispatches to the lowering routines accordingly.
10108 static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10109 MVT VT, const X86Subtarget *Subtarget,
10110 SelectionDAG &DAG) {
10111 switch (VT.SimpleTy) {
10113 return lowerV2I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10115 return lowerV2F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10117 return lowerV4I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10119 return lowerV4F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10121 return lowerV8I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
10123 return lowerV16I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
10126 llvm_unreachable("Unimplemented!");
10130 /// \brief Helper function to test whether a shuffle mask could be
10131 /// simplified by widening the elements being shuffled.
10133 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
10134 /// leaves it in an unspecified state.
10136 /// NOTE: This must handle normal vector shuffle masks and *target* vector
10137 /// shuffle masks. The latter have the special property of a '-2' representing
10138 /// a zero-ed lane of a vector.
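/// As an illustrative example: the v4 mask <0, 1, 6, 7> widens to the v2 mask
/// <0, 3>, whereas <0, 2, 4, 6> cannot be widened because 0 and 2 are not
/// adjacent elements of the same wide lane.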
10139 static bool canWidenShuffleElements(ArrayRef<int> Mask,
10140 SmallVectorImpl<int> &WidenedMask) {
10141 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
10142 // If both elements are undef, it's trivial.
10143 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] == SM_SentinelUndef) {
10144 WidenedMask.push_back(SM_SentinelUndef);
10148 // Check for an undef mask and a mask value properly aligned to fit with
10149 // a pair of values. If we find such a case, use the non-undef mask's value.
10150 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] >= 0 && Mask[i + 1] % 2 == 1) {
10151 WidenedMask.push_back(Mask[i + 1] / 2);
10154 if (Mask[i + 1] == SM_SentinelUndef && Mask[i] >= 0 && Mask[i] % 2 == 0) {
10155 WidenedMask.push_back(Mask[i] / 2);
10159 // When zeroing, we need to spread the zeroing across both lanes to widen.
10160 if (Mask[i] == SM_SentinelZero || Mask[i + 1] == SM_SentinelZero) {
10161 if ((Mask[i] == SM_SentinelZero || Mask[i] == SM_SentinelUndef) &&
10162 (Mask[i + 1] == SM_SentinelZero || Mask[i + 1] == SM_SentinelUndef)) {
10163 WidenedMask.push_back(SM_SentinelZero);
10169 // Finally check if the two mask values are adjacent and aligned with
10171 if (Mask[i] != SM_SentinelUndef && Mask[i] % 2 == 0 && Mask[i] + 1 == Mask[i + 1]) {
10172 WidenedMask.push_back(Mask[i] / 2);
10176 // Otherwise we can't safely widen the elements used in this shuffle.
10179 assert(WidenedMask.size() == Mask.size() / 2 &&
10180 "Incorrect size of mask after widening the elements!");
10185 /// \brief Generic routine to split vector shuffle into half-sized shuffles.
10187 /// This routine just extracts two subvectors, shuffles them independently, and
10188 /// then concatenates them back together. This should work effectively with all
10189 /// AVX vector shuffle types.
10190 static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10191 SDValue V2, ArrayRef<int> Mask,
10192 SelectionDAG &DAG) {
10193 assert(VT.getSizeInBits() >= 256 &&
10194 "Only for 256-bit or wider vector shuffles!");
10195 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
10196 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
10198 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
10199 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
10201 int NumElements = VT.getVectorNumElements();
10202 int SplitNumElements = NumElements / 2;
10203 MVT ScalarVT = VT.getScalarType();
10204 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
10206 // Rather than splitting build-vectors, just build two narrower build
10207 // vectors. This helps shuffling with splats and zeros.
10208 auto SplitVector = [&](SDValue V) {
10209 while (V.getOpcode() == ISD::BITCAST)
10210 V = V->getOperand(0);
10212 MVT OrigVT = V.getSimpleValueType();
10213 int OrigNumElements = OrigVT.getVectorNumElements();
10214 int OrigSplitNumElements = OrigNumElements / 2;
10215 MVT OrigScalarVT = OrigVT.getScalarType();
10216 MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
10220 auto *BV = dyn_cast<BuildVectorSDNode>(V);
10222 LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
10223 DAG.getIntPtrConstant(0));
10224 HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
10225 DAG.getIntPtrConstant(OrigSplitNumElements));
10228 SmallVector<SDValue, 16> LoOps, HiOps;
10229 for (int i = 0; i < OrigSplitNumElements; ++i) {
10230 LoOps.push_back(BV->getOperand(i));
10231 HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
10233 LoV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, LoOps);
10234 HiV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, HiOps);
10236 return std::make_pair(DAG.getNode(ISD::BITCAST, DL, SplitVT, LoV),
10237 DAG.getNode(ISD::BITCAST, DL, SplitVT, HiV));
10240 SDValue LoV1, HiV1, LoV2, HiV2;
10241 std::tie(LoV1, HiV1) = SplitVector(V1);
10242 std::tie(LoV2, HiV2) = SplitVector(V2);
10244 // Now create two 4-way blends of these half-width vectors.
10245 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
10246 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
10247 SmallVector<int, 32> V1BlendMask, V2BlendMask, BlendMask;
10248 for (int i = 0; i < SplitNumElements; ++i) {
10249 int M = HalfMask[i];
10250 if (M >= NumElements) {
10251 if (M >= NumElements + SplitNumElements)
10255 V2BlendMask.push_back(M - NumElements);
10256 V1BlendMask.push_back(-1);
10257 BlendMask.push_back(SplitNumElements + i);
10258 } else if (M >= 0) {
10259 if (M >= SplitNumElements)
10263 V2BlendMask.push_back(-1);
10264 V1BlendMask.push_back(M);
10265 BlendMask.push_back(i);
10267 V2BlendMask.push_back(-1);
10268 V1BlendMask.push_back(-1);
10269 BlendMask.push_back(-1);
10273 // Because the lowering happens after all combining takes place, we need to
10274 // manually combine these blend masks as much as possible so that we create
10275 // a minimal number of high-level vector shuffle nodes.
10277 // First try just blending the halves of V1 or V2.
10278 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
10279 return DAG.getUNDEF(SplitVT);
10280 if (!UseLoV2 && !UseHiV2)
10281 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10282 if (!UseLoV1 && !UseHiV1)
10283 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10285 SDValue V1Blend, V2Blend;
10286 if (UseLoV1 && UseHiV1) {
10288 DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10290 // We only use half of V1 so map the usage down into the final blend mask.
10291 V1Blend = UseLoV1 ? LoV1 : HiV1;
10292 for (int i = 0; i < SplitNumElements; ++i)
10293 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
10294 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
10296 if (UseLoV2 && UseHiV2) {
10298 DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10300 // We only use half of V2 so map the usage down into the final blend mask.
10301 V2Blend = UseLoV2 ? LoV2 : HiV2;
10302 for (int i = 0; i < SplitNumElements; ++i)
10303 if (BlendMask[i] >= SplitNumElements)
10304 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
10306 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
10308 SDValue Lo = HalfBlend(LoMask);
10309 SDValue Hi = HalfBlend(HiMask);
10310 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
10313 /// \brief Either split a vector in halves or decompose the shuffles and the
10316 /// This is provided as a good fallback for many lowerings of non-single-input
10317 /// shuffles with more than one 128-bit lane. In those cases, we want to select
10318 /// between splitting the shuffle into 128-bit components and stitching those
10319 /// back together vs. extracting the single-input shuffles and blending those
10321 static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
10322 SDValue V2, ArrayRef<int> Mask,
10323 SelectionDAG &DAG) {
10324 assert(!isSingleInputShuffleMask(Mask) && "This routine must not be used to "
10325 "lower single-input shuffles as it "
10326 "could then recurse on itself.");
10327 int Size = Mask.size();
10329 // If this can be modeled as a broadcast of two elements followed by a blend,
10330 // prefer that lowering. This is especially important because broadcasts can
10331 // often fold with memory operands.
10332 auto DoBothBroadcast = [&] {
10333 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
10336 if (V2BroadcastIdx == -1)
10337 V2BroadcastIdx = M - Size;
10338 else if (M - Size != V2BroadcastIdx)
10340 } else if (M >= 0) {
10341 if (V1BroadcastIdx == -1)
10342 V1BroadcastIdx = M;
10343 else if (M != V1BroadcastIdx)
10348 if (DoBothBroadcast())
10349 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
10352 // If the inputs all stem from a single 128-bit lane of each input, then we
10353 // split them rather than blending because the split will decompose to
10354 // unusually few instructions.
10355 int LaneCount = VT.getSizeInBits() / 128;
10356 int LaneSize = Size / LaneCount;
10357 SmallBitVector LaneInputs[2];
10358 LaneInputs[0].resize(LaneCount, false);
10359 LaneInputs[1].resize(LaneCount, false);
10360 for (int i = 0; i < Size; ++i)
10362 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
10363 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
10364 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10366 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
10367 // that the decomposed single-input shuffles don't end up here.
10368 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10371 /// \brief Lower a vector shuffle crossing multiple 128-bit lanes as
10372 /// a permutation and blend of those lanes.
10374 /// This essentially blends the out-of-lane inputs to each lane into the lane
10375 /// from a permuted copy of the vector. This lowering strategy results in four
10376 /// instructions in the worst case for a single-input cross-lane shuffle, which
10377 /// is fewer than any other fully general cross-lane shuffle strategy I'm aware
10378 /// of. Special cases for each particular shuffle pattern should be handled
10379 /// prior to trying this lowering.
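/// As a rough sketch of the single-input case (illustrative): the vector's
/// 128-bit halves are swapped with a VPERM2X128, and the final shuffle then
/// blends the original and flipped copies so that every result lane reads
/// only in-lane elements.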
10380 static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT,
10381 SDValue V1, SDValue V2,
10382 ArrayRef<int> Mask,
10383 SelectionDAG &DAG) {
10384 // FIXME: This should probably be generalized for 512-bit vectors as well.
10385 assert(VT.getSizeInBits() == 256 && "Only for 256-bit vector shuffles!");
10386 int LaneSize = Mask.size() / 2;
10388 // If there are only inputs from one 128-bit lane, splitting will in fact be
10389 // less expensive. The flags track whether the given lane contains an element
10390 // that crosses to another lane.
10391 bool LaneCrossing[2] = {false, false};
10392 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10393 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10394 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
10395 if (!LaneCrossing[0] || !LaneCrossing[1])
10396 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10398 if (isSingleInputShuffleMask(Mask)) {
10399 SmallVector<int, 32> FlippedBlendMask;
10400 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10401 FlippedBlendMask.push_back(
10402 Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
10404 : Mask[i] % LaneSize +
10405 (i / LaneSize) * LaneSize + Size));
10407 // Flip the vector, and blend the results which should now be in-lane. The
10408 // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
10409 // 5 for the high source. The value 3 selects the high half of source 2 and
10410 // the value 2 selects the low half of source 2. We only use source 2 to
10411 // allow folding it into a memory operand.
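// Put differently (illustrative): with source 1 undef and V1 passed as
// source 2, the immediate 3 | (2 << 4) = 0x23 yields
// Flipped = <high(V1), low(V1)>, i.e. V1 with its 128-bit halves swapped.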
10412 unsigned PERMMask = 3 | 2 << 4;
10413 SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
10414 V1, DAG.getConstant(PERMMask, MVT::i8));
10415 return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
10418 // This now reduces to two single-input shuffles of V1 and V2 which at worst
10419 // will be handled by the above logic and a blend of the results, much like
10420 // other patterns in AVX.
10421 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10424 /// \brief Handle lowering 2-lane 128-bit shuffles.
10425 static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10426 SDValue V2, ArrayRef<int> Mask,
10427 const X86Subtarget *Subtarget,
10428 SelectionDAG &DAG) {
10429 // Blends are faster and handle all the non-lane-crossing cases.
10430 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask,
10434 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
10435 VT.getVectorNumElements() / 2);
10436 // Check for patterns which can be matched with a single insert of a 128-bit
10438 if (isShuffleEquivalent(V1, V2, Mask, 0, 1, 0, 1) ||
10439 isShuffleEquivalent(V1, V2, Mask, 0, 1, 4, 5)) {
10440 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10441 DAG.getIntPtrConstant(0));
10442 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
10443 Mask[2] < 4 ? V1 : V2, DAG.getIntPtrConstant(0));
10444 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10446 if (isShuffleEquivalent(V1, V2, Mask, 0, 1, 6, 7)) {
10447 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10448 DAG.getIntPtrConstant(0));
10449 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
10450 DAG.getIntPtrConstant(2));
10451 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10454 // Otherwise form a 128-bit permutation.
10455 // FIXME: Detect zero-vector inputs and use the VPERM2X128 to zero that half.
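// For example (illustrative): a v4f64 mask that widens to <1, 3> (that is,
// Mask[0] == 2 and Mask[2] == 6) gives PermMask = 1 | (3 << 4) = 0x31,
// selecting the high half of V1 for the low lane and the high half of V2
// for the high lane.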
10456 unsigned PermMask = Mask[0] / 2 | (Mask[2] / 2) << 4;
10457 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
10458 DAG.getConstant(PermMask, MVT::i8));
10461 /// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
10462 /// shuffling each lane.
10464 /// This will only succeed when the result of fixing the 128-bit lanes results
10465 /// in a single-input non-lane-crossing shuffle with a repeating shuffle mask in
10466 /// each 128-bit lane. This handles many cases where we can quickly blend away
10467 /// the lane crosses early and then use simpler shuffles within each lane.
10469 /// FIXME: It might be worthwhile at some point to support this without
10470 /// requiring the 128-bit lane-relative shuffles to be repeating, but currently
10471 /// in x86 only floating point has interesting non-repeating shuffles, and even
10472 /// those are still *marginally* more expensive.
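/// As an illustrative example: the v8f32 mask <1, 0, 3, 2, 13, 12, 15, 14>
/// first merges V1's low lane with V2's high lane via the 64-bit-element lane
/// shuffle <0, 1, 6, 7>, and then applies the repeating in-lane pattern
/// <1, 0, 3, 2> with no remaining lane crossings.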
10473 static SDValue lowerVectorShuffleByMerging128BitLanes(
10474 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
10475 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
10476 assert(!isSingleInputShuffleMask(Mask) &&
10477 "This is only useful with multiple inputs.");
10479 int Size = Mask.size();
10480 int LaneSize = 128 / VT.getScalarSizeInBits();
10481 int NumLanes = Size / LaneSize;
10482 assert(NumLanes > 1 && "Only handles 256-bit and wider shuffles.");
10484 // See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
10485 // check whether the in-128-bit lane shuffles share a repeating pattern.
10486 SmallVector<int, 4> Lanes;
10487 Lanes.resize(NumLanes, -1);
10488 SmallVector<int, 4> InLaneMask;
10489 InLaneMask.resize(LaneSize, -1);
10490 for (int i = 0; i < Size; ++i) {
10494 int j = i / LaneSize;
10496 if (Lanes[j] < 0) {
10497 // First entry we've seen for this lane.
10498 Lanes[j] = Mask[i] / LaneSize;
10499 } else if (Lanes[j] != Mask[i] / LaneSize) {
10500 // This doesn't match the lane selected previously!
10504 // Check that within each lane we have a consistent shuffle mask.
10505 int k = i % LaneSize;
10506 if (InLaneMask[k] < 0) {
10507 InLaneMask[k] = Mask[i] % LaneSize;
10508 } else if (InLaneMask[k] != Mask[i] % LaneSize) {
10509 // This doesn't fit a repeating in-lane mask.
10514 // First shuffle the lanes into place.
10515 MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
10516 VT.getSizeInBits() / 64);
10517 SmallVector<int, 8> LaneMask;
10518 LaneMask.resize(NumLanes * 2, -1);
10519 for (int i = 0; i < NumLanes; ++i)
10520 if (Lanes[i] >= 0) {
10521 LaneMask[2 * i + 0] = 2*Lanes[i] + 0;
10522 LaneMask[2 * i + 1] = 2*Lanes[i] + 1;
10525 V1 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V1);
10526 V2 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V2);
10527 SDValue LaneShuffle = DAG.getVectorShuffle(LaneVT, DL, V1, V2, LaneMask);
10529 // Cast it back to the type we actually want.
10530 LaneShuffle = DAG.getNode(ISD::BITCAST, DL, VT, LaneShuffle);
10532 // Now do a simple shuffle that isn't lane crossing.
10533 SmallVector<int, 8> NewMask;
10534 NewMask.resize(Size, -1);
10535 for (int i = 0; i < Size; ++i)
10537 NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
10538 assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) &&
10539 "Must not introduce lane crosses at this point!");
10541 return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask);
10544 /// \brief Test whether the specified input (0 or 1) is in-place blended by the
10547 /// This returns true if the elements from a particular input are already in the
10548 /// slots required by the given mask and require no permutation.
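/// For example (illustrative): with the v4 mask <0, 5, 2, 7> both inputs are
/// already in place, whereas changing element 0 to 1 would make input 0
/// require a permutation.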
10549 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
10550 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
10551 int Size = Mask.size();
10552 for (int i = 0; i < Size; ++i)
10553 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
10559 /// \brief Handle lowering of 4-lane 64-bit floating point shuffles.
10561 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
10562 /// isn't available.
10563 static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10564 const X86Subtarget *Subtarget,
10565 SelectionDAG &DAG) {
10567 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10568 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10569 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10570 ArrayRef<int> Mask = SVOp->getMask();
10571 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10573 SmallVector<int, 4> WidenedMask;
10574 if (canWidenShuffleElements(Mask, WidenedMask))
10575 return lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask, Subtarget,
10578 if (isSingleInputShuffleMask(Mask)) {
10579 // Check for being able to broadcast a single element.
10580 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f64, DL, V1,
10581 Mask, Subtarget, DAG))
10584 // Use low duplicate instructions for masks that match their pattern.
10585 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2))
10586 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
10588 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
10589 // Non-half-crossing single input shuffles can be lowered with an
10590 // interleaved permutation.
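// For example (illustrative): the mask <1, 0, 3, 2> produces the
// immediate 0b0101, swapping the two doubles within each 128-bit lane.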
10591 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
10592 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
10593 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
10594 DAG.getConstant(VPERMILPMask, MVT::i8));
10597 // With AVX2 we have direct support for this permutation.
10598 if (Subtarget->hasAVX2())
10599 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
10600 getV4X86ShuffleImm8ForMask(Mask, DAG));
10602 // Otherwise, fall back.
10603 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
10607 // X86 has dedicated unpack instructions that can handle specific blend
10608 // operations: UNPCKH and UNPCKL.
10609 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 2, 6))
10610 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V1, V2);
10611 if (isShuffleEquivalent(V1, V2, Mask, 1, 5, 3, 7))
10612 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V1, V2);
10614 // If we have a single input to the zero element, insert that into V1 if we
10615 // can do so cheaply.
10616 int NumV2Elements =
10617 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
10618 if (NumV2Elements == 1 && Mask[0] >= 4)
10619 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
10620 MVT::v4f64, DL, V1, V2, Mask, Subtarget, DAG))
10623 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
10627 // Check if the blend happens to exactly fit that of SHUFPD.
10628 if ((Mask[0] == -1 || Mask[0] < 2) &&
10629 (Mask[1] == -1 || (Mask[1] >= 4 && Mask[1] < 6)) &&
10630 (Mask[2] == -1 || (Mask[2] >= 2 && Mask[2] < 4)) &&
10631 (Mask[3] == -1 || Mask[3] >= 6)) {
10632 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 5) << 1) |
10633 ((Mask[2] == 3) << 2) | ((Mask[3] == 7) << 3);
10634 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V1, V2,
10635 DAG.getConstant(SHUFPDMask, MVT::i8));
10637 if ((Mask[0] == -1 || (Mask[0] >= 4 && Mask[0] < 6)) &&
10638 (Mask[1] == -1 || Mask[1] < 2) &&
10639 (Mask[2] == -1 || Mask[2] >= 6) &&
10640 (Mask[3] == -1 || (Mask[3] >= 2 && Mask[3] < 4))) {
10641 unsigned SHUFPDMask = (Mask[0] == 5) | ((Mask[1] == 1) << 1) |
10642 ((Mask[2] == 7) << 2) | ((Mask[3] == 3) << 3);
10643 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V2, V1,
10644 DAG.getConstant(SHUFPDMask, MVT::i8));
10647 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10648 // shuffle. However, if we have AVX2 and either input is already in place,
10649 // we will be able to shuffle the other input even across lanes in a single
10650 // instruction, so skip this pattern.
10651 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10652 isShuffleMaskInputInPlace(1, Mask))))
10653 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10654 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
10657 // If we have AVX2 then we always want to lower with a blend because at v4 we
10658 // can fully permute the elements.
10659 if (Subtarget->hasAVX2())
10660 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
10663 // Otherwise fall back on generic lowering.
10664 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, DAG);
10667 /// \brief Handle lowering of 4-lane 64-bit integer shuffles.
10669 /// This routine is only called when we have AVX2 and thus a reasonable
10670 /// instruction set for v4i64 shuffling.
10671 static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10672 const X86Subtarget *Subtarget,
10673 SelectionDAG &DAG) {
10675 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10676 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10677 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10678 ArrayRef<int> Mask = SVOp->getMask();
10679 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10680 assert(Subtarget->hasAVX2() && "We can only lower v4i64 with AVX2!");
10682 SmallVector<int, 4> WidenedMask;
10683 if (canWidenShuffleElements(Mask, WidenedMask))
10684 return lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask, Subtarget,
10687 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
10691 // Check for being able to broadcast a single element.
10692 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i64, DL, V1,
10693 Mask, Subtarget, DAG))
10696 // When the shuffle is mirrored between the 128-bit lanes of the vector, we can
10697 // use lower latency instructions that will operate on both 128-bit lanes.
10698 SmallVector<int, 2> RepeatedMask;
10699 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
10700 if (isSingleInputShuffleMask(Mask)) {
10701 int PSHUFDMask[] = {-1, -1, -1, -1};
10702 for (int i = 0; i < 2; ++i)
10703 if (RepeatedMask[i] >= 0) {
10704 PSHUFDMask[2 * i] = 2 * RepeatedMask[i];
10705 PSHUFDMask[2 * i + 1] = 2 * RepeatedMask[i] + 1;
10707 return DAG.getNode(
10708 ISD::BITCAST, DL, MVT::v4i64,
10709 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
10710 DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, V1),
10711 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
10715 // AVX2 provides a direct instruction for permuting a single input across
10717 if (isSingleInputShuffleMask(Mask))
10718 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
10719 getV4X86ShuffleImm8ForMask(Mask, DAG));
10721 // Try to use byte shift instructions.
10722 if (SDValue Shift = lowerVectorShuffleAsByteShift(
10723 DL, MVT::v4i64, V1, V2, Mask, DAG))
10726 // Use dedicated unpack instructions for masks that match their pattern.
10727 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 2, 6))
10728 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V1, V2);
10729 if (isShuffleEquivalent(V1, V2, Mask, 1, 5, 3, 7))
10730 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V1, V2);
10732 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10733 // shuffle. However, if we have AVX2 and either input is already in place,
10734 // we will be able to shuffle the other input even across lanes in a single
10735 // instruction, so skip this pattern.
10736 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10737 isShuffleMaskInputInPlace(1, Mask))))
10738 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10739 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
10742 // Otherwise fall back on generic blend lowering.
10743 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
10747 /// \brief Handle lowering of 8-lane 32-bit floating point shuffles.
10749 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
10750 /// isn't available.
10751 static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10752 const X86Subtarget *Subtarget,
10753 SelectionDAG &DAG) {
10755 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10756 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10757 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10758 ArrayRef<int> Mask = SVOp->getMask();
10759 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10761 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
10765 // Check for being able to broadcast a single element.
10766 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8f32, DL, V1,
10767 Mask, Subtarget, DAG))
10770 // If the shuffle mask is repeated in each 128-bit lane, we have many more
10771 // options to efficiently lower the shuffle.
10772 SmallVector<int, 4> RepeatedMask;
10773 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
10774 assert(RepeatedMask.size() == 4 &&
10775 "Repeated masks must be half the mask width!");
10777 // Use even/odd duplicate instructions for masks that match their pattern.
10778 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2, 4, 4, 6, 6))
10779 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
10780 if (isShuffleEquivalent(V1, V2, Mask, 1, 1, 3, 3, 5, 5, 7, 7))
10781 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
10783 if (isSingleInputShuffleMask(Mask))
10784 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
10785 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10787 // Use dedicated unpack instructions for masks that match their pattern.
10788 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10789 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V1, V2);
10790 if (isShuffleEquivalent(V1, V2, Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10791 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V1, V2);
10793 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
10794 // have already handled any direct blends. We also need to squash the
10795 // repeated mask into a simulated v4f32 mask.
10796 for (int i = 0; i < 4; ++i)
10797 if (RepeatedMask[i] >= 8)
10798 RepeatedMask[i] -= 4;
10799 return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
10802 // If we have a single input shuffle with different shuffle patterns in the
10803 // two 128-bit lanes, use the variable mask to VPERMILPS.
10804 if (isSingleInputShuffleMask(Mask)) {
10805 SDValue VPermMask[8];
10806 for (int i = 0; i < 8; ++i)
10807 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10808 : DAG.getConstant(Mask[i], MVT::i32);
10809 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
10810 return DAG.getNode(
10811 X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
10812 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask));
10814 if (Subtarget->hasAVX2())
10815 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32,
10816 DAG.getNode(ISD::BITCAST, DL, MVT::v8f32,
10817 DAG.getNode(ISD::BUILD_VECTOR, DL,
10818 MVT::v8i32, VPermMask)),
10821 // Otherwise, fall back.
10822 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
10826 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10827 // shuffle.
10828 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10829 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
10830 return Result;
10832 // If we have AVX2 then we always want to lower with a blend because at v8 we
10833 // can fully permute the elements.
10834 if (Subtarget->hasAVX2())
10835 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
10836 Mask, DAG);
10838 // Otherwise fall back on generic lowering.
10839 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, DAG);
10840 }
10842 /// \brief Handle lowering of 8-lane 32-bit integer shuffles.
10844 /// This routine is only called when we have AVX2 and thus a reasonable
10845 /// instruction set for v8i32 shuffling.
10846 static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10847 const X86Subtarget *Subtarget,
10848 SelectionDAG &DAG) {
10849 SDLoc DL(Op);
10850 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10851 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10852 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10853 ArrayRef<int> Mask = SVOp->getMask();
10854 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10855 assert(Subtarget->hasAVX2() && "We can only lower v8i32 with AVX2!");
10857 // Whenever we can lower this as a zext, that instruction is strictly faster
10858 // than any alternative. It also allows us to fold memory operands into the
10859 // shuffle in many cases.
10860 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2,
10861 Mask, Subtarget, DAG))
10862 return ZExt;
10864 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
10865 Subtarget, DAG))
10866 return Blend;
10868 // Check for being able to broadcast a single element.
10869 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i32, DL, V1,
10870 Mask, Subtarget, DAG))
10871 return Broadcast;
10873 // If the shuffle mask is repeated in each 128-bit lane we can use more
10874 // efficient instructions that mirror the shuffles across the two 128-bit
10875 // lanes.
10876 SmallVector<int, 4> RepeatedMask;
10877 if (is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask)) {
10878 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
10879 if (isSingleInputShuffleMask(Mask))
10880 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
10881 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10883 // Use dedicated unpack instructions for masks that match their pattern.
10884 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10885 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V1, V2);
10886 if (isShuffleEquivalent(V1, V2, Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10887 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V1, V2);
10888 }
10890 // Try to use bit shift instructions.
10891 if (SDValue Shift = lowerVectorShuffleAsBitShift(
10892 DL, MVT::v8i32, V1, V2, Mask, DAG))
10893 return Shift;
10895 // Try to use byte shift instructions.
10896 if (SDValue Shift = lowerVectorShuffleAsByteShift(
10897 DL, MVT::v8i32, V1, V2, Mask, DAG))
10898 return Shift;
10900 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
10901 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
10902 return Rotate;
10904 // If the shuffle patterns aren't repeated but it is a single input, directly
10905 // generate a cross-lane VPERMD instruction.
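// Note that VPERMD takes the index vector as its first source operand and the
// data vector as its second, hence the operand order of the node built below.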
10906 if (isSingleInputShuffleMask(Mask)) {
10907 SDValue VPermMask[8];
10908 for (int i = 0; i < 8; ++i)
10909 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10910 : DAG.getConstant(Mask[i], MVT::i32);
10911 return DAG.getNode(
10912 X86ISD::VPERMV, DL, MVT::v8i32,
10913 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
10914 }
10916 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10917 // shuffle.
10918 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10919 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
10920 return Result;
10922 // Otherwise fall back on generic blend lowering.
10923 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
10924 Mask, DAG);
10925 }
10927 /// \brief Handle lowering of 16-lane 16-bit integer shuffles.
10929 /// This routine is only called when we have AVX2 and thus a reasonable
10930 /// instruction set for v16i16 shuffling.
10931 static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10932 const X86Subtarget *Subtarget,
10933 SelectionDAG &DAG) {
10934 SDLoc DL(Op);
10935 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10936 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10937 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10938 ArrayRef<int> Mask = SVOp->getMask();
10939 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
10940 assert(Subtarget->hasAVX2() && "We can only lower v16i16 with AVX2!");
10942 // Whenever we can lower this as a zext, that instruction is strictly faster
10943 // than any alternative. It also allows us to fold memory operands into the
10944 // shuffle in many cases.
10945 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v16i16, V1, V2,
10946 Mask, Subtarget, DAG))
10947 return ZExt;
10949 // Check for being able to broadcast a single element.
10950 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i16, DL, V1,
10951 Mask, Subtarget, DAG))
10952 return Broadcast;
10954 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
10955 Subtarget, DAG))
10956 return Blend;
10958 // Use dedicated unpack instructions for masks that match their pattern.
10959 if (isShuffleEquivalent(V1, V2, Mask,
10960 // First 128-bit lane:
10961 0, 16, 1, 17, 2, 18, 3, 19,
10962 // Second 128-bit lane:
10963 8, 24, 9, 25, 10, 26, 11, 27))
10964 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i16, V1, V2);
10965 if (isShuffleEquivalent(V1, V2, Mask,
10966 // First 128-bit lane:
10967 4, 20, 5, 21, 6, 22, 7, 23,
10968 // Second 128-bit lane:
10969 12, 28, 13, 29, 14, 30, 15, 31))
10970 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i16, V1, V2);
10972 // Try to use bit shift instructions.
10973 if (SDValue Shift = lowerVectorShuffleAsBitShift(
10974 DL, MVT::v16i16, V1, V2, Mask, DAG))
10975 return Shift;
10977 // Try to use byte shift instructions.
10978 if (SDValue Shift = lowerVectorShuffleAsByteShift(
10979 DL, MVT::v16i16, V1, V2, Mask, DAG))
10980 return Shift;
10982 // Try to use byte rotation instructions.
10983 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
10984 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
10985 return Rotate;
10987 if (isSingleInputShuffleMask(Mask)) {
10988 // There are no generalized cross-lane shuffle operations available on i16
10989 // element types.
10990 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
10991 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
10992 Mask, DAG);
10994 SDValue PSHUFBMask[32];
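// PSHUFB operates on bytes, so each 16-bit mask element expands into a pair of
// byte indices (2*M and 2*M+1). Indices are relative to the 128-bit lane,
// which is why the high half of the mask is rebased by subtracting 8 below.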
10995 for (int i = 0; i < 16; ++i) {
10996 if (Mask[i] == -1) {
10997 PSHUFBMask[2 * i] = PSHUFBMask[2 * i + 1] = DAG.getUNDEF(MVT::i8);
10998 continue;
10999 }
11001 int M = i < 8 ? Mask[i] : Mask[i] - 8;
11002 assert(M >= 0 && M < 8 && "Invalid single-input mask!");
11003 PSHUFBMask[2 * i] = DAG.getConstant(2 * M, MVT::i8);
11004 PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, MVT::i8);
11005 }
11006 return DAG.getNode(
11007 ISD::BITCAST, DL, MVT::v16i16,
11008 DAG.getNode(
11009 X86ISD::PSHUFB, DL, MVT::v32i8,
11010 DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1),
11011 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask)));
11012 }
11014 // Try to simplify this by merging 128-bit lanes to enable a lane-based
11015 // shuffle.
11016 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11017 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
11018 return Result;
11020 // Otherwise fall back on generic lowering.
11021 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG);
11022 }
11024 /// \brief Handle lowering of 32-lane 8-bit integer shuffles.
11026 /// This routine is only called when we have AVX2 and thus a reasonable
11027 /// instruction set for v32i8 shuffling.
11028 static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11029 const X86Subtarget *Subtarget,
11030 SelectionDAG &DAG) {
11031 SDLoc DL(Op);
11032 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
11033 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
11034 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11035 ArrayRef<int> Mask = SVOp->getMask();
11036 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
11037 assert(Subtarget->hasAVX2() && "We can only lower v32i8 with AVX2!");
11039 // Whenever we can lower this as a zext, that instruction is strictly faster
11040 // than any alternative. It also allows us to fold memory operands into the
11041 // shuffle in many cases.
11042 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2,
11043 Mask, Subtarget, DAG))
11044 return ZExt;
11046 // Check for being able to broadcast a single element.
11047 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v32i8, DL, V1,
11048 Mask, Subtarget, DAG))
11049 return Broadcast;
11051 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
11052 Subtarget, DAG))
11053 return Blend;
11055 // Use dedicated unpack instructions for masks that match their pattern.
11056 // Note that these are repeated 128-bit lane unpacks, not unpacks across all
11057 // the lanes.
11058 if (isShuffleEquivalent(
11059 V1, V2, Mask,
11060 // First 128-bit lane:
11061 0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39,
11062 // Second 128-bit lane:
11063 16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55))
11064 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v32i8, V1, V2);
11065 if (isShuffleEquivalent(
11066 V1, V2, Mask,
11067 // First 128-bit lane:
11068 8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
11069 // Second 128-bit lane:
11070 24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63))
11071 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v32i8, V1, V2);
11073 // Try to use bit shift instructions.
11074 if (SDValue Shift = lowerVectorShuffleAsBitShift(
11075 DL, MVT::v32i8, V1, V2, Mask, DAG))
11076 return Shift;
11078 // Try to use byte shift instructions.
11079 if (SDValue Shift = lowerVectorShuffleAsByteShift(
11080 DL, MVT::v32i8, V1, V2, Mask, DAG))
11081 return Shift;
11083 // Try to use byte rotation instructions.
11084 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
11085 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
11086 return Rotate;
11088 if (isSingleInputShuffleMask(Mask)) {
11089 // There are no generalized cross-lane shuffle operations available on i8
11090 // element types.
11091 if (is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
11092 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2,
11093 Mask, DAG);
11095 SDValue PSHUFBMask[32];
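// As with v16i16 above: PSHUFB indexes bytes within each 128-bit lane, so mask
// entries for the high lane are rebased by subtracting 16 before being emitted.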
11096 for (int i = 0; i < 32; ++i)
11097 PSHUFBMask[i] =
11098 Mask[i] < 0
11099 ? DAG.getUNDEF(MVT::i8)
11100 : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16, MVT::i8);
11102 return DAG.getNode(
11103 X86ISD::PSHUFB, DL, MVT::v32i8, V1,
11104 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask));
11105 }
11107 // Try to simplify this by merging 128-bit lanes to enable a lane-based
11108 // shuffle.
11109 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11110 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
11111 return Result;
11113 // Otherwise fall back on generic lowering.
11114 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG);
11115 }
11117 /// \brief High-level routine to lower various 256-bit x86 vector shuffles.
11119 /// This routine either breaks down the specific type of a 256-bit x86 vector
11120 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
11121 /// together based on the available instructions.
11122 static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11123 MVT VT, const X86Subtarget *Subtarget,
11124 SelectionDAG &DAG) {
11125 SDLoc DL(Op);
11126 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11127 ArrayRef<int> Mask = SVOp->getMask();
11129 // There is a really nice hard cut-over between AVX1 and AVX2 that means we can
11130 // check for those subtargets here and avoid much of the subtarget querying in
11131 // the per-vector-type lowering routines. With AVX1 we have essentially *zero*
11132 // ability to manipulate a 256-bit vector with integer types. Since we'll use
11133 // floating point types there eventually, just immediately cast everything to
11134 // a float and operate entirely in that domain.
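// For example, on AVX1 a v8i32 shuffle is simply re-issued as a v8f32 shuffle
// of bitcast operands, and the result is bitcast back to the integer type.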
11135 if (VT.isInteger() && !Subtarget->hasAVX2()) {
11136 int ElementBits = VT.getScalarSizeInBits();
11137 if (ElementBits < 32)
11138 // No floating point type available, decompose into 128-bit vectors.
11139 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11141 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
11142 VT.getVectorNumElements());
11143 V1 = DAG.getNode(ISD::BITCAST, DL, FpVT, V1);
11144 V2 = DAG.getNode(ISD::BITCAST, DL, FpVT, V2);
11145 return DAG.getNode(ISD::BITCAST, DL, VT,
11146 DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
11149 switch (VT.SimpleTy) {
11150 case MVT::v4f64:
11151 return lowerV4F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11152 case MVT::v4i64:
11153 return lowerV4I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11154 case MVT::v8f32:
11155 return lowerV8F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11156 case MVT::v8i32:
11157 return lowerV8I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11158 case MVT::v16i16:
11159 return lowerV16I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
11160 case MVT::v32i8:
11161 return lowerV32I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
11163 default:
11164 llvm_unreachable("Not a valid 256-bit x86 vector type!");
11165 }
11166 }
11168 /// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
11169 static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11170 const X86Subtarget *Subtarget,
11171 SelectionDAG &DAG) {
11173 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11174 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11175 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11176 ArrayRef<int> Mask = SVOp->getMask();
11177 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11179 // X86 has dedicated unpack instructions that can handle specific blend
11180 // operations: UNPCKH and UNPCKL.
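// For v8f64 these interleave within each 128-bit lane, e.g. UNPCKL produces
// <V1[0],V2[0], V1[2],V2[2], V1[4],V2[4], V1[6],V2[6]>, which is exactly the
// <0,8,2,10,4,12,6,14> mask tested below.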
11181 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 2, 10, 4, 12, 6, 14))
11182 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f64, V1, V2);
11183 if (isShuffleEquivalent(V1, V2, Mask, 1, 9, 3, 11, 5, 13, 7, 15))
11184 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f64, V1, V2);
11186 // FIXME: Implement direct support for this type!
11187 return splitAndLowerVectorShuffle(DL, MVT::v8f64, V1, V2, Mask, DAG);
11190 /// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
11191 static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11192 const X86Subtarget *Subtarget,
11193 SelectionDAG &DAG) {
11195 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11196 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11197 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11198 ArrayRef<int> Mask = SVOp->getMask();
11199 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11201 // Use dedicated unpack instructions for masks that match their pattern.
11202 if (isShuffleEquivalent(V1, V2, Mask,
11203 0, 16, 1, 17, 4, 20, 5, 21,
11204 8, 24, 9, 25, 12, 28, 13, 29))
11205 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16f32, V1, V2);
11206 if (isShuffleEquivalent(V1, V2, Mask,
11207 2, 18, 3, 19, 6, 22, 7, 23,
11208 10, 26, 11, 27, 14, 30, 15, 31))
11209 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16f32, V1, V2);
11211 // FIXME: Implement direct support for this type!
11212 return splitAndLowerVectorShuffle(DL, MVT::v16f32, V1, V2, Mask, DAG);
11215 /// \brief Handle lowering of 8-lane 64-bit integer shuffles.
11216 static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11217 const X86Subtarget *Subtarget,
11218 SelectionDAG &DAG) {
11220 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11221 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11222 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11223 ArrayRef<int> Mask = SVOp->getMask();
11224 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11226 // X86 has dedicated unpack instructions that can handle specific blend
11227 // operations: UNPCKH and UNPCKL.
11228 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 2, 10, 4, 12, 6, 14))
11229 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i64, V1, V2);
11230 if (isShuffleEquivalent(V1, V2, Mask, 1, 9, 3, 11, 5, 13, 7, 15))
11231 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i64, V1, V2);
11233 // FIXME: Implement direct support for this type!
11234 return splitAndLowerVectorShuffle(DL, MVT::v8i64, V1, V2, Mask, DAG);
11237 /// \brief Handle lowering of 16-lane 32-bit integer shuffles.
11238 static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11239 const X86Subtarget *Subtarget,
11240 SelectionDAG &DAG) {
11242 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11243 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11244 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11245 ArrayRef<int> Mask = SVOp->getMask();
11246 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11248 // Use dedicated unpack instructions for masks that match their pattern.
11249 if (isShuffleEquivalent(V1, V2, Mask,
11250 0, 16, 1, 17, 4, 20, 5, 21,
11251 8, 24, 9, 25, 12, 28, 13, 29))
11252 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i32, V1, V2);
11253 if (isShuffleEquivalent(V1, V2, Mask,
11254 2, 18, 3, 19, 6, 22, 7, 23,
11255 10, 26, 11, 27, 14, 30, 15, 31))
11256 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i32, V1, V2);
11258 // FIXME: Implement direct support for this type!
11259 return splitAndLowerVectorShuffle(DL, MVT::v16i32, V1, V2, Mask, DAG);
11262 /// \brief Handle lowering of 32-lane 16-bit integer shuffles.
11263 static SDValue lowerV32I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11264 const X86Subtarget *Subtarget,
11265 SelectionDAG &DAG) {
11267 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11268 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11269 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11270 ArrayRef<int> Mask = SVOp->getMask();
11271 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
11272 assert(Subtarget->hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
11274 // FIXME: Implement direct support for this type!
11275 return splitAndLowerVectorShuffle(DL, MVT::v32i16, V1, V2, Mask, DAG);
11278 /// \brief Handle lowering of 64-lane 8-bit integer shuffles.
11279 static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11280 const X86Subtarget *Subtarget,
11281 SelectionDAG &DAG) {
11283 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11284 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11285 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11286 ArrayRef<int> Mask = SVOp->getMask();
11287 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
11288 assert(Subtarget->hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
11290 // FIXME: Implement direct support for this type!
11291 return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
11294 /// \brief High-level routine to lower various 512-bit x86 vector shuffles.
11296 /// This routine either breaks down the specific type of a 512-bit x86 vector
11297 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
11298 /// together based on the available instructions.
11299 static SDValue lower512BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11300 MVT VT, const X86Subtarget *Subtarget,
11301 SelectionDAG &DAG) {
11302 SDLoc DL(Op);
11303 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11304 ArrayRef<int> Mask = SVOp->getMask();
11305 assert(Subtarget->hasAVX512() &&
11306 "Cannot lower 512-bit vectors w/ basic ISA!");
11308 // Check for being able to broadcast a single element.
11309 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(VT.SimpleTy, DL, V1,
11310 Mask, Subtarget, DAG))
11311 return Broadcast;
11313 // Dispatch to each element type for lowering. If we don't have support for
11314 // specific element type shuffles at 512 bits, immediately split them and
11315 // lower them. Each lowering routine of a given type is allowed to assume that
11316 // the requisite ISA extensions for that element type are available.
11317 switch (VT.SimpleTy) {
11318 case MVT::v8f64:
11319 return lowerV8F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11320 case MVT::v16f32:
11321 return lowerV16F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11322 case MVT::v8i64:
11323 return lowerV8I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11324 case MVT::v16i32:
11325 return lowerV16I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11326 case MVT::v32i16:
11327 if (Subtarget->hasBWI())
11328 return lowerV32I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
11329 break;
11330 case MVT::v64i8:
11331 if (Subtarget->hasBWI())
11332 return lowerV64I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
11333 break;
11335 default:
11336 llvm_unreachable("Not a valid 512-bit x86 vector type!");
11337 }
11339 // Otherwise fall back on splitting.
11340 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11341 }
11343 /// \brief Top-level lowering for x86 vector shuffles.
11345 /// This handles decomposition, canonicalization, and lowering of all x86
11346 /// vector shuffles. Most of the specific lowering strategies are encapsulated
11347 /// above in helper routines. The canonicalization attempts to widen shuffles
11348 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
11349 /// s.t. only one of the two inputs needs to be tested, etc.
11350 static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
11351 SelectionDAG &DAG) {
11352 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11353 ArrayRef<int> Mask = SVOp->getMask();
11354 SDValue V1 = Op.getOperand(0);
11355 SDValue V2 = Op.getOperand(1);
11356 MVT VT = Op.getSimpleValueType();
11357 int NumElements = VT.getVectorNumElements();
11358 SDLoc dl(Op);
11360 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
11362 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
11363 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
11364 if (V1IsUndef && V2IsUndef)
11365 return DAG.getUNDEF(VT);
11367 // When we create a shuffle node we put the UNDEF node to second operand,
11368 // but in some cases the first operand may be transformed to UNDEF.
11369 // In this case we should just commute the node.
11370 if (V1IsUndef)
11371 return DAG.getCommutedVectorShuffle(*SVOp);
11373 // Check for non-undef masks pointing at an undef vector and make the masks
11374 // undef as well. This makes it easier to match the shuffle based solely on
11375 // the mask.
11376 if (V2IsUndef)
11377 for (int M : Mask)
11378 if (M >= NumElements) {
11379 SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
11380 for (int &M : NewMask)
11381 if (M >= NumElements)
11382 M = -1;
11383 return DAG.getVectorShuffle(VT, dl, V1, V2, NewMask);
11384 }
11386 // We actually see shuffles that are entirely re-arrangements of a set of
11387 // zero inputs. This mostly happens while decomposing complex shuffles into
11388 // simple ones. Directly lower these as a buildvector of zeros.
11389 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
11390 if (Zeroable.all())
11391 return getZeroVector(VT, Subtarget, DAG, dl);
11393 // Try to collapse shuffles into using a vector type with fewer elements but
11394 // wider element types. We cap this to not form integers or floating point
11395 // elements wider than 64 bits, but it might be interesting to form i128
11396 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
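// For instance, a v4i32 shuffle with mask <0,1,4,5> can be widened to a v2i64
// shuffle with mask <0,2>, provided the wider type is legal on this subtarget.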
11397 SmallVector<int, 16> WidenedMask;
11398 if (VT.getScalarSizeInBits() < 64 &&
11399 canWidenShuffleElements(Mask, WidenedMask)) {
11400 MVT NewEltVT = VT.isFloatingPoint()
11401 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
11402 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
11403 MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
11404 // Make sure that the new vector type is legal. For example, v2f64 isn't
11405 // legal on SSE1.
11406 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
11407 V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
11408 V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
11409 return DAG.getNode(ISD::BITCAST, dl, VT,
11410 DAG.getVectorShuffle(NewVT, dl, V1, V2, WidenedMask));
11411 }
11412 }
11414 int NumV1Elements = 0, NumUndefElements = 0, NumV2Elements = 0;
11415 for (int M : SVOp->getMask())
11416 if (M < 0)
11417 ++NumUndefElements;
11418 else if (M < NumElements)
11419 ++NumV1Elements;
11420 else
11421 ++NumV2Elements;
11423 // Commute the shuffle as needed such that more elements come from V1 than
11424 // V2. This allows us to match the shuffle pattern strictly on how many
11425 // elements come from V1 without handling the symmetric cases.
11426 if (NumV2Elements > NumV1Elements)
11427 return DAG.getCommutedVectorShuffle(*SVOp);
11429 // When the number of V1 and V2 elements are the same, try to minimize the
11430 // number of uses of V2 in the low half of the vector. When that is tied,
11431 // ensure that the sum of indices for V1 is equal to or lower than the sum of
11432 // indices for V2. When those are equal, try to ensure that the number of odd
11433 // indices for V1 is lower than the number of odd indices for V2.
11434 if (NumV1Elements == NumV2Elements) {
11435 int LowV1Elements = 0, LowV2Elements = 0;
11436 for (int M : SVOp->getMask().slice(0, NumElements / 2))
11437 if (M >= NumElements)
11438 ++LowV2Elements;
11439 else if (M >= 0)
11440 ++LowV1Elements;
11441 if (LowV2Elements > LowV1Elements) {
11442 return DAG.getCommutedVectorShuffle(*SVOp);
11443 } else if (LowV2Elements == LowV1Elements) {
11444 int SumV1Indices = 0, SumV2Indices = 0;
11445 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11446 if (SVOp->getMask()[i] >= NumElements)
11447 SumV2Indices += i;
11448 else if (SVOp->getMask()[i] >= 0)
11449 SumV1Indices += i;
11450 if (SumV2Indices < SumV1Indices) {
11451 return DAG.getCommutedVectorShuffle(*SVOp);
11452 } else if (SumV2Indices == SumV1Indices) {
11453 int NumV1OddIndices = 0, NumV2OddIndices = 0;
11454 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11455 if (SVOp->getMask()[i] >= NumElements)
11456 NumV2OddIndices += i % 2;
11457 else if (SVOp->getMask()[i] >= 0)
11458 NumV1OddIndices += i % 2;
11459 if (NumV2OddIndices < NumV1OddIndices)
11460 return DAG.getCommutedVectorShuffle(*SVOp);
11461 }
11462 }
11463 }
11465 // For each vector width, delegate to a specialized lowering routine.
11466 if (VT.getSizeInBits() == 128)
11467 return lower128BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11469 if (VT.getSizeInBits() == 256)
11470 return lower256BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11472 // Force AVX-512 vectors to be scalarized for now.
11473 // FIXME: Implement AVX-512 support!
11474 if (VT.getSizeInBits() == 512)
11475 return lower512BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11477 llvm_unreachable("Unimplemented!");
11478 }
11481 //===----------------------------------------------------------------------===//
11482 // Legacy vector shuffle lowering
11484 // This code is the legacy code handling vector shuffles until the above
11485 // replaces its functionality and performance.
11486 //===----------------------------------------------------------------------===//
11488 static bool isBlendMask(ArrayRef<int> MaskVals, MVT VT, bool hasSSE41,
11489 bool hasInt256, unsigned *MaskOut = nullptr) {
11490 MVT EltVT = VT.getVectorElementType();
11492 // There is no blend with immediate in AVX-512.
11493 if (VT.is512BitVector())
11494 return false;
11496 if (!hasSSE41 || EltVT == MVT::i8)
11497 return false;
11498 if (!hasInt256 && VT == MVT::v16i16)
11499 return false;
11501 unsigned MaskValue = 0;
11502 unsigned NumElems = VT.getVectorNumElements();
11503 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
11504 unsigned NumLanes = (NumElems - 1) / 8 + 1;
11505 unsigned NumElemsInLane = NumElems / NumLanes;
11507 // Blend for v16i16 should be symmetric for both lanes.
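// Bit i of the resulting immediate is set when result element i is taken from
// V2 and clear when it comes from V1 (mirrored into the second lane for
// v16i16).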
11508 for (unsigned i = 0; i < NumElemsInLane; ++i) {
11510 int SndLaneEltIdx = (NumLanes == 2) ? MaskVals[i + NumElemsInLane] : -1;
11511 int EltIdx = MaskVals[i];
11513 if ((EltIdx < 0 || EltIdx == (int)i) &&
11514 (SndLaneEltIdx < 0 || SndLaneEltIdx == (int)(i + NumElemsInLane)))
11517 if (((unsigned)EltIdx == (i + NumElems)) &&
11518 (SndLaneEltIdx < 0 ||
11519 (unsigned)SndLaneEltIdx == i + NumElems + NumElemsInLane))
11520 MaskValue |= (1 << i);
11521 else
11522 return false;
11523 }
11525 if (MaskOut)
11526 *MaskOut = MaskValue;
11527 return true;
11528 }
11530 // Try to lower a shuffle node into a simple blend instruction.
11531 // This function assumes isBlendMask returns true for this
11532 // ShuffleVectorSDNode.
11533 static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
11534 unsigned MaskValue,
11535 const X86Subtarget *Subtarget,
11536 SelectionDAG &DAG) {
11537 MVT VT = SVOp->getSimpleValueType(0);
11538 MVT EltVT = VT.getVectorElementType();
11539 assert(isBlendMask(SVOp->getMask(), VT, Subtarget->hasSSE41(),
11540 Subtarget->hasInt256()) &&
11541 "Trying to lower a VECTOR_SHUFFLE to a Blend but "
11542 "with the wrong mask");
11543 SDValue V1 = SVOp->getOperand(0);
11544 SDValue V2 = SVOp->getOperand(1);
11545 SDLoc dl(SVOp);
11546 unsigned NumElems = VT.getVectorNumElements();
11548 // Convert i32 vectors to floating point if it is not AVX2.
11549 // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
11550 MVT BlendVT = VT;
11551 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
11552 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
11553 NumElems);
11554 V1 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V1);
11555 V2 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V2);
11556 }
11558 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, V1, V2,
11559 DAG.getConstant(MaskValue, MVT::i32));
11560 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
11563 /// In vector type \p VT, return true if the element at index \p InputIdx
11564 /// falls on a different 128-bit lane than \p OutputIdx.
11565 static bool ShuffleCrosses128bitLane(MVT VT, unsigned InputIdx,
11566 unsigned OutputIdx) {
11567 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
11568 return InputIdx * EltSize / 128 != OutputIdx * EltSize / 128;
11571 /// Generate a PSHUFB if possible. Selects elements from \p V1 according to
11572 /// \p MaskVals. MaskVals[OutputIdx] = InputIdx specifies that we want to
11573 /// shuffle the element at InputIdx in V1 to OutputIdx in the result. If \p
11574 /// MaskVals refers to elements outside of \p V1 or is undef (-1), insert a
11575 /// zero.
11576 static SDValue getPSHUFB(ArrayRef<int> MaskVals, SDValue V1, SDLoc &dl,
11577 SelectionDAG &DAG) {
11578 MVT VT = V1.getSimpleValueType();
11579 assert(VT.is128BitVector() || VT.is256BitVector());
11581 MVT EltVT = VT.getVectorElementType();
11582 unsigned EltSizeInBytes = EltVT.getSizeInBits() / 8;
11583 unsigned NumElts = VT.getVectorNumElements();
11585 SmallVector<SDValue, 32> PshufbMask;
11586 for (unsigned OutputIdx = 0; OutputIdx < NumElts; ++OutputIdx) {
11587 int InputIdx = MaskVals[OutputIdx];
11588 unsigned InputByteIdx;
11590 if (InputIdx < 0 || NumElts <= (unsigned)InputIdx)
11591 InputByteIdx = 0x80;
11593 // Cross lane is not allowed.
11594 if (ShuffleCrosses128bitLane(VT, InputIdx, OutputIdx))
11596 InputByteIdx = InputIdx * EltSizeInBytes;
11597 // Index is a byte offset within the 128-bit lane.
11598 InputByteIdx &= 0xf;
11599 }
11601 for (unsigned j = 0; j < EltSizeInBytes; ++j) {
11602 PshufbMask.push_back(DAG.getConstant(InputByteIdx, MVT::i8));
11603 if (InputByteIdx != 0x80)
11604 ++InputByteIdx;
11605 }
11606 }
11608 MVT ShufVT = MVT::getVectorVT(MVT::i8, PshufbMask.size());
11610 V1 = DAG.getNode(ISD::BITCAST, dl, ShufVT, V1);
11611 return DAG.getNode(X86ISD::PSHUFB, dl, ShufVT, V1,
11612 DAG.getNode(ISD::BUILD_VECTOR, dl, ShufVT, PshufbMask));
11615 // v8i16 shuffles - Prefer shuffles in the following order:
11616 // 1. [all] pshuflw, pshufhw, optional move
11617 // 2. [ssse3] 1 x pshufb
11618 // 3. [ssse3] 2 x pshufb + 1 x por
11619 // 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
11620 static SDValue
11621 LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
11622 SelectionDAG &DAG) {
11623 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11624 SDValue V1 = SVOp->getOperand(0);
11625 SDValue V2 = SVOp->getOperand(1);
11627 SmallVector<int, 8> MaskVals;
11629 // Determine if more than 1 of the words in each of the low and high quadwords
11630 // of the result come from the same quadword of one of the two inputs. Undef
11631 // mask values count as coming from any quadword, for better codegen.
11633 // Lo/HiQuad[i] = j indicates how many words from the ith quad of the input
11634 // feeds this quad. For i, 0 and 1 refer to V1, 2 and 3 refer to V2.
11635 unsigned LoQuad[] = { 0, 0, 0, 0 };
11636 unsigned HiQuad[] = { 0, 0, 0, 0 };
11637 // Indices of quads used.
11638 std::bitset<4> InputQuads;
11639 for (unsigned i = 0; i < 8; ++i) {
11640 unsigned *Quad = i < 4 ? LoQuad : HiQuad;
11641 int EltIdx = SVOp->getMaskElt(i);
11642 MaskVals.push_back(EltIdx);
11650 ++Quad[EltIdx / 4];
11651 InputQuads.set(EltIdx / 4);
11654 int BestLoQuad = -1;
11655 unsigned MaxQuad = 1;
11656 for (unsigned i = 0; i < 4; ++i) {
11657 if (LoQuad[i] > MaxQuad) {
11659 MaxQuad = LoQuad[i];
11663 int BestHiQuad = -1;
11665 for (unsigned i = 0; i < 4; ++i) {
11666 if (HiQuad[i] > MaxQuad) {
11668 MaxQuad = HiQuad[i];
11672 // For SSSE3, if all 8 words of the result come from only 1 quadword of each
11673 // of the two input vectors, shuffle them into one input vector so only a
11674 // single pshufb instruction is necessary. If there are more than 2 input
11675 // quads, disable the next transformation since it does not help SSSE3.
11676 bool V1Used = InputQuads[0] || InputQuads[1];
11677 bool V2Used = InputQuads[2] || InputQuads[3];
11678 if (Subtarget->hasSSSE3()) {
11679 if (InputQuads.count() == 2 && V1Used && V2Used) {
11680 BestLoQuad = InputQuads[0] ? 0 : 1;
11681 BestHiQuad = InputQuads[2] ? 2 : 3;
11683 if (InputQuads.count() > 2) {
11689 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update
11690 // the shuffle mask. If a quad is scored as -1, that means that it contains
11691 // words from all 4 input quadwords.
11693 if (BestLoQuad >= 0 || BestHiQuad >= 0) {
11695 BestLoQuad < 0 ? 0 : BestLoQuad,
11696 BestHiQuad < 0 ? 1 : BestHiQuad
11698 NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
11699 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
11700 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
11701 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);
11703 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
11704 // source words for the shuffle, to aid later transformations.
11705 bool AllWordsInNewV = true;
11706 bool InOrder[2] = { true, true };
11707 for (unsigned i = 0; i != 8; ++i) {
11708 int idx = MaskVals[i];
11710 InOrder[i/4] = false;
11711 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
11713 AllWordsInNewV = false;
11717 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV;
11718 if (AllWordsInNewV) {
11719 for (int i = 0; i != 8; ++i) {
11720 int idx = MaskVals[i];
11723 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4;
11724 if ((idx != i) && idx < 4)
11726 if ((idx != i) && idx > 3)
11735 // If we've eliminated the use of V2, and the new mask is a pshuflw or
11736 // pshufhw, that's as cheap as it gets. Return the new shuffle.
11737 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
11738 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW;
11739 unsigned TargetMask = 0;
11740 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
11741 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
11742 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11743 TargetMask = pshufhw ? getShufflePSHUFHWImmediate(SVOp):
11744 getShufflePSHUFLWImmediate(SVOp);
11745 V1 = NewV.getOperand(0);
11746 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
11750 // Promote splats to a larger type which usually leads to more efficient code.
11751 // FIXME: Is this true if pshufb is available?
11752 if (SVOp->isSplat())
11753 return PromoteSplat(SVOp, DAG);
11755 // If we have SSSE3, and all words of the result are from 1 input vector,
11756 // case 2 is generated, otherwise case 3 is generated. If no SSSE3
11757 // is present, fall back to case 4.
11758 if (Subtarget->hasSSSE3()) {
11759 SmallVector<SDValue,16> pshufbMask;
11761 // If we have elements from both input vectors, set the high bit of the
11762 // shuffle mask element to zero out elements that come from V2 in the V1
11763 // mask, and elements that come from V1 in the V2 mask, so that the two
11764 // results can be OR'd together.
11765 bool TwoInputs = V1Used && V2Used;
11766 V1 = getPSHUFB(MaskVals, V1, dl, DAG);
11768 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11770 // Calculate the shuffle mask for the second input, shuffle it, and
11771 // OR it with the first shuffled input.
11772 CommuteVectorShuffleMask(MaskVals, 8);
11773 V2 = getPSHUFB(MaskVals, V2, dl, DAG);
11774 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
11775 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11778 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
11779 // and update MaskVals with new element order.
11780 std::bitset<8> InOrder;
11781 if (BestLoQuad >= 0) {
11782 int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 };
11783 for (int i = 0; i != 4; ++i) {
11784 int idx = MaskVals[i];
11787 } else if ((idx / 4) == BestLoQuad) {
11788 MaskV[i] = idx & 3;
11792 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11795 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11796 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11797 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
11798 NewV.getOperand(0),
11799 getShufflePSHUFLWImmediate(SVOp), DAG);
11803 // If BestHi >= 0, generate a pshufhw to put the high elements in order,
11804 // and update MaskVals with the new element order.
11805 if (BestHiQuad >= 0) {
11806 int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 };
11807 for (unsigned i = 4; i != 8; ++i) {
11808 int idx = MaskVals[i];
11811 } else if ((idx / 4) == BestHiQuad) {
11812 MaskV[i] = (idx & 3) + 4;
11816 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11819 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11820 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11821 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
11822 NewV.getOperand(0),
11823 getShufflePSHUFHWImmediate(SVOp), DAG);
11827 // In case BestHi & BestLo were both -1, which means each quadword has a word
11828 // from each of the four input quadwords, calculate the InOrder bitvector now
11829 // before falling through to the insert/extract cleanup.
11830 if (BestLoQuad == -1 && BestHiQuad == -1) {
11832 for (int i = 0; i != 8; ++i)
11833 if (MaskVals[i] < 0 || MaskVals[i] == i)
11837 // The other elements are put in the right place using pextrw and pinsrw.
11838 for (unsigned i = 0; i != 8; ++i) {
11841 int EltIdx = MaskVals[i];
11844 SDValue ExtOp = (EltIdx < 8) ?
11845 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
11846 DAG.getIntPtrConstant(EltIdx)) :
11847 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
11848 DAG.getIntPtrConstant(EltIdx - 8));
11849 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
11850 DAG.getIntPtrConstant(i));
11855 /// \brief v16i16 shuffles
11857 /// FIXME: We only support generation of a single pshufb currently. We can
11858 /// generalize the other applicable cases from LowerVECTOR_SHUFFLEv8i16 as
11859 /// well (e.g. 2 x pshufb + 1 x por).
11860 static SDValue
11861 LowerVECTOR_SHUFFLEv16i16(SDValue Op, SelectionDAG &DAG) {
11862 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11863 SDValue V1 = SVOp->getOperand(0);
11864 SDValue V2 = SVOp->getOperand(1);
11867 if (V2.getOpcode() != ISD::UNDEF)
11868 return SDValue();
11870 SmallVector<int, 16> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
11871 return getPSHUFB(MaskVals, V1, dl, DAG);
11874 // v16i8 shuffles - Prefer shuffles in the following order:
11875 // 1. [ssse3] 1 x pshufb
11876 // 2. [ssse3] 2 x pshufb + 1 x por
11877 // 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw
11878 static SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
11879 const X86Subtarget* Subtarget,
11880 SelectionDAG &DAG) {
11881 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11882 SDValue V1 = SVOp->getOperand(0);
11883 SDValue V2 = SVOp->getOperand(1);
11885 ArrayRef<int> MaskVals = SVOp->getMask();
11887 // Promote splats to a larger type which usually leads to more efficient code.
11888 // FIXME: Is this true if pshufb is available?
11889 if (SVOp->isSplat())
11890 return PromoteSplat(SVOp, DAG);
11892 // If we have SSSE3, case 1 is generated when all result bytes come from
11893 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
11894 // present, fall back to case 3.
11896 // If SSSE3, use 1 pshufb instruction per vector with elements in the result.
11897 if (Subtarget->hasSSSE3()) {
11898 SmallVector<SDValue,16> pshufbMask;
11900 // If all result elements are from one input vector, then only translate
11901 // undef mask values to 0x80 (zero out result) in the pshufb mask.
11903 // Otherwise, we have elements from both input vectors, and must zero out
11904 // elements that come from V2 in the first mask, and V1 in the second mask
11905 // so that we can OR them together.
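// (PSHUFB treats any mask byte with its high bit set, e.g. 0x80, as "write a
// zero byte", which is what makes this masking-then-OR approach work.)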
11906 for (unsigned i = 0; i != 16; ++i) {
11907 int EltIdx = MaskVals[i];
11908 if (EltIdx < 0 || EltIdx >= 16)
11909 EltIdx = 0x80;
11910 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
11911 }
11912 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
11913 DAG.getNode(ISD::BUILD_VECTOR, dl,
11914 MVT::v16i8, pshufbMask));
11916 // As PSHUFB will zero elements with negative indices, it's safe to ignore
11917 // the 2nd operand if it's undefined or zero.
11918 if (V2.getOpcode() == ISD::UNDEF ||
11919 ISD::isBuildVectorAllZeros(V2.getNode()))
11920 return V1;
11922 // Calculate the shuffle mask for the second input, shuffle it, and
11923 // OR it with the first shuffled input.
11924 pshufbMask.clear();
11925 for (unsigned i = 0; i != 16; ++i) {
11926 int EltIdx = MaskVals[i];
11927 EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16;
11928 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
11930 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
11931 DAG.getNode(ISD::BUILD_VECTOR, dl,
11932 MVT::v16i8, pshufbMask));
11933 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
11936 // No SSSE3 - Calculate in place words and then fix all out of place words
11937 // With 0-16 extracts & inserts. Worst case is 16 bytes out of order from
11938 // the 16 different words that comprise the two doublequadword input vectors.
11939 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11940 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
11942 for (int i = 0; i != 8; ++i) {
11943 int Elt0 = MaskVals[i*2];
11944 int Elt1 = MaskVals[i*2+1];
11946 // This word of the result is all undef, skip it.
11947 if (Elt0 < 0 && Elt1 < 0)
11950 // This word of the result is already in the correct place, skip it.
11951 if ((Elt0 == i*2) && (Elt1 == i*2+1))
11954 SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
11955 SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
11958 // If Elt0 and Elt1 are defined, are consecutive, and can be loaded
11959 // together using a single extract, load it and store it.
11960 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
11961 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
11962 DAG.getIntPtrConstant(Elt1 / 2));
11963 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
11964 DAG.getIntPtrConstant(i));
11968 // If Elt1 is defined, extract it from the appropriate source. If the
11969 // source byte is not also odd, shift the extracted word left 8 bits
11970 // otherwise clear the bottom 8 bits if we need to do an or.
11972 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
11973 DAG.getIntPtrConstant(Elt1 / 2));
11974 if ((Elt1 & 1) == 0)
11975 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
11977 TLI.getShiftAmountTy(InsElt.getValueType())));
11978 else if (Elt0 >= 0)
11979 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
11980 DAG.getConstant(0xFF00, MVT::i16));
11982 // If Elt0 is defined, extract it from the appropriate source. If the
11983 // source byte is not also even, shift the extracted word right 8 bits. If
11984 // Elt1 was also defined, OR the extracted values together before
11985 // inserting them in the result.
11987 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
11988 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
11989 if ((Elt0 & 1) != 0)
11990 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
11992 TLI.getShiftAmountTy(InsElt0.getValueType())));
11993 else if (Elt1 >= 0)
11994 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
11995 DAG.getConstant(0x00FF, MVT::i16));
11996 InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
11999 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
12000 DAG.getIntPtrConstant(i));
12002 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
12005 // v32i8 shuffles - Translate to VPSHUFB if possible.
12006 static
12007 SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp,
12008 const X86Subtarget *Subtarget,
12009 SelectionDAG &DAG) {
12010 MVT VT = SVOp->getSimpleValueType(0);
12011 SDValue V1 = SVOp->getOperand(0);
12012 SDValue V2 = SVOp->getOperand(1);
12014 SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
12016 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
12017 bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode());
12018 bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode());
12020 // VPSHUFB may be generated if
12021 // (1) one of the input vectors is undefined or zeroinitializer.
12022 // The mask value 0x80 puts 0 in the corresponding slot of the vector.
12023 // And (2) the mask indexes don't cross the 128-bit lane.
12024 if (VT != MVT::v32i8 || !Subtarget->hasInt256() ||
12025 (!V2IsUndef && !V2IsAllZero && !V1IsAllZero))
12028 if (V1IsAllZero && !V2IsAllZero) {
12029 CommuteVectorShuffleMask(MaskVals, 32);
12032 return getPSHUFB(MaskVals, V1, dl, DAG);
12035 /// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
12036 /// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
12037 /// done when every pair / quad of shuffle mask elements point to elements in
12038 /// the right sequence. e.g.
12039 /// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
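/// In this example every marked pair is consecutive and pair-aligned, so the
/// v8i16 shuffle can be rewritten as the v4i32 shuffle <1, 5, 0, 7>.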
12040 static
12041 SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
12042 SelectionDAG &DAG) {
12043 MVT VT = SVOp->getSimpleValueType(0);
12045 unsigned NumElems = VT.getVectorNumElements();
12048 switch (VT.SimpleTy) {
12049 default: llvm_unreachable("Unexpected!");
12052 return SDValue(SVOp, 0);
12053 case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break;
12054 case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break;
12055 case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break;
12056 case MVT::v16i8: NewVT = MVT::v4i32; Scale = 4; break;
12057 case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break;
12058 case MVT::v32i8: NewVT = MVT::v8i32; Scale = 4; break;
12061 SmallVector<int, 8> MaskVec;
12062 for (unsigned i = 0; i != NumElems; i += Scale) {
12064 for (unsigned j = 0; j != Scale; ++j) {
12065 int EltIdx = SVOp->getMaskElt(i+j);
12069 StartIdx = (EltIdx / Scale);
12070 if (EltIdx != (int)(StartIdx*Scale + j))
12073 MaskVec.push_back(StartIdx);
12076 SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0));
12077 SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1));
12078 return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
12081 /// getVZextMovL - Return a zero-extending vector move low node.
12083 static SDValue getVZextMovL(MVT VT, MVT OpVT,
12084 SDValue SrcOp, SelectionDAG &DAG,
12085 const X86Subtarget *Subtarget, SDLoc dl) {
12086 if (VT == MVT::v2f64 || VT == MVT::v4f32) {
12087 LoadSDNode *LD = nullptr;
12088 if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
12089 LD = dyn_cast<LoadSDNode>(SrcOp);
12091 // movssrr and movsdrr do not clear top bits. Try to use movd, movq
12093 MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
12094 if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
12095 SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
12096 SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
12097 SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
12099 OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
12100 return DAG.getNode(ISD::BITCAST, dl, VT,
12101 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
12102 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
12104 SrcOp.getOperand(0)
12110 return DAG.getNode(ISD::BITCAST, dl, VT,
12111 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
12112 DAG.getNode(ISD::BITCAST, dl,
12116 /// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vector shuffles
12117 /// which could not be matched by any known target specific shuffle
12118 static SDValue
12119 LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
12121 SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG);
12122 if (NewOp.getNode())
12123 return NewOp;
12125 MVT VT = SVOp->getSimpleValueType(0);
12127 unsigned NumElems = VT.getVectorNumElements();
12128 unsigned NumLaneElems = NumElems / 2;
12131 MVT EltVT = VT.getVectorElementType();
12132 MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
12135 SmallVector<int, 16> Mask;
12136 for (unsigned l = 0; l < 2; ++l) {
12137 // Build a shuffle mask for the output, discovering on the fly which
12138 // input vectors to use as shuffle operands (recorded in InputUsed).
12139 // If building a suitable shuffle vector proves too hard, then bail
12140 // out with UseBuildVector set.
12141 bool UseBuildVector = false;
12142 int InputUsed[2] = { -1, -1 }; // Not yet discovered.
12143 unsigned LaneStart = l * NumLaneElems;
12144 for (unsigned i = 0; i != NumLaneElems; ++i) {
12145 // The mask element. This indexes into the input.
12146 int Idx = SVOp->getMaskElt(i+LaneStart);
12148 // the mask element does not index into any input vector.
12149 Mask.push_back(-1);
12153 // The input vector this mask element indexes into.
12154 int Input = Idx / NumLaneElems;
12156 // Turn the index into an offset from the start of the input vector.
12157 Idx -= Input * NumLaneElems;
12159 // Find or create a shuffle vector operand to hold this input.
12161 for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
12162 if (InputUsed[OpNo] == Input)
12163 // This input vector is already an operand.
12165 if (InputUsed[OpNo] < 0) {
12166 // Create a new operand for this input vector.
12167 InputUsed[OpNo] = Input;
12172 if (OpNo >= array_lengthof(InputUsed)) {
12173 // More than two input vectors used! Give up on trying to create a
12174 // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
12175 UseBuildVector = true;
12179 // Add the mask index for the new shuffle vector.
12180 Mask.push_back(Idx + OpNo * NumLaneElems);
12183 if (UseBuildVector) {
12184 SmallVector<SDValue, 16> SVOps;
12185 for (unsigned i = 0; i != NumLaneElems; ++i) {
12186 // The mask element. This indexes into the input.
12187 int Idx = SVOp->getMaskElt(i+LaneStart);
12189 SVOps.push_back(DAG.getUNDEF(EltVT));
12193 // The input vector this mask element indexes into.
12194 int Input = Idx / NumElems;
12196 // Turn the index into an offset from the start of the input vector.
12197 Idx -= Input * NumElems;
12199 // Extract the vector element by hand.
12200 SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
12201 SVOp->getOperand(Input),
12202 DAG.getIntPtrConstant(Idx)));
12205 // Construct the output using a BUILD_VECTOR.
12206 Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, SVOps);
12207 } else if (InputUsed[0] < 0) {
12208 // No input vectors were used! The result is undefined.
12209 Output[l] = DAG.getUNDEF(NVT);
12211 SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2),
12212 (InputUsed[0] % 2) * NumLaneElems,
12214 // If only one input was used, use an undefined vector for the other.
12215 SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) :
12216 Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2),
12217 (InputUsed[1] % 2) * NumLaneElems, DAG, dl);
12218 // At least one input vector was used. Create a new shuffle vector.
12219 Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
12225 // Concatenate the result back
12226 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]);
12229 /// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
12230 /// 4 elements, and match them with several different shuffle types.
12232 LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
12233 SDValue V1 = SVOp->getOperand(0);
12234 SDValue V2 = SVOp->getOperand(1);
12236 MVT VT = SVOp->getSimpleValueType(0);
12238 assert(VT.is128BitVector() && "Unsupported vector size");
12240 std::pair<int, int> Locs[4];
12241 int Mask1[] = { -1, -1, -1, -1 };
12242 SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end());
12244 unsigned NumHi = 0;
12245 unsigned NumLo = 0;
12246 for (unsigned i = 0; i != 4; ++i) {
12247 int Idx = PermMask[i];
12249 Locs[i] = std::make_pair(-1, -1);
12251 assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
12253 Locs[i] = std::make_pair(0, NumLo);
12254 Mask1[NumLo] = Idx;
12257 Locs[i] = std::make_pair(1, NumHi);
12259 Mask1[2+NumHi] = Idx;
12265 if (NumLo <= 2 && NumHi <= 2) {
12266 // If no more than two elements come from either vector, this can be
12267 // implemented with two shuffles. The first shuffle gathers the elements.
12268 // The second shuffle, which takes the first shuffle as both of its
12269 // vector operands, puts the elements into the right order.
12270 V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12272 int Mask2[] = { -1, -1, -1, -1 };
12274 for (unsigned i = 0; i != 4; ++i)
12275 if (Locs[i].first != -1) {
12276 unsigned Idx = (i < 2) ? 0 : 4;
12277 Idx += Locs[i].first * 2 + Locs[i].second;
12281 return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
12284 if (NumLo == 3 || NumHi == 3) {
12285 // Otherwise, we must have three elements from one vector, call it X, and
12286 // one element from the other, call it Y. First, use a shufps to build an
12287 // intermediate vector with the one element from Y and the element from X
12288 // that will be in the same half in the final destination (the indexes don't
12289 // matter). Then, use a shufps to build the final vector, taking the half
12290 // containing the element from Y from the intermediate, and the other half
12291 // from X.
12293 // Normalize it so the 3 elements come from V1.
12294 CommuteVectorShuffleMask(PermMask, 4);
12298 // Find the element from V2.
12300 for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
12301 int Val = PermMask[HiIndex];
12308 Mask1[0] = PermMask[HiIndex];
12310 Mask1[2] = PermMask[HiIndex^1];
12312 V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12314 if (HiIndex >= 2) {
12315 Mask1[0] = PermMask[0];
12316 Mask1[1] = PermMask[1];
12317 Mask1[2] = HiIndex & 1 ? 6 : 4;
12318 Mask1[3] = HiIndex & 1 ? 4 : 6;
12319 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12322 Mask1[0] = HiIndex & 1 ? 2 : 0;
12323 Mask1[1] = HiIndex & 1 ? 0 : 2;
12324 Mask1[2] = PermMask[2];
12325 Mask1[3] = PermMask[3];
12330 return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
12333 // Break it into (shuffle shuffle_hi, shuffle_lo).
12334 int LoMask[] = { -1, -1, -1, -1 };
12335 int HiMask[] = { -1, -1, -1, -1 };
12337 int *MaskPtr = LoMask;
12338 unsigned MaskIdx = 0;
12339 unsigned LoIdx = 0;
12340 unsigned HiIdx = 2;
12341 for (unsigned i = 0; i != 4; ++i) {
12348 int Idx = PermMask[i];
12350 Locs[i] = std::make_pair(-1, -1);
12351 } else if (Idx < 4) {
12352 Locs[i] = std::make_pair(MaskIdx, LoIdx);
12353 MaskPtr[LoIdx] = Idx;
12356 Locs[i] = std::make_pair(MaskIdx, HiIdx);
12357 MaskPtr[HiIdx] = Idx;
12362 SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
12363 SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
12364 int MaskOps[] = { -1, -1, -1, -1 };
12365 for (unsigned i = 0; i != 4; ++i)
12366 if (Locs[i].first != -1)
12367 MaskOps[i] = Locs[i].first * 4 + Locs[i].second;
12368 return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
12371 static bool MayFoldVectorLoad(SDValue V) {
12372 while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
12373 V = V.getOperand(0);
12375 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
12376 V = V.getOperand(0);
12377 if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
12378 V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
12379 // BUILD_VECTOR (load), undef
12380 V = V.getOperand(0);
12382 return MayFoldLoad(V);
12386 SDValue getMOVDDup(SDValue &Op, SDLoc &dl, SDValue V1, SelectionDAG &DAG) {
12387 MVT VT = Op.getSimpleValueType();
12389 // Canonicalize to v2f64.
12390 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
12391 return DAG.getNode(ISD::BITCAST, dl, VT,
12392 getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
12397 SDValue getMOVLowToHigh(SDValue &Op, SDLoc &dl, SelectionDAG &DAG,
12399 SDValue V1 = Op.getOperand(0);
12400 SDValue V2 = Op.getOperand(1);
12401 MVT VT = Op.getSimpleValueType();
12403 assert(VT != MVT::v2i64 && "unsupported shuffle type");
12405 if (HasSSE2 && VT == MVT::v2f64)
12406 return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);
12408 // v4f32 or v4i32: canonicalize to v4f32 (which is legal for SSE1)
12409 return DAG.getNode(ISD::BITCAST, dl, VT,
12410 getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32,
12411 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1),
12412 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG));
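// Lower a v4f32/v4i32 shuffle to an X86ISD::MOVHLPS target shuffle node.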
12416 SDValue getMOVHighToLow(SDValue &Op, SDLoc &dl, SelectionDAG &DAG) {
12417 SDValue V1 = Op.getOperand(0);
12418 SDValue V2 = Op.getOperand(1);
12419 MVT VT = Op.getSimpleValueType();
12421 assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
12422 "unsupported shuffle type");
12424 if (V2.getOpcode() == ISD::UNDEF)
12428 return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
12432 SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
12433 SDValue V1 = Op.getOperand(0);
12434 SDValue V2 = Op.getOperand(1);
12435 MVT VT = Op.getSimpleValueType();
12436 unsigned NumElems = VT.getVectorNumElements();
12438 // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
12439 // operand of these instructions is only memory, so check if there's a
12440 // potential load folding here, otherwise use SHUFPS or MOVSD to match the
12442 bool CanFoldLoad = false;
12444 // Trivial case, when V2 comes from a load.
12445 if (MayFoldVectorLoad(V2))
12446 CanFoldLoad = true;
12448 // When V1 is a load, it can be folded later into a store in isel, example:
12449 // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
12451 // (MOVLPSmr addr:$src1, VR128:$src2)
12452 // So, recognize this potential and also use MOVLPS or MOVLPD
12453 else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
12454 CanFoldLoad = true;
12456 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12458 if (HasSSE2 && NumElems == 2)
12459 return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);
12462 // If we don't care about the second element, proceed to use movss.
12463 if (SVOp->getMaskElt(1) != -1)
12464 return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
12467 // movl and movlp will both match v2i64, but v2i64 is never matched by
12468 // movl earlier because we keep that check strict to avoid interfering with the
12469 // movlp load-folding logic (see the code above the getMOVLP call). Match it
12470 // here instead; this is ugly, but it will stay this way until all shuffle
12471 // matching is moved to x86-specific nodes. Note that for the first condition
12472 // all types are matched with movsd.
12474 // FIXME: isMOVLMask should be checked and matched before getMOVLP,
12475 // as to remove this logic from here, as much as possible
12476 if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT))
12477 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12478 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12481 assert(VT != MVT::v4i32 && "unsupported shuffle type");
12483 // Invert the operand order and use SHUFPS to match it.
12484 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1,
12485 getShuffleSHUFImmediate(SVOp), DAG);
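// Replace a wide vector load with a narrow load of just the element at Index,
// by offsetting the original address by Index * element-store-size.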
12488 static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
12489 SelectionDAG &DAG) {
12491 MVT VT = Load->getSimpleValueType(0);
12492 MVT EVT = VT.getVectorElementType();
12493 SDValue Addr = Load->getOperand(1);
12494 SDValue NewAddr = DAG.getNode(
12495 ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
12496 DAG.getConstant(Index * EVT.getStoreSize(), Addr.getSimpleValueType()));
12499 DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
12500 DAG.getMachineFunction().getMachineMemOperand(
12501 Load->getMemOperand(), 0, EVT.getStoreSize()));
12505 // It is only safe to call this function if isINSERTPSMask is true for
12506 // this shufflevector mask.
12507 static SDValue getINSERTPS(ShuffleVectorSDNode *SVOp, SDLoc &dl,
12508 SelectionDAG &DAG) {
12509 // Generate an insertps instruction when inserting an f32 from memory onto a
12510 // v4f32 or when copying a member from one v4f32 to another.
12511 // We also use it for transferring i32 from one register to another,
12512 // since it simply copies the same bits.
12513 // If we're transferring an i32 from memory to a specific element in a
12514 // register, we output a generic DAG that will match the PINSRD
12516 MVT VT = SVOp->getSimpleValueType(0);
12517 MVT EVT = VT.getVectorElementType();
12518 SDValue V1 = SVOp->getOperand(0);
12519 SDValue V2 = SVOp->getOperand(1);
12520 auto Mask = SVOp->getMask();
12521 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
12522 "unsupported vector type for insertps/pinsrd");
12524 auto FromV1Predicate = [](const int &i) { return i < 4 && i > -1; };
12525 auto FromV2Predicate = [](const int &i) { return i >= 4; };
12526 int FromV1 = std::count_if(Mask.begin(), Mask.end(), FromV1Predicate);
12530 unsigned DestIndex;
12534 DestIndex = std::find_if(Mask.begin(), Mask.end(), FromV1Predicate) -
12537 // If we have 1 element from each vector, we have to check if we're
12538 // changing V1's element's place. If so, we're done. Otherwise, we
12539 // should assume we're changing V2's element's place and behave
12541 int FromV2 = std::count_if(Mask.begin(), Mask.end(), FromV2Predicate);
12542 assert(DestIndex <= INT32_MAX && "truncated destination index");
12543 if (FromV1 == FromV2 &&
12544 static_cast<int>(DestIndex) == Mask[DestIndex] % 4) {
12548 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12551 assert(std::count_if(Mask.begin(), Mask.end(), FromV2Predicate) == 1 &&
12552 "More than one element from V1 and from V2, or no elements from one "
12553 "of the vectors. This case should not have returned true from "
12558 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12561 // Get an index into the source vector in the range [0,4) (the mask is
12562 // in the range [0,8) because it can address V1 and V2)
12563 unsigned SrcIndex = Mask[DestIndex] % 4;
12564 if (MayFoldLoad(From)) {
12565 // Trivial case, when From comes from a load and is only used by the
12566 // shuffle. Make it use insertps from the vector that we need from that
12569 NarrowVectorLoadToElement(cast<LoadSDNode>(From), SrcIndex, DAG);
12570 if (!NewLoad.getNode())
12573 if (EVT == MVT::f32) {
12574 // Create this as a scalar to vector to match the instruction pattern.
12575 SDValue LoadScalarToVector =
12576 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, NewLoad);
12577 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4);
12578 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, LoadScalarToVector,
12580 } else { // EVT == MVT::i32
12581 // If we're getting an i32 from memory, use an INSERT_VECTOR_ELT
12582 // instruction, to match the PINSRD instruction, which loads an i32 to a
12583 // certain vector element.
12584 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, To, NewLoad,
12585 DAG.getConstant(DestIndex, MVT::i32));
12589 // Vector-element-to-vector
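// The INSERTPS immediate encodes the source element index in bits [7:6] and
// the destination element index in bits [5:4].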
12590 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4 | SrcIndex << 6);
12591 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, From, InsertpsMask);
12594 // Reduce a vector shuffle to zext.
12595 static SDValue LowerVectorIntExtend(SDValue Op, const X86Subtarget *Subtarget,
12596 SelectionDAG &DAG) {
12597 // PMOVZX is only available from SSE41.
12598 if (!Subtarget->hasSSE41())
12601 MVT VT = Op.getSimpleValueType();
12603 // Only AVX2 supports 256-bit vector integer extension.
12604 if (!Subtarget->hasInt256() && VT.is256BitVector())
12607 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12609 SDValue V1 = Op.getOperand(0);
12610 SDValue V2 = Op.getOperand(1);
12611 unsigned NumElems = VT.getVectorNumElements();
12613 // Extending is a unary operation, and the element type of the source vector
12614 // must be smaller than i64.
12615 if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() ||
12616 VT.getVectorElementType() == MVT::i64)
12619 // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4.
12620 unsigned Shift = 1; // Start from 2, i.e. 1 << 1.
12621 while ((1U << Shift) < NumElems) {
12622 if (SVOp->getMaskElt(1U << Shift) == 1)
12625 // The maximal ratio is 8, i.e. from i8 to i64.
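// For example, with v8i16 and an expansion ratio of 4 (Shift == 2), the mask
// must look like <0, -1, -1, -1, 1, -1, -1, -1>, i.e. a zero-extension of two
// i16 elements into a v2i64.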
12630 // Check the shuffle mask.
12631 unsigned Mask = (1U << Shift) - 1;
12632 for (unsigned i = 0; i != NumElems; ++i) {
12633 int EltIdx = SVOp->getMaskElt(i);
12634 if ((i & Mask) != 0 && EltIdx != -1)
12636 if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift))
12640 unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift;
12641 MVT NeVT = MVT::getIntegerVT(NBits);
12642 MVT NVT = MVT::getVectorVT(NeVT, NumElems >> Shift);
12644 if (!DAG.getTargetLoweringInfo().isTypeLegal(NVT))
12647 return DAG.getNode(ISD::BITCAST, DL, VT,
12648 DAG.getNode(X86ISD::VZEXT, DL, NVT, V1));
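// Try to simplify a generic VECTOR_SHUFFLE before matching it against target
// shuffles: fold zero shuffles, lower splats that come from foldable loads to
// broadcasts, recognize shuffles that are really integer extensions, and
// rewrite shuffles that profit from using fewer, wider elements.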
12651 static SDValue NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
12652 SelectionDAG &DAG) {
12653 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12654 MVT VT = Op.getSimpleValueType();
12656 SDValue V1 = Op.getOperand(0);
12657 SDValue V2 = Op.getOperand(1);
12659 if (isZeroShuffle(SVOp))
12660 return getZeroVector(VT, Subtarget, DAG, dl);
12662 // Handle splat operations
12663 if (SVOp->isSplat()) {
12664 // Use vbroadcast whenever the splat comes from a foldable load
12665 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
12666 if (Broadcast.getNode())
12670 // Check integer expanding shuffles.
12671 SDValue NewOp = LowerVectorIntExtend(Op, Subtarget, DAG);
12672 if (NewOp.getNode())
12675 // If the shuffle can be profitably rewritten as a narrower shuffle, then
12677 if (VT == MVT::v8i16 || VT == MVT::v16i8 || VT == MVT::v16i16 ||
12678 VT == MVT::v32i8) {
12679 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12680 if (NewOp.getNode())
12681 return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
12682 } else if (VT.is128BitVector() && Subtarget->hasSSE2()) {
12683 // FIXME: Figure out a cleaner way to do this.
12684 if (ISD::isBuildVectorAllZeros(V2.getNode())) {
12685 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12686 if (NewOp.getNode()) {
12687 MVT NewVT = NewOp.getSimpleValueType();
12688 if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
12689 NewVT, true, false))
12690 return getVZextMovL(VT, NewVT, NewOp.getOperand(0), DAG, Subtarget,
12693 } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
12694 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12695 if (NewOp.getNode()) {
12696 MVT NewVT = NewOp.getSimpleValueType();
12697 if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
12698 return getVZextMovL(VT, NewVT, NewOp.getOperand(1), DAG, Subtarget,
12707 X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
12708 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12709 SDValue V1 = Op.getOperand(0);
12710 SDValue V2 = Op.getOperand(1);
12711 MVT VT = Op.getSimpleValueType();
12713 unsigned NumElems = VT.getVectorNumElements();
12714 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
12715 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
12716 bool V1IsSplat = false;
12717 bool V2IsSplat = false;
12718 bool HasSSE2 = Subtarget->hasSSE2();
12719 bool HasFp256 = Subtarget->hasFp256();
12720 bool HasInt256 = Subtarget->hasInt256();
12721 MachineFunction &MF = DAG.getMachineFunction();
12723 MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
12725 // Check if we should use the experimental vector shuffle lowering. If so,
12726 // delegate completely to that code path.
12727 if (ExperimentalVectorShuffleLowering)
12728 return lowerVectorShuffle(Op, Subtarget, DAG);
12730 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
12732 if (V1IsUndef && V2IsUndef)
12733 return DAG.getUNDEF(VT);
12735 // When we create a shuffle node we put the UNDEF node as the second operand,
12736 // but in some cases the first operand may be transformed to UNDEF.
12737 // In this case we should just commute the node.
12739 return DAG.getCommutedVectorShuffle(*SVOp);
12741 // Vector shuffle lowering takes 3 steps:
12743 // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
12744 // narrowing and commutation of operands should be handled.
12745 // 2) Matching of shuffles with known shuffle masks to x86 target specific
12747 // 3) Rewriting of unmatched masks into new generic shuffle operations,
12748 // so the shuffle can be broken into other shuffles and the legalizer can
12749 // try the lowering again.
12751 // The general idea is that no vector_shuffle operation should be left to
12752 // be matched during isel, all of them must be converted to a target specific
12755 // Normalize the input vectors. Here splats, zeroed vectors, profitable
12756 // narrowing and commutation of operands should be handled. The actual code
12757 // doesn't include all of those, work in progress...
12758 SDValue NewOp = NormalizeVectorShuffle(Op, Subtarget, DAG);
12759 if (NewOp.getNode())
12762 SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end());
12764 // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
12765 // unpckh_undef). Only use pshufd if speed is more important than size.
12766 if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12767 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12768 if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12769 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12771 if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() &&
12772 V2IsUndef && MayFoldVectorLoad(V1))
12773 return getMOVDDup(Op, dl, V1, DAG);
12775 if (isMOVHLPS_v_undef_Mask(M, VT))
12776 return getMOVHighToLow(Op, dl, DAG);
12778 // Used to match splats.
12779 if (HasSSE2 && isUNPCKHMask(M, VT, HasInt256) && V2IsUndef &&
12780 (VT == MVT::v2f64 || VT == MVT::v2i64))
12781 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12783 if (isPSHUFDMask(M, VT)) {
12784 // The actual implementation will match the mask in the if above, and then
12785 // during isel it can match several different instructions, not only pshufd
12786 // as its name suggests. Sad but true; emulate that behavior for now...
12787 if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
12788 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);
12790 unsigned TargetMask = getShuffleSHUFImmediate(SVOp);
12792 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
12793 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);
12795 if (HasFp256 && (VT == MVT::v4f32 || VT == MVT::v2f64))
12796 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1, TargetMask,
12799 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1,
12803 if (isPALIGNRMask(M, VT, Subtarget))
12804 return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2,
12805 getShufflePALIGNRImmediate(SVOp),
12808 if (isVALIGNMask(M, VT, Subtarget))
12809 return getTargetShuffleNode(X86ISD::VALIGN, dl, VT, V1, V2,
12810 getShuffleVALIGNImmediate(SVOp),
12813 // Check if this can be converted into a logical shift.
12814 bool isLeft = false;
12815 unsigned ShAmt = 0;
12817 bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
12818 if (isShift && ShVal.hasOneUse()) {
12819 // If the shifted value has multiple uses, it may be cheaper to use
12820 // v_set0 + movlhps or movhlps, etc.
12821 MVT EltVT = VT.getVectorElementType();
12822 ShAmt *= EltVT.getSizeInBits();
12823 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12826 if (isMOVLMask(M, VT)) {
12827 if (ISD::isBuildVectorAllZeros(V1.getNode()))
12828 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
12829 if (!isMOVLPMask(M, VT)) {
12830 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
12831 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12833 if (VT == MVT::v4i32 || VT == MVT::v4f32)
12834 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12838 // FIXME: fold these into legal mask.
12839 if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasInt256))
12840 return getMOVLowToHigh(Op, dl, DAG, HasSSE2);
12842 if (isMOVHLPSMask(M, VT))
12843 return getMOVHighToLow(Op, dl, DAG);
12845 if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget))
12846 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);
12848 if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget))
12849 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);
12851 if (isMOVLPMask(M, VT))
12852 return getMOVLP(Op, dl, DAG, HasSSE2);
12854 if (ShouldXformToMOVHLPS(M, VT) ||
12855 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT))
12856 return DAG.getCommutedVectorShuffle(*SVOp);
12859 // No better options. Use a vshldq / vsrldq.
12860 MVT EltVT = VT.getVectorElementType();
12861 ShAmt *= EltVT.getSizeInBits();
12862 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12865 bool Commuted = false;
12866 // FIXME: This should also accept a bitcast of a splat? Be careful, not
12867 // 1,1,1,1 -> v8i16 though.
12868 BitVector UndefElements;
12869 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V1.getNode()))
12870 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12872 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V2.getNode()))
12873 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12876 // Canonicalize the splat or undef, if present, to be on the RHS.
12877 if (!V2IsUndef && V1IsSplat && !V2IsSplat) {
12878 CommuteVectorShuffleMask(M, NumElems);
12880 std::swap(V1IsSplat, V2IsSplat);
12884 if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) {
12885 // Shuffling low element of v1 into undef, just return v1.
12888 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
12889 // the instruction selector will not match, so get a canonical MOVL with
12890 // swapped operands to undo the commute.
12891 return getMOVL(DAG, dl, VT, V2, V1);
12894 if (isUNPCKLMask(M, VT, HasInt256))
12895 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12897 if (isUNPCKHMask(M, VT, HasInt256))
12898 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12901 // Normalize the mask so all entries that point to V2 point to its first
12902 // element, then try to match unpck{h|l} again. If they match, return a
12903 // new vector_shuffle with the corrected mask.
12904 SmallVector<int, 8> NewMask(M.begin(), M.end());
12905 NormalizeMask(NewMask, NumElems);
12906 if (isUNPCKLMask(NewMask, VT, HasInt256, true))
12907 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12908 if (isUNPCKHMask(NewMask, VT, HasInt256, true))
12909 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12913 // Commute it back and try unpck* again.
12914 // FIXME: this seems wrong.
12915 CommuteVectorShuffleMask(M, NumElems);
12917 std::swap(V1IsSplat, V2IsSplat);
12919 if (isUNPCKLMask(M, VT, HasInt256))
12920 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12922 if (isUNPCKHMask(M, VT, HasInt256))
12923 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12926 // Normalize the node to match x86 shuffle ops if needed
12927 if (!V2IsUndef && (isSHUFPMask(M, VT, /* Commuted */ true)))
12928 return DAG.getCommutedVectorShuffle(*SVOp);
12930 // The checks below are all present in isShuffleMaskLegal, but they are
12931 // inlined here right now to enable us to directly emit target specific
12932 // nodes, and remove one by one until they don't return Op anymore.
12934 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
12935 SVOp->getSplatIndex() == 0 && V2IsUndef) {
12936 if (VT == MVT::v2f64 || VT == MVT::v2i64)
12937 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12940 if (isPSHUFHWMask(M, VT, HasInt256))
12941 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
12942 getShufflePSHUFHWImmediate(SVOp),
12945 if (isPSHUFLWMask(M, VT, HasInt256))
12946 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
12947 getShufflePSHUFLWImmediate(SVOp),
12950 unsigned MaskValue;
12951 if (isBlendMask(M, VT, Subtarget->hasSSE41(), HasInt256, &MaskValue))
12952 return LowerVECTOR_SHUFFLEtoBlend(SVOp, MaskValue, Subtarget, DAG);
12954 if (isSHUFPMask(M, VT))
12955 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
12956 getShuffleSHUFImmediate(SVOp), DAG);
12958 if (isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12959 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12960 if (isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12961 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12963 //===--------------------------------------------------------------------===//
12964 // Generate target specific nodes for 128 or 256-bit shuffles only
12965 // supported in the AVX instruction set.
12968 // Handle VMOVDDUPY permutations
12969 if (V2IsUndef && isMOVDDUPYMask(M, VT, HasFp256))
12970 return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);
12972 // Handle VPERMILPS/D* permutations
12973 if (isVPERMILPMask(M, VT)) {
12974 if ((HasInt256 && VT == MVT::v8i32) || VT == MVT::v16i32)
12975 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1,
12976 getShuffleSHUFImmediate(SVOp), DAG);
12977 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1,
12978 getShuffleSHUFImmediate(SVOp), DAG);
12982 if (VT.is512BitVector() && isINSERT64x4Mask(M, VT, &Idx))
12983 return Insert256BitVector(V1, Extract256BitVector(V2, 0, DAG, dl),
12984 Idx*(NumElems/2), DAG, dl);
12986 // Handle VPERM2F128/VPERM2I128 permutations
12987 if (isVPERM2X128Mask(M, VT, HasFp256))
12988 return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
12989 V2, getShuffleVPERM2X128Immediate(SVOp), DAG);
12991 if (Subtarget->hasSSE41() && isINSERTPSMask(M, VT))
12992 return getINSERTPS(SVOp, dl, DAG);
12995 if (V2IsUndef && HasInt256 && isPermImmMask(M, VT, Imm8))
12996 return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, Imm8, DAG);
12998 if ((V2IsUndef && HasInt256 && VT.is256BitVector() && NumElems == 8) ||
12999 VT.is512BitVector()) {
13000 MVT MaskEltVT = MVT::getIntegerVT(VT.getVectorElementType().getSizeInBits());
13001 MVT MaskVectorVT = MVT::getVectorVT(MaskEltVT, NumElems);
13002 SmallVector<SDValue, 16> permclMask;
13003 for (unsigned i = 0; i != NumElems; ++i) {
13004 permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MaskEltVT));
13007 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVectorVT, permclMask);
13009 // The bitcast is for VPERMPS, since the mask is v8i32 but the node takes v8f32.
13010 return DAG.getNode(X86ISD::VPERMV, dl, VT,
13011 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1);
13012 return DAG.getNode(X86ISD::VPERMV3, dl, VT, V1,
13013 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V2);
13016 //===--------------------------------------------------------------------===//
13017 // Since no target specific shuffle was selected for this generic one,
13018 // lower it into other known shuffles. FIXME: this isn't true yet, but
13019 // this is the plan.
13022 // Handle v8i16 specifically since SSE can do byte extraction and insertion.
13023 if (VT == MVT::v8i16) {
13024 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG);
13025 if (NewOp.getNode())
13029 if (VT == MVT::v16i16 && HasInt256) {
13030 SDValue NewOp = LowerVECTOR_SHUFFLEv16i16(Op, DAG);
13031 if (NewOp.getNode())
13035 if (VT == MVT::v16i8) {
13036 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, Subtarget, DAG);
13037 if (NewOp.getNode())
13041 if (VT == MVT::v32i8) {
13042 SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG);
13043 if (NewOp.getNode())
13047 // Handle all 128-bit wide vectors with 4 elements, and match them with
13048 // several different shuffle types.
13049 if (NumElems == 4 && VT.is128BitVector())
13050 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG);
13052 // Handle general 256-bit shuffles
13053 if (VT.is256BitVector())
13054 return LowerVECTOR_SHUFFLE_256(SVOp, DAG);
13059 // This function assumes its argument is a BUILD_VECTOR of constants or
13060 // undef SDNodes, i.e., ISD::isBuildVectorOfConstantSDNodes(BuildVector) is
13062 static bool BUILD_VECTORtoBlendMask(BuildVectorSDNode *BuildVector,
13063 unsigned &MaskValue) {
13065 unsigned NumElems = BuildVector->getNumOperands();
13066 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
13067 unsigned NumLanes = (NumElems - 1) / 8 + 1;
13068 unsigned NumElemsInLane = NumElems / NumLanes;
13070 // A blend for v16i16 should be symmetric across both lanes.
13071 for (unsigned i = 0; i < NumElemsInLane; ++i) {
13072 SDValue EltCond = BuildVector->getOperand(i);
13073 SDValue SndLaneEltCond =
13074 (NumLanes == 2) ? BuildVector->getOperand(i + NumElemsInLane) : EltCond;
13076 int Lane1Cond = -1, Lane2Cond = -1;
13077 if (isa<ConstantSDNode>(EltCond))
13078 Lane1Cond = !isZero(EltCond);
13079 if (isa<ConstantSDNode>(SndLaneEltCond))
13080 Lane2Cond = !isZero(SndLaneEltCond);
13082 if (Lane1Cond == Lane2Cond || Lane2Cond < 0)
13083 // Lane1Cond != 0 means we want the first argument.
13084 // Lane1Cond == 0 means we want the second argument.
13085 // The encoding of this argument is 0 for the first argument, 1
13086 // for the second. Therefore, invert the condition.
13087 MaskValue |= !Lane1Cond << i;
13088 else if (Lane1Cond < 0)
13089 MaskValue |= !Lane2Cond << i;
13096 /// \brief Try to lower a VSELECT instruction to an immediate-controlled blend
13098 static SDValue lowerVSELECTtoBLENDI(SDValue Op, const X86Subtarget *Subtarget,
13099 SelectionDAG &DAG) {
13100 SDValue Cond = Op.getOperand(0);
13101 SDValue LHS = Op.getOperand(1);
13102 SDValue RHS = Op.getOperand(2);
13104 MVT VT = Op.getSimpleValueType();
13105 MVT EltVT = VT.getVectorElementType();
13106 unsigned NumElems = VT.getVectorNumElements();
13108 // There is no blend with immediate in AVX-512.
13109 if (VT.is512BitVector())
13112 if (!Subtarget->hasSSE41() || EltVT == MVT::i8)
13114 if (!Subtarget->hasInt256() && VT == MVT::v16i16)
13117 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
13120 // Check the mask for BLEND and build the value.
13121 unsigned MaskValue = 0;
13122 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
13125 // Convert i32 vectors to floating point if the target is not AVX2.
13126 // AVX2 introduced the VPBLENDD instruction for 128-bit and 256-bit vectors.
13128 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
13129 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
13131 LHS = DAG.getNode(ISD::BITCAST, dl, VT, LHS);
13132 RHS = DAG.getNode(ISD::BITCAST, dl, VT, RHS);
13135 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, LHS, RHS,
13136 DAG.getConstant(MaskValue, MVT::i32));
13137 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
13140 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
13141 // A vselect where all conditions and data are constants can be optimized into
13142 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
13143 if (ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(0).getNode()) &&
13144 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(1).getNode()) &&
13145 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(2).getNode()))
13148 SDValue BlendOp = lowerVSELECTtoBLENDI(Op, Subtarget, DAG);
13149 if (BlendOp.getNode())
13152 // Some types for vselect were previously set to Expand, not Legal or
13153 // Custom. Return an empty SDValue so we fall through to Expand, after
13154 // the Custom lowering phase.
13155 MVT VT = Op.getSimpleValueType();
13156 switch (VT.SimpleTy) {
13161 if (Subtarget->hasBWI() && Subtarget->hasVLX())
13166 // We couldn't create a "Blend with immediate" node.
13167 // This node should still be legal, but we'll have to emit a blendv*
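// Lower EXTRACT_VECTOR_ELT on a 128-bit vector using SSE4.1 instructions:
// PEXTRB for i8, PEXTRW for i16, EXTRACTPS for f32 when profitable, and
// EXTRACTPS/PEXTRQ for i32/i64 with a constant index.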
13172 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
13173 MVT VT = Op.getSimpleValueType();
13176 if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
13179 if (VT.getSizeInBits() == 8) {
13180 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
13181 Op.getOperand(0), Op.getOperand(1));
13182 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
13183 DAG.getValueType(VT));
13184 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13187 if (VT.getSizeInBits() == 16) {
13188 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13189 // If Idx is 0, it's cheaper to do a move instead of a pextrw.
13191 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
13192 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13193 DAG.getNode(ISD::BITCAST, dl,
13196 Op.getOperand(1)));
13197 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
13198 Op.getOperand(0), Op.getOperand(1));
13199 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
13200 DAG.getValueType(VT));
13201 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13204 if (VT == MVT::f32) {
13205 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
13206 // the result back to FR32 register. It's only worth matching if the
13207 // result has a single use which is a store or a bitcast to i32. And in
13208 // the case of a store, it's not worth it if the index is a constant 0,
13209 // because a MOVSSmr can be used instead, which is smaller and faster.
13210 if (!Op.hasOneUse())
13212 SDNode *User = *Op.getNode()->use_begin();
13213 if ((User->getOpcode() != ISD::STORE ||
13214 (isa<ConstantSDNode>(Op.getOperand(1)) &&
13215 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
13216 (User->getOpcode() != ISD::BITCAST ||
13217 User->getValueType(0) != MVT::i32))
13219 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13220 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
13223 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
13226 if (VT == MVT::i32 || VT == MVT::i64) {
13227 // ExtractPS/pextrq works with constant index.
13228 if (isa<ConstantSDNode>(Op.getOperand(1)))
13234 /// Extract one bit from mask vector, like v16i1 or v8i1.
13235 /// AVX-512 feature.
13237 X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const {
13238 SDValue Vec = Op.getOperand(0);
13240 MVT VecVT = Vec.getSimpleValueType();
13241 SDValue Idx = Op.getOperand(1);
13242 MVT EltVT = Op.getSimpleValueType();
13244 assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
13245 assert((VecVT.getVectorNumElements() <= 16 || Subtarget->hasBWI()) &&
13246 "Unexpected vector type in ExtractBitFromMaskVector");
13248 // A variable index can't be handled in mask registers,
13249 // so extend the vector to VR512.
13250 if (!isa<ConstantSDNode>(Idx)) {
13251 MVT ExtVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13252 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Vec);
13253 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
13254 ExtVT.getVectorElementType(), Ext, Idx);
13255 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
13258 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13259 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13260 if (!Subtarget->hasDQI() && (VecVT.getVectorNumElements() <= 8))
13261 rc = getRegClassFor(MVT::v16i1);
13262 unsigned MaxSift = rc->getSize()*8 - 1;
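// Shift the requested bit into the most significant position and then back
// down to bit 0, clearing every other bit of the mask register.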
13263 Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
13264 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
13265 Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
13266 DAG.getConstant(MaxSift, MVT::i8));
13267 return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec,
13268 DAG.getIntPtrConstant(0));
13272 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
13273 SelectionDAG &DAG) const {
13275 SDValue Vec = Op.getOperand(0);
13276 MVT VecVT = Vec.getSimpleValueType();
13277 SDValue Idx = Op.getOperand(1);
13279 if (Op.getSimpleValueType() == MVT::i1)
13280 return ExtractBitFromMaskVector(Op, DAG);
13282 if (!isa<ConstantSDNode>(Idx)) {
13283 if (VecVT.is512BitVector() ||
13284 (VecVT.is256BitVector() && Subtarget->hasInt256() &&
13285 VecVT.getVectorElementType().getSizeInBits() == 32)) {
13288 MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
13289 MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
13290 MaskEltVT.getSizeInBits());
13292 Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT);
13293 SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT,
13294 getZeroVector(MaskVT, Subtarget, DAG, dl),
13295 Idx, DAG.getConstant(0, getPointerTy()));
13296 SDValue Perm = DAG.getNode(X86ISD::VPERMV, dl, VecVT, Mask, Vec);
13297 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(),
13298 Perm, DAG.getConstant(0, getPointerTy()));
13303 // If this is a 256-bit vector result, first extract the 128-bit vector and
13304 // then extract the element from the 128-bit vector.
13305 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
13307 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13308 // Get the 128-bit vector.
13309 Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);
13310 MVT EltVT = VecVT.getVectorElementType();
13312 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
13314 //if (IdxVal >= NumElems/2)
13315 // IdxVal -= NumElems/2;
13316 IdxVal -= (IdxVal/ElemsPerChunk)*ElemsPerChunk;
13317 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
13318 DAG.getConstant(IdxVal, MVT::i32));
13321 assert(VecVT.is128BitVector() && "Unexpected vector length");
13323 if (Subtarget->hasSSE41()) {
13324 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
13329 MVT VT = Op.getSimpleValueType();
13330 // TODO: handle v16i8.
13331 if (VT.getSizeInBits() == 16) {
13332 SDValue Vec = Op.getOperand(0);
13333 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13335 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
13336 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13337 DAG.getNode(ISD::BITCAST, dl,
13339 Op.getOperand(1)));
13340 // Transform it so it matches pextrw, which produces a 32-bit result.
13341 MVT EltVT = MVT::i32;
13342 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
13343 Op.getOperand(0), Op.getOperand(1));
13344 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
13345 DAG.getValueType(VT));
13346 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13349 if (VT.getSizeInBits() == 32) {
13350 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13354 // SHUFPS the element to the lowest double word, then movss.
13355 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
13356 MVT VVT = Op.getOperand(0).getSimpleValueType();
13357 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13358 DAG.getUNDEF(VVT), Mask);
13359 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13360 DAG.getIntPtrConstant(0));
13363 if (VT.getSizeInBits() == 64) {
13364 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
13365 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
13366 // to match extract_elt for f64.
13367 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13371 // UNPCKHPD the element to the lowest double word, then movsd.
13372 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
13373 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
13374 int Mask[2] = { 1, -1 };
13375 MVT VVT = Op.getOperand(0).getSimpleValueType();
13376 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13377 DAG.getUNDEF(VVT), Mask);
13378 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13379 DAG.getIntPtrConstant(0));
13385 /// Insert one bit to mask vector, like v16i1 or v8i1.
13386 /// AVX-512 feature.
13388 X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
13390 SDValue Vec = Op.getOperand(0);
13391 SDValue Elt = Op.getOperand(1);
13392 SDValue Idx = Op.getOperand(2);
13393 MVT VecVT = Vec.getSimpleValueType();
13395 if (!isa<ConstantSDNode>(Idx)) {
13396 // Non-constant index. Extend the source and destination,
13397 // insert the element, and then truncate the result.
13398 MVT ExtVecVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13399 MVT ExtEltVT = (VecVT == MVT::v8i1 ? MVT::i64 : MVT::i32);
13400 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
13401 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVecVT, Vec),
13402 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
13403 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
13406 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13407 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
13408 if (Vec.getOpcode() == ISD::UNDEF)
13409 return DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13410 DAG.getConstant(IdxVal, MVT::i8));
13411 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13412 unsigned MaxSift = rc->getSize()*8 - 1;
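// Isolate the bit: shift it up to the most significant position and back down
// to IdxVal so every other bit is cleared, then OR it into the vector.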
13413 EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13414 DAG.getConstant(MaxSift, MVT::i8));
13415 EltInVec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, EltInVec,
13416 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
13417 return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
13420 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
13421 SelectionDAG &DAG) const {
13422 MVT VT = Op.getSimpleValueType();
13423 MVT EltVT = VT.getVectorElementType();
13425 if (EltVT == MVT::i1)
13426 return InsertBitToMaskVector(Op, DAG);
13429 SDValue N0 = Op.getOperand(0);
13430 SDValue N1 = Op.getOperand(1);
13431 SDValue N2 = Op.getOperand(2);
13432 if (!isa<ConstantSDNode>(N2))
13434 auto *N2C = cast<ConstantSDNode>(N2);
13435 unsigned IdxVal = N2C->getZExtValue();
13437 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
13438 // into that, and then insert the subvector back into the result.
13439 if (VT.is256BitVector() || VT.is512BitVector()) {
13440 // Get the desired 128-bit vector half.
13441 SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
13443 // Insert the element into the desired half.
13444 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
13445 unsigned IdxIn128 = IdxVal - (IdxVal / NumEltsIn128) * NumEltsIn128;
13447 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
13448 DAG.getConstant(IdxIn128, MVT::i32));
13450 // Insert the changed part back into the 256-bit vector.
13451 return Insert128BitVector(N0, V, IdxVal, DAG, dl);
13453 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
13455 if (Subtarget->hasSSE41()) {
13456 if (EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) {
13458 if (VT == MVT::v8i16) {
13459 Opc = X86ISD::PINSRW;
13461 assert(VT == MVT::v16i8);
13462 Opc = X86ISD::PINSRB;
13465 // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
13467 if (N1.getValueType() != MVT::i32)
13468 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13469 if (N2.getValueType() != MVT::i32)
13470 N2 = DAG.getIntPtrConstant(IdxVal);
13471 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
13474 if (EltVT == MVT::f32) {
13475 // Bits [7:6] of the constant are the source select. This will always be
13476 // zero here. The DAG Combiner may combine an extract_elt index into
13478 // bits. For example (insert (extract, 3), 2) could be matched by
13480 // the '3' into bits [7:6] of X86ISD::INSERTPS.
13481 // Bits [5:4] of the constant are the destination select. This is the
13482 // value of the incoming immediate.
13483 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
13484 // combine either bitwise AND or insert of float 0.0 to set these bits.
13485 N2 = DAG.getIntPtrConstant(IdxVal << 4);
13486 // Create this as a scalar-to-vector.
13487 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
13488 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
13491 if (EltVT == MVT::i32 || EltVT == MVT::i64) {
13492 // PINSR* works with constant index.
13497 if (EltVT == MVT::i8)
13500 if (EltVT.getSizeInBits() == 16) {
13501 // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
13502 // as its second argument.
13503 if (N1.getValueType() != MVT::i32)
13504 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13505 if (N2.getValueType() != MVT::i32)
13506 N2 = DAG.getIntPtrConstant(IdxVal);
13507 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
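// Lower SCALAR_TO_VECTOR by building a 128-bit vector first and, for wider
// result types, inserting that into an undef vector of the full width.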
13512 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
13514 MVT OpVT = Op.getSimpleValueType();
13516 // If this is a 256-bit vector result, first insert into a 128-bit
13517 // vector and then insert into the 256-bit vector.
13518 if (!OpVT.is128BitVector()) {
13519 // Insert into a 128-bit vector.
13520 unsigned SizeFactor = OpVT.getSizeInBits()/128;
13521 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
13522 OpVT.getVectorNumElements() / SizeFactor);
13524 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
13526 // Insert the 128-bit vector.
13527 return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
13530 if (OpVT == MVT::v1i64 &&
13531 Op.getOperand(0).getValueType() == MVT::i64)
13532 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
13534 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
13535 assert(OpVT.is128BitVector() && "Expected an SSE type!");
13536 return DAG.getNode(ISD::BITCAST, dl, OpVT,
13537 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
13540 // Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
13541 // a simple subregister reference or explicit instructions to grab
13542 // upper bits of a vector.
13543 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13544 SelectionDAG &DAG) {
13546 SDValue In = Op.getOperand(0);
13547 SDValue Idx = Op.getOperand(1);
13548 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13549 MVT ResVT = Op.getSimpleValueType();
13550 MVT InVT = In.getSimpleValueType();
13552 if (Subtarget->hasFp256()) {
13553 if (ResVT.is128BitVector() &&
13554 (InVT.is256BitVector() || InVT.is512BitVector()) &&
13555 isa<ConstantSDNode>(Idx)) {
13556 return Extract128BitVector(In, IdxVal, DAG, dl);
13558 if (ResVT.is256BitVector() && InVT.is512BitVector() &&
13559 isa<ConstantSDNode>(Idx)) {
13560 return Extract256BitVector(In, IdxVal, DAG, dl);
13566 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
13567 // simple superregister reference or explicit instructions to insert
13568 // the upper bits of a vector.
13569 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13570 SelectionDAG &DAG) {
13571 if (!Subtarget->hasAVX())
13575 SDValue Vec = Op.getOperand(0);
13576 SDValue SubVec = Op.getOperand(1);
13577 SDValue Idx = Op.getOperand(2);
13579 if (!isa<ConstantSDNode>(Idx))
13582 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13583 MVT OpVT = Op.getSimpleValueType();
13584 MVT SubVecVT = SubVec.getSimpleValueType();
13586 // Fold two 16-byte subvector loads into one 32-byte load:
13587 // (insert_subvector (insert_subvector undef, (load addr), 0),
13588 // (load addr + 16), Elts/2)
13590 if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
13591 Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
13592 OpVT.is256BitVector() && SubVecVT.is128BitVector() &&
13593 !Subtarget->isUnalignedMem32Slow()) {
13594 SDValue SubVec2 = Vec.getOperand(1);
13595 if (auto *Idx2 = dyn_cast<ConstantSDNode>(Vec.getOperand(2))) {
13596 if (Idx2->getZExtValue() == 0) {
13597 SDValue Ops[] = { SubVec2, SubVec };
13598 SDValue LD = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, false);
13605 if ((OpVT.is256BitVector() || OpVT.is512BitVector()) &&
13606 SubVecVT.is128BitVector())
13607 return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
13609 if (OpVT.is512BitVector() && SubVecVT.is256BitVector())
13610 return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
13615 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
13616 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
13617 // one of the above-mentioned nodes. It has to be wrapped because otherwise
13618 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
13619 // be used to form an addressing mode. These wrapped nodes will be selected
13622 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
13623 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
13625 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13626 // global base reg.
13627 unsigned char OpFlag = 0;
13628 unsigned WrapperKind = X86ISD::Wrapper;
13629 CodeModel::Model M = DAG.getTarget().getCodeModel();
13631 if (Subtarget->isPICStyleRIPRel() &&
13632 (M == CodeModel::Small || M == CodeModel::Kernel))
13633 WrapperKind = X86ISD::WrapperRIP;
13634 else if (Subtarget->isPICStyleGOT())
13635 OpFlag = X86II::MO_GOTOFF;
13636 else if (Subtarget->isPICStyleStubPIC())
13637 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13639 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
13640 CP->getAlignment(),
13641 CP->getOffset(), OpFlag);
13643 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13644 // With PIC, the address is actually $g + Offset.
13646 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13647 DAG.getNode(X86ISD::GlobalBaseReg,
13648 SDLoc(), getPointerTy()),
13655 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
13656 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
13658 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13659 // global base reg.
13660 unsigned char OpFlag = 0;
13661 unsigned WrapperKind = X86ISD::Wrapper;
13662 CodeModel::Model M = DAG.getTarget().getCodeModel();
13664 if (Subtarget->isPICStyleRIPRel() &&
13665 (M == CodeModel::Small || M == CodeModel::Kernel))
13666 WrapperKind = X86ISD::WrapperRIP;
13667 else if (Subtarget->isPICStyleGOT())
13668 OpFlag = X86II::MO_GOTOFF;
13669 else if (Subtarget->isPICStyleStubPIC())
13670 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13672 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
13675 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13677 // With PIC, the address is actually $g + Offset.
13679 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13680 DAG.getNode(X86ISD::GlobalBaseReg,
13681 SDLoc(), getPointerTy()),
13688 X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
13689 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
13691 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13692 // global base reg.
13693 unsigned char OpFlag = 0;
13694 unsigned WrapperKind = X86ISD::Wrapper;
13695 CodeModel::Model M = DAG.getTarget().getCodeModel();
13697 if (Subtarget->isPICStyleRIPRel() &&
13698 (M == CodeModel::Small || M == CodeModel::Kernel)) {
13699 if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
13700 OpFlag = X86II::MO_GOTPCREL;
13701 WrapperKind = X86ISD::WrapperRIP;
13702 } else if (Subtarget->isPICStyleGOT()) {
13703 OpFlag = X86II::MO_GOT;
13704 } else if (Subtarget->isPICStyleStubPIC()) {
13705 OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
13706 } else if (Subtarget->isPICStyleStubNoDynamic()) {
13707 OpFlag = X86II::MO_DARWIN_NONLAZY;
13710 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);
13713 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13715 // With PIC, the address is actually $g + Offset.
13716 if (DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
13717 !Subtarget->is64Bit()) {
13718 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13719 DAG.getNode(X86ISD::GlobalBaseReg,
13720 SDLoc(), getPointerTy()),
13724 // For symbols that require a load from a stub to get the address, emit the
13726 if (isGlobalStubReference(OpFlag))
13727 Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result,
13728 MachinePointerInfo::getGOT(), false, false, false, 0);
13734 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
13735 // Create the TargetBlockAddress node.
13736 unsigned char OpFlags =
13737 Subtarget->ClassifyBlockAddressReference();
13738 CodeModel::Model M = DAG.getTarget().getCodeModel();
13739 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
13740 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
13742 SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset,
13745 if (Subtarget->isPICStyleRIPRel() &&
13746 (M == CodeModel::Small || M == CodeModel::Kernel))
13747 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13749 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13751 // With PIC, the address is actually $g + Offset.
13752 if (isGlobalRelativeToPICBase(OpFlags)) {
13753 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13754 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13762 X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
13763 int64_t Offset, SelectionDAG &DAG) const {
13764 // Create the TargetGlobalAddress node, folding in the constant
13765 // offset if it is legal.
13766 unsigned char OpFlags =
13767 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget());
13768 CodeModel::Model M = DAG.getTarget().getCodeModel();
13770 if (OpFlags == X86II::MO_NO_FLAG &&
13771 X86::isOffsetSuitableForCodeModel(Offset, M)) {
13772 // A direct static reference to a global.
13773 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
13776 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
13779 if (Subtarget->isPICStyleRIPRel() &&
13780 (M == CodeModel::Small || M == CodeModel::Kernel))
13781 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13783 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13785 // With PIC, the address is actually $g + Offset.
13786 if (isGlobalRelativeToPICBase(OpFlags)) {
13787 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13788 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13792 // For globals that require a load from a stub to get the address, emit the
13794 if (isGlobalStubReference(OpFlags))
13795 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
13796 MachinePointerInfo::getGOT(), false, false, false, 0);
13798 // If there was a non-zero offset that we didn't fold, create an explicit
13799 // addition for it.
13801 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
13802 DAG.getConstant(Offset, getPointerTy()));
13808 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
13809 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
13810 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
13811 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
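// Emit a TLSADDR (or TLSBASEADDR, for local dynamic) call for GA and return
// the result copied out of ReturnReg.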
13815 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
13816 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
13817 unsigned char OperandFlags, bool LocalDynamic = false) {
13818 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13819 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13821 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13822 GA->getValueType(0),
13826 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
13830 SDValue Ops[] = { Chain, TGA, *InFlag };
13831 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13833 SDValue Ops[] = { Chain, TGA };
13834 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13837 // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
13838 MFI->setAdjustsStack(true);
13839 MFI->setHasCalls(true);
13841 SDValue Flag = Chain.getValue(1);
13842 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
13845 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
13847 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13850 SDLoc dl(GA); // ? function entry point might be better
13851 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13852 DAG.getNode(X86ISD::GlobalBaseReg,
13853 SDLoc(), PtrVT), InFlag);
13854 InFlag = Chain.getValue(1);
13856 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
13859 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
13861 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13863 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
13864 X86::RAX, X86II::MO_TLSGD);
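// Lower ISD::GlobalTLSAddress using the "local dynamic" model.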
13867 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
13873 // Get the start address of the TLS block for this module.
13874 X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
13875 .getInfo<X86MachineFunctionInfo>();
13876 MFI->incNumLocalDynamicTLSAccesses();
13880 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
13881 X86II::MO_TLSLD, /*LocalDynamic=*/true);
13884 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13885 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
13886 InFlag = Chain.getValue(1);
13887 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
13888 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
13891 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
13895 unsigned char OperandFlags = X86II::MO_DTPOFF;
13896 unsigned WrapperKind = X86ISD::Wrapper;
13897 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13898 GA->getValueType(0),
13899 GA->getOffset(), OperandFlags);
13900 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13902 // Add x@dtpoff with the base.
13903 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
13906 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
13907 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13908 const EVT PtrVT, TLSModel::Model model,
13909 bool is64Bit, bool isPIC) {
13912 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
13913 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
13914 is64Bit ? 257 : 256));
13916 SDValue ThreadPointer =
13917 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0),
13918 MachinePointerInfo(Ptr), false, false, false, 0);
13920 unsigned char OperandFlags = 0;
13921 // Most TLS accesses are not RIP relative, even on x86-64. One exception is
13923 unsigned WrapperKind = X86ISD::Wrapper;
13924 if (model == TLSModel::LocalExec) {
13925 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
13926 } else if (model == TLSModel::InitialExec) {
13928 OperandFlags = X86II::MO_GOTTPOFF;
13929 WrapperKind = X86ISD::WrapperRIP;
13931 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
13934 llvm_unreachable("Unexpected model");
13937 // emit "addl x@ntpoff,%eax" (local exec)
13938 // or "addl x@indntpoff,%eax" (initial exec)
13939 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
13941 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
13942 GA->getOffset(), OperandFlags);
13943 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13945 if (model == TLSModel::InitialExec) {
13946 if (isPIC && !is64Bit) {
13947 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
13948 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
13952 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
13953 MachinePointerInfo::getGOT(), false, false, false, 0);
13956 // The address of the thread local variable is the add of the thread
13957 // pointer with the offset of the variable.
13958 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
13962 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
13964 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
13965 const GlobalValue *GV = GA->getGlobal();
13967 if (Subtarget->isTargetELF()) {
13968 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
13971 case TLSModel::GeneralDynamic:
13972 if (Subtarget->is64Bit())
13973 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
13974 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
13975 case TLSModel::LocalDynamic:
13976 return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(),
13977 Subtarget->is64Bit());
13978 case TLSModel::InitialExec:
13979 case TLSModel::LocalExec:
13980 return LowerToTLSExecModel(
13981 GA, DAG, getPointerTy(), model, Subtarget->is64Bit(),
13982 DAG.getTarget().getRelocationModel() == Reloc::PIC_);
13984 llvm_unreachable("Unknown TLS model.");
13987 if (Subtarget->isTargetDarwin()) {
13988 // Darwin only has one model of TLS. Lower to that.
13989 unsigned char OpFlag = 0;
13990 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
13991 X86ISD::WrapperRIP : X86ISD::Wrapper;
13993 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13994 // global base reg.
13995 bool PIC32 = (DAG.getTarget().getRelocationModel() == Reloc::PIC_) &&
13996 !Subtarget->is64Bit();
13998 if (PIC32) OpFlag = X86II::MO_TLVP_PIC_BASE;
14000 else OpFlag = X86II::MO_TLVP;
14002 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
14003 GA->getValueType(0),
14004 GA->getOffset(), OpFlag);
14005 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
14007 // With PIC32, the address is actually $g + Offset.
14009 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
14010 DAG.getNode(X86ISD::GlobalBaseReg,
14011 SDLoc(), getPointerTy()),
14014 // Lowering the machine isd will make sure everything is in the right place.
14016 SDValue Chain = DAG.getEntryNode();
14017 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
14018 SDValue Args[] = { Chain, Offset };
14019 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
14021 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
14022 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
14023 MFI->setAdjustsStack(true);
14025 // And our return value (tls address) is in the standard call return value slot.
14027 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
14028 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
14029 Chain.getValue(1));
14032 if (Subtarget->isTargetKnownWindowsMSVC() ||
14033 Subtarget->isTargetWindowsGNU()) {
14034 // Just use the implicit TLS architecture
14035 // Need to generate something similar to:
14036 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
14038 // mov ecx, dword [rel _tls_index]: Load index (from C runtime)
14039 // mov rcx, qword [rdx+rcx*8]
14040 // mov eax, .tls$:tlsvar
14041 // [rax+rcx] contains the address
14042 // Windows 64bit: gs:0x58
14043 // Windows 32bit: fs:__tls_array
14046 SDValue Chain = DAG.getEntryNode();
14048 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
14049 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
14050 // use its literal value of 0x2C.
14051 Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
14052 ? Type::getInt8PtrTy(*DAG.getContext(),
14054 : Type::getInt32PtrTy(*DAG.getContext(),
14058 SDValue TlsArray = Subtarget->is64Bit()
14059 ? DAG.getIntPtrConstant(0x58)
14060 : (Subtarget->isTargetWindowsGNU()
14061 ? DAG.getIntPtrConstant(0x2C)
14062 : DAG.getExternalSymbol("_tls_array", getPointerTy()));
14064 SDValue ThreadPointer =
14065 DAG.getLoad(getPointerTy(), dl, Chain, TlsArray,
14066 MachinePointerInfo(Ptr), false, false, false, 0);
14068 // Load the _tls_index variable
14069 SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
14070 if (Subtarget->is64Bit())
14071 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain,
14072 IDX, MachinePointerInfo(), MVT::i32,
14073 false, false, false, 0);
14075 IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
14076 false, false, false, 0);
14078 SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
14080 IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);
14082 SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
14083 res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(),
14084 false, false, false, 0);
14086 // Get the offset of the start of the .tls section.
14087 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
14088 GA->getValueType(0),
14089 GA->getOffset(), X86II::MO_SECREL);
14090 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA);
14092 // The address of the thread-local variable is the sum of the thread
14093 // pointer and the variable's offset.
14094 return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset);
14097 llvm_unreachable("TLS not implemented for this target.");
14100 /// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
14101 /// and take a 2 x i32 value to shift plus a shift amount.
14102 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
14103 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
14104 MVT VT = Op.getSimpleValueType();
14105 unsigned VTBits = VT.getSizeInBits();
14107 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
14108 SDValue ShOpLo = Op.getOperand(0);
14109 SDValue ShOpHi = Op.getOperand(1);
14110 SDValue ShAmt = Op.getOperand(2);
14111 // X86ISD::SHLD and X86ISD::SHRD have defined overflow behavior but the
14112 // generic ISD nodes haven't. Insert an AND to be safe, it's optimized away
14114 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
14115 DAG.getConstant(VTBits - 1, MVT::i8));
14116 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
14117 DAG.getConstant(VTBits - 1, MVT::i8))
14118 : DAG.getConstant(0, VT);
14120 SDValue Tmp2, Tmp3;
14121 if (Op.getOpcode() == ISD::SHL_PARTS) {
14122 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
14123 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
14125 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
14126 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
14129 // If the shift amount is larger than or equal to the width of a part, we can't
14130 // rely on the results of shld/shrd. Insert a test and select the appropriate
14131 // values for large shift amounts.
14132 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
14133 DAG.getConstant(VTBits, MVT::i8));
14134 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
14135 AndNode, DAG.getConstant(0, MVT::i8));
14138 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
14139 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
14140 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
14142 if (Op.getOpcode() == ISD::SHL_PARTS) {
14143 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
14144 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
14146 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
14147 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
14150 SDValue Ops[2] = { Lo, Hi };
14151 return DAG.getMergeValues(Ops, dl);
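// A rough sketch of what this produces for a 64-bit SHL_PARTS on a 32-bit
// target (register names are placeholders, not the exact selection):
//   shldl %cl, %lo, %hi     # hi = shld(hi, lo, cl)
//   shll  %cl, %lo          # lo <<= (cl & 31)
//   testb $32, %cl          # did the amount cover a whole part?
//   cmovnel %lo, %hi        # if so, hi takes the shifted low part...
//   cmovnel %zero, %lo      # ...and lo becomes 0 (zero lives in a register)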
14154 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
14155 SelectionDAG &DAG) const {
14156 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
14159 if (SrcVT.isVector()) {
14160 if (SrcVT.getVectorElementType() == MVT::i1) {
14161 MVT IntegerVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
14162 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14163 DAG.getNode(ISD::SIGN_EXTEND, dl, IntegerVT,
14164 Op.getOperand(0)));
14169 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
14170 "Unknown SINT_TO_FP to lower!");
14172 // These are really Legal; return the operand so the caller accepts it as
14174 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
14176 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
14177 Subtarget->is64Bit()) {
14181 unsigned Size = SrcVT.getSizeInBits()/8;
14182 MachineFunction &MF = DAG.getMachineFunction();
14183 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
14184 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14185 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14187 MachinePointerInfo::getFixedStack(SSFI),
14189 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
14192 SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
14194 SelectionDAG &DAG) const {
14198 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
14200 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
14202 Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
14204 unsigned ByteSize = SrcVT.getSizeInBits()/8;
14206 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
14207 MachineMemOperand *MMO;
14209 int SSFI = FI->getIndex();
14211 DAG.getMachineFunction()
14212 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14213 MachineMemOperand::MOLoad, ByteSize, ByteSize);
14215 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
14216 StackSlot = StackSlot.getOperand(1);
14218 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
14219 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
14221 Tys, Ops, SrcVT, MMO);
14224 Chain = Result.getValue(1);
14225 SDValue InFlag = Result.getValue(2);
14227 // FIXME: Currently the FST is flagged to the FILD_FLAG. This
14228 // shouldn't be necessary except that RFP cannot be live across
14229 // multiple blocks. When stackifier is fixed, they can be uncoupled.
14230 MachineFunction &MF = DAG.getMachineFunction();
14231 unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
14232 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
14233 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14234 Tys = DAG.getVTList(MVT::Other);
14236 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
14238 MachineMemOperand *MMO =
14239 DAG.getMachineFunction()
14240 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14241 MachineMemOperand::MOStore, SSFISize, SSFISize);
14243 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
14244 Ops, Op.getValueType(), MMO);
14245 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
14246 MachinePointerInfo::getFixedStack(SSFI),
14247 false, false, false, 0);
14253 // LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
14254 SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
14255 SelectionDAG &DAG) const {
14256 // This algorithm is not obvious. Here is what we're trying to output:
14259 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
14260 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
14262 haddpd %xmm0, %xmm0
14264 pshufd $0x4e, %xmm0, %xmm1
14270 LLVMContext *Context = DAG.getContext();
14272 // Build some magic constants.
14273 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
14274 Constant *C0 = ConstantDataVector::get(*Context, CV0);
14275 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);
14277 SmallVector<Constant*,2> CV1;
14279 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
14280 APInt(64, 0x4330000000000000ULL))));
14282 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
14283 APInt(64, 0x4530000000000000ULL))));
14284 Constant *C1 = ConstantVector::get(CV1);
14285 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);
14287 // Load the 64-bit value into an XMM register.
14288 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
14290 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
14291 MachinePointerInfo::getConstantPool(),
14292 false, false, false, 16);
14293 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32,
14294 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1),
14297 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
14298 MachinePointerInfo::getConstantPool(),
14299 false, false, false, 16);
14300 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1);
14301 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
14304 if (Subtarget->hasSSE3()) {
14305 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
14306 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
14308 SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub);
14309 SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
14311 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
14312 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle),
14316 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
14317 DAG.getIntPtrConstant(0));
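// Why the magic constants above work (a sketch): after the unpack the two
// double lanes are
//   d0 = bits{0x43300000, lo}  ==  2^52 + lo              (lo is the mantissa)
//   d1 = bits{0x45300000, hi}  ==  2^84 + hi * 2^32
// Subtracting c1 = { 2^52, 2^84 } leaves { lo, hi * 2^32 }, and the final
// horizontal add gives hi * 2^32 + lo, i.e. the original u64 (up to the
// usual double rounding for values >= 2^53).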
14320 // LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
14321 SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
14322 SelectionDAG &DAG) const {
14324 // FP constant to bias correct the final result.
14325 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
14328 // Load the 32-bit value into an XMM register.
14329 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
14332 // Zero out the upper parts of the register.
14333 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
14335 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14336 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
14337 DAG.getIntPtrConstant(0));
14339 // Or the load with the bias.
14340 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
14341 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14342 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14343 MVT::v2f64, Load)),
14344 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14345 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14346 MVT::v2f64, Bias)));
14347 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14348 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
14349 DAG.getIntPtrConstant(0));
14351 // Subtract the bias.
14352 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
14354 // Handle final rounding.
14355 EVT DestVT = Op.getValueType();
14357 if (DestVT.bitsLT(MVT::f64))
14358 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
14359 DAG.getIntPtrConstant(0));
14360 if (DestVT.bitsGT(MVT::f64))
14361 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
14363 // Handle final rounding.
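// The trick used by LowerUINT_TO_FP_i32 above, sketched as C pseudocode
// (bit_cast is illustrative shorthand, not a call made by this lowering):
//   uint32_t x = ...;
//   uint64_t bits = 0x4330000000000000ULL | x;     // the double 2^52 + x
//   double d = bit_cast<double>(bits) - 0x1.0p52;  // == (double)x exactly
// ORing a 32-bit value into the low mantissa bits of 2^52 is exact because
// any 32-bit integer fits in the 52-bit mantissa.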
14367 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
14368 const X86Subtarget &Subtarget) {
14369 // The algorithm is the following:
14370 // #ifdef __SSE4_1__
14371 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14372 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14373 // (uint4) 0x53000000, 0xaa);
14375 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14376 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14378 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14379 // return (float4) lo + fhi;
14382 SDValue V = Op->getOperand(0);
14383 EVT VecIntVT = V.getValueType();
14384 bool Is128 = VecIntVT == MVT::v4i32;
14385 EVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
14386 // If we convert to something other than the supported type, e.g., to v4f64, bail out.
14388 if (VecFloatVT != Op->getValueType(0))
14391 unsigned NumElts = VecIntVT.getVectorNumElements();
14392 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
14393 "Unsupported custom type");
14394 assert(NumElts <= 8 && "The size of the constant array must be fixed");
14396 // In the #ifdef/#else code, we have in common:
14397 // - The vector of constants:
14403 // Create the splat vector for 0x4b000000.
14404 SDValue CstLow = DAG.getConstant(0x4b000000, MVT::i32);
14405 SDValue CstLowArray[] = {CstLow, CstLow, CstLow, CstLow,
14406 CstLow, CstLow, CstLow, CstLow};
14407 SDValue VecCstLow = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14408 makeArrayRef(&CstLowArray[0], NumElts));
14409 // Create the splat vector for 0x53000000.
14410 SDValue CstHigh = DAG.getConstant(0x53000000, MVT::i32);
14411 SDValue CstHighArray[] = {CstHigh, CstHigh, CstHigh, CstHigh,
14412 CstHigh, CstHigh, CstHigh, CstHigh};
14413 SDValue VecCstHigh = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14414 makeArrayRef(&CstHighArray[0], NumElts));
14416 // Create the right shift.
14417 SDValue CstShift = DAG.getConstant(16, MVT::i32);
14418 SDValue CstShiftArray[] = {CstShift, CstShift, CstShift, CstShift,
14419 CstShift, CstShift, CstShift, CstShift};
14420 SDValue VecCstShift = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14421 makeArrayRef(&CstShiftArray[0], NumElts));
14422 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
14425 if (Subtarget.hasSSE41()) {
14426 EVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
14427 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14428 SDValue VecCstLowBitcast =
14429 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstLow);
14430 SDValue VecBitcast = DAG.getNode(ISD::BITCAST, DL, VecI16VT, V);
14431 // Low will be bitcasted right away, so do not bother bitcasting back to its original type.
14433 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
14434 VecCstLowBitcast, DAG.getConstant(0xaa, MVT::i32));
14435 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14436 // (uint4) 0x53000000, 0xaa);
14437 SDValue VecCstHighBitcast =
14438 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstHigh);
14439 SDValue VecShiftBitcast =
14440 DAG.getNode(ISD::BITCAST, DL, VecI16VT, HighShift);
14441 // High will be bitcasted right away, so do not bother bitcasting back to
14442 // its original type.
14443 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
14444 VecCstHighBitcast, DAG.getConstant(0xaa, MVT::i32));
14446 SDValue CstMask = DAG.getConstant(0xffff, MVT::i32);
14447 SDValue VecCstMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, CstMask,
14448 CstMask, CstMask, CstMask);
14449 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14450 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
14451 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
14453 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14454 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
14457 // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
14458 SDValue CstFAdd = DAG.getConstantFP(
14459 APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), MVT::f32);
14460 SDValue CstFAddArray[] = {CstFAdd, CstFAdd, CstFAdd, CstFAdd,
14461 CstFAdd, CstFAdd, CstFAdd, CstFAdd};
14462 SDValue VecCstFAdd = DAG.getNode(ISD::BUILD_VECTOR, DL, VecFloatVT,
14463 makeArrayRef(&CstFAddArray[0], NumElts));
14465 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14466 SDValue HighBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, High);
14468 DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
14469 // return (float4) lo + fhi;
14470 SDValue LowBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, Low);
14471 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
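// A quick check of the constants (sketch): for an input lane v,
//   lo = (v & 0xffff) | 0x4b000000  viewed as float  ==  2^23 + (v & 0xffff)
//   hi = (v >> 16)    | 0x53000000  viewed as float  ==  2^39 + (v >> 16) * 2^16
// so fhi = hi - (2^39 + 2^23) == (v >> 16) * 2^16 - 2^23, and
//    lo + fhi == (v & 0xffff) + (v >> 16) * 2^16 == v,
// with only the final add introducing a rounding step.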
14474 SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
14475 SelectionDAG &DAG) const {
14476 SDValue N0 = Op.getOperand(0);
14477 MVT SVT = N0.getSimpleValueType();
14480 switch (SVT.SimpleTy) {
14482 llvm_unreachable("Custom UINT_TO_FP is not supported!");
14487 MVT NVT = MVT::getVectorVT(MVT::i32, SVT.getVectorNumElements());
14488 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14489 DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
14493 return lowerUINT_TO_FP_vXi32(Op, DAG, *Subtarget);
14495 llvm_unreachable(nullptr);
14498 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
14499 SelectionDAG &DAG) const {
14500 SDValue N0 = Op.getOperand(0);
14503 if (Op.getValueType().isVector())
14504 return lowerUINT_TO_FP_vec(Op, DAG);
14506 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
14507 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
14508 // the optimization here.
14509 if (DAG.SignBitIsZero(N0))
14510 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
14512 MVT SrcVT = N0.getSimpleValueType();
14513 MVT DstVT = Op.getSimpleValueType();
14514 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
14515 return LowerUINT_TO_FP_i64(Op, DAG);
14516 if (SrcVT == MVT::i32 && X86ScalarSSEf64)
14517 return LowerUINT_TO_FP_i32(Op, DAG);
14518 if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
14521 // Make a 64-bit buffer, and use it to build an FILD.
14522 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
14523 if (SrcVT == MVT::i32) {
14524 SDValue WordOff = DAG.getConstant(4, getPointerTy());
14525 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
14526 getPointerTy(), StackSlot, WordOff);
14527 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14528 StackSlot, MachinePointerInfo(),
14530 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
14531 OffsetSlot, MachinePointerInfo(),
14533 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
14537 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
14538 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14539 StackSlot, MachinePointerInfo(),
14541 // For i64 source, we need to add the appropriate power of 2 if the input
14542 // was negative. This is the same as the optimization in
14543 // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
14544 // we must be careful to do the computation in x87 extended precision, not
14545 // in SSE. (The generic code can't know it's OK to do this, or how to.)
14546 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
14547 MachineMemOperand *MMO =
14548 DAG.getMachineFunction()
14549 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14550 MachineMemOperand::MOLoad, 8, 8);
14552 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
14553 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
14554 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
14557 APInt FF(32, 0x5F800000ULL);
14559 // Check whether the sign bit is set.
14560 SDValue SignSet = DAG.getSetCC(dl,
14561 getSetCCResultType(*DAG.getContext(), MVT::i64),
14562 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
14565 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
14566 SDValue FudgePtr = DAG.getConstantPool(
14567 ConstantInt::get(*DAG.getContext(), FF.zext(64)),
14570 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
14571 SDValue Zero = DAG.getIntPtrConstant(0);
14572 SDValue Four = DAG.getIntPtrConstant(4);
14573 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
14575 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);
14577 // Load the value out, extending it from f32 to f80.
14578 // FIXME: Avoid the extend by constructing the right constant pool?
14579 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
14580 FudgePtr, MachinePointerInfo::getConstantPool(),
14581 MVT::f32, false, false, false, 4);
14582 // Extend everything to 80 bits to force it to be done on x87.
14583 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
14584 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
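// A sketch of the fudge above: FILD reads the stored 64-bit value as signed,
// so when the original unsigned value has its top bit set the loaded result
// is off by exactly 2^64. The constant-pool pair { 2^64, 0.0 } (0x5F800000
// is 2^64 as a float) lets the SELECT pick the right correction, done in x87
// extended precision; conceptually:
//   result ~= (long double)(int64_t)x + (top bit set ? 0x1.0p64L : 0.0L)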
14587 std::pair<SDValue,SDValue>
14588 X86TargetLowering:: FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
14589 bool IsSigned, bool IsReplace) const {
14592 EVT DstTy = Op.getValueType();
14594 if (!IsSigned && !isIntegerTypeFTOL(DstTy)) {
14595 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
14599 assert(DstTy.getSimpleVT() <= MVT::i64 &&
14600 DstTy.getSimpleVT() >= MVT::i16 &&
14601 "Unknown FP_TO_INT to lower!");
14603 // These are really Legal.
14604 if (DstTy == MVT::i32 &&
14605 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14606 return std::make_pair(SDValue(), SDValue());
14607 if (Subtarget->is64Bit() &&
14608 DstTy == MVT::i64 &&
14609 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14610 return std::make_pair(SDValue(), SDValue());
14612 // We lower FP->int64 either into FISTP64 followed by a load from a temporary
14613 // stack slot, or into the FTOL runtime function.
14614 MachineFunction &MF = DAG.getMachineFunction();
14615 unsigned MemSize = DstTy.getSizeInBits()/8;
14616 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14617 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14620 if (!IsSigned && isIntegerTypeFTOL(DstTy))
14621 Opc = X86ISD::WIN_FTOL;
14623 switch (DstTy.getSimpleVT().SimpleTy) {
14624 default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
14625 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
14626 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
14627 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
14630 SDValue Chain = DAG.getEntryNode();
14631 SDValue Value = Op.getOperand(0);
14632 EVT TheVT = Op.getOperand(0).getValueType();
14633 // FIXME: This causes a redundant load/store if the SSE-class value is already
14634 // in memory, such as if it is on the call stack.
14635 if (isScalarFPTypeInSSEReg(TheVT)) {
14636 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
14637 Chain = DAG.getStore(Chain, DL, Value, StackSlot,
14638 MachinePointerInfo::getFixedStack(SSFI),
14640 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
14642 Chain, StackSlot, DAG.getValueType(TheVT)
14645 MachineMemOperand *MMO =
14646 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14647 MachineMemOperand::MOLoad, MemSize, MemSize);
14648 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
14649 Chain = Value.getValue(1);
14650 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14651 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14654 MachineMemOperand *MMO =
14655 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14656 MachineMemOperand::MOStore, MemSize, MemSize);
14658 if (Opc != X86ISD::WIN_FTOL) {
14659 // Build the FP_TO_INT*_IN_MEM
14660 SDValue Ops[] = { Chain, Value, StackSlot };
14661 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
14663 return std::make_pair(FIST, StackSlot);
14665 SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL,
14666 DAG.getVTList(MVT::Other, MVT::Glue),
14668 SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX,
14669 MVT::i32, ftol.getValue(1));
14670 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX,
14671 MVT::i32, eax.getValue(2));
14672 SDValue Ops[] = { eax, edx };
14673 SDValue pair = IsReplace
14674 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops)
14675 : DAG.getMergeValues(Ops, DL);
14676 return std::make_pair(pair, SDValue());
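// For the non-FTOL path, the FP_TO_INT*_IN_MEM node built above is expanded
// later into the classic x87 truncation sequence (a sketch, not the literal
// expansion):
//   fnstcw (cw)          ; save the control word
//   fldcw  (cw|RC=trunc) ; force round-toward-zero
//   fistp  (slot)        ; store the truncated integer to the stack slot
//   fldcw  (cw)          ; restore the original rounding mode
// after which the caller reloads the integer from StackSlot.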
14680 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
14681 const X86Subtarget *Subtarget) {
14682 MVT VT = Op->getSimpleValueType(0);
14683 SDValue In = Op->getOperand(0);
14684 MVT InVT = In.getSimpleValueType();
14687 // Optimize vectors in AVX mode:
14690 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32.
14691 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
14692 // Concat upper and lower parts.
14695 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64.
14696 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
14697 // Concat upper and lower parts.
14700 if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) &&
14701 ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) &&
14702 ((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
14705 if (Subtarget->hasInt256())
14706 return DAG.getNode(X86ISD::VZEXT, dl, VT, In);
14708 SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
14709 SDValue Undef = DAG.getUNDEF(InVT);
14710 bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
14711 SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14712 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14714 MVT HVT = MVT::getVectorVT(VT.getVectorElementType(),
14715 VT.getVectorNumElements()/2);
14717 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo);
14718 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi);
14720 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
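// Rough shape of the pre-AVX2 path for a v8i16 -> v8i32 zero extend
// (illustrative):
//   lo = punpcklwd(in, zero)   ; low 4 x i16 interleaved with 0 -> 4 x i32
//   hi = punpckhwd(in, zero)   ; high 4 x i16 interleaved with 0 -> 4 x i32
//   result = concat(lo, hi)    ; assembled into one 256-bit value
// For ANY_EXTEND the zero vector is replaced by undef since the high bits
// are don't-cares.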
14723 static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
14724 SelectionDAG &DAG) {
14725 MVT VT = Op->getSimpleValueType(0);
14726 SDValue In = Op->getOperand(0);
14727 MVT InVT = In.getSimpleValueType();
14729 unsigned int NumElts = VT.getVectorNumElements();
14730 if (NumElts != 8 && NumElts != 16)
14733 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
14734 return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
14736 EVT ExtVT = (NumElts == 8)? MVT::v8i64 : MVT::v16i32;
14737 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14738 // Now we have only mask extension
14739 assert(InVT.getVectorElementType() == MVT::i1);
14740 SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
14741 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14742 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
14743 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14744 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14745 MachinePointerInfo::getConstantPool(),
14746 false, false, false, Alignment);
14748 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, DL, ExtVT, In, Ld);
14749 if (VT.is512BitVector())
14751 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Brcst);
14754 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14755 SelectionDAG &DAG) {
14756 if (Subtarget->hasFp256()) {
14757 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14765 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14766 SelectionDAG &DAG) {
14768 MVT VT = Op.getSimpleValueType();
14769 SDValue In = Op.getOperand(0);
14770 MVT SVT = In.getSimpleValueType();
14772 if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
14773 return LowerZERO_EXTEND_AVX512(Op, DAG);
14775 if (Subtarget->hasFp256()) {
14776 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14781 assert(!VT.is256BitVector() || !SVT.is128BitVector() ||
14782 VT.getVectorNumElements() != SVT.getVectorNumElements());
14786 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
14788 MVT VT = Op.getSimpleValueType();
14789 SDValue In = Op.getOperand(0);
14790 MVT InVT = In.getSimpleValueType();
14792 if (VT == MVT::i1) {
14793 assert((InVT.isInteger() && (InVT.getSizeInBits() <= 64)) &&
14794 "Invalid scalar TRUNCATE operation");
14795 if (InVT.getSizeInBits() >= 32)
14797 In = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, In);
14798 return DAG.getNode(ISD::TRUNCATE, DL, VT, In);
14800 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
14801 "Invalid TRUNCATE operation");
14803 if (InVT.is512BitVector() || VT.getVectorElementType() == MVT::i1) {
14804 if (VT.getVectorElementType().getSizeInBits() >=8)
14805 return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
14807 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
14808 unsigned NumElts = InVT.getVectorNumElements();
14809 assert ((NumElts == 8 || NumElts == 16) && "Unexpected vector type");
14810 if (InVT.getSizeInBits() < 512) {
14811 MVT ExtVT = (NumElts == 16)? MVT::v16i32 : MVT::v8i64;
14812 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
14816 SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
14817 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14818 SDValue CP = DAG.getConstantPool(C, getPointerTy());
14819 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14820 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14821 MachinePointerInfo::getConstantPool(),
14822 false, false, false, Alignment);
14823 SDValue OneV = DAG.getNode(X86ISD::VBROADCAST, DL, InVT, Ld);
14824 SDValue And = DAG.getNode(ISD::AND, DL, InVT, OneV, In);
14825 return DAG.getNode(X86ISD::TESTM, DL, VT, And, And);
14828 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
14829 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
14830 if (Subtarget->hasInt256()) {
14831 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
14832 In = DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, In);
14833 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32),
14835 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
14836 DAG.getIntPtrConstant(0));
14839 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14840 DAG.getIntPtrConstant(0));
14841 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14842 DAG.getIntPtrConstant(2));
14843 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14844 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14845 static const int ShufMask[] = {0, 2, 4, 6};
14846 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
14849 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
14850 // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
14851 if (Subtarget->hasInt256()) {
14852 In = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, In);
14854 SmallVector<SDValue,32> pshufbMask;
14855 for (unsigned i = 0; i < 2; ++i) {
14856 pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8));
14857 pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8));
14858 pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8));
14859 pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8));
14860 pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8));
14861 pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8));
14862 pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8));
14863 pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8));
14864 for (unsigned j = 0; j < 8; ++j)
14865 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
14867 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
14868 In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
14869 In = DAG.getNode(ISD::BITCAST, DL, MVT::v4i64, In);
14871 static const int ShufMask[] = {0, 2, -1, -1};
14872 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
14874 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14875 DAG.getIntPtrConstant(0));
14876 return DAG.getNode(ISD::BITCAST, DL, VT, In);
14879 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14880 DAG.getIntPtrConstant(0));
14882 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14883 DAG.getIntPtrConstant(4));
14885 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpLo);
14886 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpHi);
14888 // The PSHUFB mask:
14889 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
14890 -1, -1, -1, -1, -1, -1, -1, -1};
14892 SDValue Undef = DAG.getUNDEF(MVT::v16i8);
14893 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, Undef, ShufMask1);
14894 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, Undef, ShufMask1);
14896 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14897 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14899 // The MOVLHPS Mask:
14900 static const int ShufMask2[] = {0, 1, 4, 5};
14901 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
14902 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, res);
14905 // Handle truncation of V256 to V128 using shuffles.
14906 if (!VT.is128BitVector() || !InVT.is256BitVector())
14909 assert(Subtarget->hasFp256() && "256-bit vector without AVX!");
14911 unsigned NumElems = VT.getVectorNumElements();
14912 MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
14914 SmallVector<int, 16> MaskVec(NumElems * 2, -1);
14915 // Prepare truncation shuffle mask
14916 for (unsigned i = 0; i != NumElems; ++i)
14917 MaskVec[i] = i * 2;
14918 SDValue V = DAG.getVectorShuffle(NVT, DL,
14919 DAG.getNode(ISD::BITCAST, DL, NVT, In),
14920 DAG.getUNDEF(NVT), &MaskVec[0]);
14921 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
14922 DAG.getIntPtrConstant(0));
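// Example of this generic path (a sketch): truncating v16i16 -> v16i8
// bitcasts the source to v32i8 and shuffles with mask {0, 2, 4, ..., 30,
// -1, ...}. On little-endian x86 the even byte lanes are exactly the low
// byte of each i16 lane, which is what truncation keeps; the result is then
// taken as the low 128 bits.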
14925 SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
14926 SelectionDAG &DAG) const {
14927 assert(!Op.getSimpleValueType().isVector());
14929 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14930 /*IsSigned=*/ true, /*IsReplace=*/ false);
14931 SDValue FIST = Vals.first, StackSlot = Vals.second;
14932 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
14933 if (!FIST.getNode()) return Op;
14935 if (StackSlot.getNode())
14936 // Load the result.
14937 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14938 FIST, StackSlot, MachinePointerInfo(),
14939 false, false, false, 0);
14941 // The node is the result.
14945 SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
14946 SelectionDAG &DAG) const {
14947 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14948 /*IsSigned=*/ false, /*IsReplace=*/ false);
14949 SDValue FIST = Vals.first, StackSlot = Vals.second;
14950 assert(FIST.getNode() && "Unexpected failure");
14952 if (StackSlot.getNode())
14953 // Load the result.
14954 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14955 FIST, StackSlot, MachinePointerInfo(),
14956 false, false, false, 0);
14958 // The node is the result.
14962 static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
14964 MVT VT = Op.getSimpleValueType();
14965 SDValue In = Op.getOperand(0);
14966 MVT SVT = In.getSimpleValueType();
14968 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
14970 return DAG.getNode(X86ISD::VFPEXT, DL, VT,
14971 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
14972 In, DAG.getUNDEF(SVT)));
14975 /// The only differences between FABS and FNEG are the mask and the logic op.
14976 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
14977 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
14978 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
14979 "Wrong opcode for lowering FABS or FNEG.");
14981 bool IsFABS = (Op.getOpcode() == ISD::FABS);
14983 // If this is a FABS and it has an FNEG user, bail out to fold the combination
14984 // into an FNABS. We'll lower the FABS after that if it is still in use.
14986 for (SDNode *User : Op->uses())
14987 if (User->getOpcode() == ISD::FNEG)
14990 SDValue Op0 = Op.getOperand(0);
14991 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
14994 MVT VT = Op.getSimpleValueType();
14995 // Assume scalar op for initialization; update for vector if needed.
14996 // Note that there are no scalar bitwise logical SSE/AVX instructions, so we
14997 // generate a 16-byte vector constant and logic op even for the scalar case.
14998 // Using a 16-byte mask allows folding the load of the mask with
14999 // the logic op, so it can save (~4 bytes) on code size.
15001 unsigned NumElts = VT == MVT::f64 ? 2 : 4;
15002 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
15003 // decide if we should generate a 16-byte constant mask when we only need 4 or
15004 // 8 bytes for the scalar case.
15005 if (VT.isVector()) {
15006 EltVT = VT.getVectorElementType();
15007 NumElts = VT.getVectorNumElements();
15010 unsigned EltBits = EltVT.getSizeInBits();
15011 LLVMContext *Context = DAG.getContext();
15012 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
15014 IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignBit(EltBits);
15015 Constant *C = ConstantInt::get(*Context, MaskElt);
15016 C = ConstantVector::getSplat(NumElts, C);
15017 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15018 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy());
15019 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
15020 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
15021 MachinePointerInfo::getConstantPool(),
15022 false, false, false, Alignment);
15024 if (VT.isVector()) {
15025 // For a vector, cast operands to a vector type, perform the logic op,
15026 // and cast the result back to the original value type.
15027 MVT VecVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
15028 SDValue MaskCasted = DAG.getNode(ISD::BITCAST, dl, VecVT, Mask);
15029 SDValue Operand = IsFNABS ?
15030 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0.getOperand(0)) :
15031 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0);
15032 unsigned BitOp = IsFABS ? ISD::AND : IsFNABS ? ISD::OR : ISD::XOR;
15033 return DAG.getNode(ISD::BITCAST, dl, VT,
15034 DAG.getNode(BitOp, dl, VecVT, Operand, MaskCasted));
15037 // If not vector, then scalar.
15038 unsigned BitOp = IsFABS ? X86ISD::FAND : IsFNABS ? X86ISD::FOR : X86ISD::FXOR;
15039 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
15040 return DAG.getNode(BitOp, dl, VT, Operand, Mask);
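// The bit patterns involved, f32 shown for reference (sketch):
//   FABS(x)  = x & 0x7fffffff    e.g. bits(-1.5) & mask: 0xbfc00000 & 0x7fffffff
//                                     == 0x3fc00000 == bits(1.5)
//   FNEG(x)  = x ^ 0x80000000
//   FNABS(x) = x | 0x80000000    (the FNEG(FABS(x)) folding handled above)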
15043 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
15044 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15045 LLVMContext *Context = DAG.getContext();
15046 SDValue Op0 = Op.getOperand(0);
15047 SDValue Op1 = Op.getOperand(1);
15049 MVT VT = Op.getSimpleValueType();
15050 MVT SrcVT = Op1.getSimpleValueType();
15052 // If second operand is smaller, extend it first.
15053 if (SrcVT.bitsLT(VT)) {
15054 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
15057 // And if it is bigger, shrink it first.
15058 if (SrcVT.bitsGT(VT)) {
15059 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1));
15063 // At this point the operands and the result should have the same
15064 // type, and that won't be f80 since that is not custom lowered.
15066 const fltSemantics &Sem =
15067 VT == MVT::f64 ? APFloat::IEEEdouble : APFloat::IEEEsingle;
15068 const unsigned SizeInBits = VT.getSizeInBits();
15070 SmallVector<Constant *, 4> CV(
15071 VT == MVT::f64 ? 2 : 4,
15072 ConstantFP::get(*Context, APFloat(Sem, APInt(SizeInBits, 0))));
15074 // First, clear all bits but the sign bit from the second operand (sign).
15075 CV[0] = ConstantFP::get(*Context,
15076 APFloat(Sem, APInt::getHighBitsSet(SizeInBits, 1)));
15077 Constant *C = ConstantVector::get(CV);
15078 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
15079 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
15080 MachinePointerInfo::getConstantPool(),
15081 false, false, false, 16);
15082 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);
15084 // Next, clear the sign bit from the first operand (magnitude).
15085 // If it's a constant, we can clear it here.
15086 if (ConstantFPSDNode *Op0CN = dyn_cast<ConstantFPSDNode>(Op0)) {
15087 APFloat APF = Op0CN->getValueAPF();
15088 // If the magnitude is a positive zero, the sign bit alone is enough.
15089 if (APF.isPosZero())
15092 CV[0] = ConstantFP::get(*Context, APF);
15094 CV[0] = ConstantFP::get(
15096 APFloat(Sem, APInt::getLowBitsSet(SizeInBits, SizeInBits - 1)));
15098 C = ConstantVector::get(CV);
15099 CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
15100 SDValue Val = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
15101 MachinePointerInfo::getConstantPool(),
15102 false, false, false, 16);
15103 // If the magnitude operand wasn't a constant, we need to AND out the sign.
15104 if (!isa<ConstantFPSDNode>(Op0))
15105 Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Val);
15107 // OR the magnitude value with the sign bit.
15108 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
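// In plain C terms the sequence above computes (sketch):
//   copysign(mag, sgn) == (bits(sgn) & SIGN_BIT) | (bits(mag) & ~SIGN_BIT)
// where SIGN_BIT is the element type's sign bit (0x800...0). The AND on the
// magnitude is skipped when it is a constant, since its sign bit can be
// cleared at compile time instead.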
15111 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
15112 SDValue N0 = Op.getOperand(0);
15114 MVT VT = Op.getSimpleValueType();
15116 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
15117 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
15118 DAG.getConstant(1, VT));
15119 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
15122 // Check whether an OR'd tree is PTEST-able.
15123 static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
15124 SelectionDAG &DAG) {
15125 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
15127 if (!Subtarget->hasSSE41())
15130 if (!Op->hasOneUse())
15133 SDNode *N = Op.getNode();
15136 SmallVector<SDValue, 8> Opnds;
15137 DenseMap<SDValue, unsigned> VecInMap;
15138 SmallVector<SDValue, 8> VecIns;
15139 EVT VT = MVT::Other;
15141 // Recognize a special case where a vector is casted into wide integer to test all 0s.
15143 Opnds.push_back(N->getOperand(0));
15144 Opnds.push_back(N->getOperand(1));
15146 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
15147 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
15148 // BFS traverse all OR'd operands.
15149 if (I->getOpcode() == ISD::OR) {
15150 Opnds.push_back(I->getOperand(0));
15151 Opnds.push_back(I->getOperand(1));
15152 // Re-evaluate the number of nodes to be traversed.
15153 e += 2; // 2 more nodes (LHS and RHS) are pushed.
15157 // Quit if this is not an EXTRACT_VECTOR_ELT.
15158 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
15161 // Quit if without a constant index.
15162 SDValue Idx = I->getOperand(1);
15163 if (!isa<ConstantSDNode>(Idx))
15166 SDValue ExtractedFromVec = I->getOperand(0);
15167 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
15168 if (M == VecInMap.end()) {
15169 VT = ExtractedFromVec.getValueType();
15170 // Quit if not 128/256-bit vector.
15171 if (!VT.is128BitVector() && !VT.is256BitVector())
15173 // Quit if not the same type.
15174 if (VecInMap.begin() != VecInMap.end() &&
15175 VT != VecInMap.begin()->first.getValueType())
15177 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
15178 VecIns.push_back(ExtractedFromVec);
15180 M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
15183 assert((VT.is128BitVector() || VT.is256BitVector()) &&
15184 "Not extracted from 128-/256-bit vector.");
15186 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
15188 for (DenseMap<SDValue, unsigned>::const_iterator
15189 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
15190 // Quit if not all elements are used.
15191 if (I->second != FullMask)
15195 EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
15197 // Cast all vectors into TestVT for PTEST.
15198 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
15199 VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]);
15201 // If more than one full vector is evaluated, OR them together before PTEST.
15202 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
15203 // Each iteration will OR 2 nodes and append the result until there is only
15204 // 1 node left, i.e. the final OR'd value of all vectors.
15205 SDValue LHS = VecIns[Slot];
15206 SDValue RHS = VecIns[Slot + 1];
15207 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
15210 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
15211 VecIns.back(), VecIns.back());
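// The shape this recognizes, roughly: a v4i32 value %v that was bitcast to
// i128 and compared against zero gets legalized into
//   (or (or (extractelt %v, 0), (extractelt %v, 1)),
//       (or (extractelt %v, 2), (extractelt %v, 3))) == 0
// and the whole tree is folded into a single "ptest %v, %v", whose ZF is
// exactly the "all lanes are zero" condition.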
15214 /// \brief return true if \c Op has a use that doesn't just read flags.
15215 static bool hasNonFlagsUse(SDValue Op) {
15216 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
15218 SDNode *User = *UI;
15219 unsigned UOpNo = UI.getOperandNo();
15220 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
15221 // Look past the truncate.
15222 UOpNo = User->use_begin().getOperandNo();
15223 User = *User->use_begin();
15226 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
15227 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
15233 /// Emit nodes that will be selected as "test Op0,Op0", or something equivalent.
15235 SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
15236 SelectionDAG &DAG) const {
15237 if (Op.getValueType() == MVT::i1) {
15238 SDValue ExtOp = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Op);
15239 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, ExtOp,
15240 DAG.getConstant(0, MVT::i8));
15242 // CF and OF aren't always set the way we want. Determine which
15243 // of these we need.
15244 bool NeedCF = false;
15245 bool NeedOF = false;
15248 case X86::COND_A: case X86::COND_AE:
15249 case X86::COND_B: case X86::COND_BE:
15252 case X86::COND_G: case X86::COND_GE:
15253 case X86::COND_L: case X86::COND_LE:
15254 case X86::COND_O: case X86::COND_NO: {
15255 // Check if we really need to set the Overflow flag. If NoSignedWrap is present, that is not actually needed.
15258 switch (Op->getOpcode()) {
15263 const BinaryWithFlagsSDNode *BinNode =
15264 cast<BinaryWithFlagsSDNode>(Op.getNode());
15265 if (BinNode->hasNoSignedWrap())
15275 // See if we can use the EFLAGS value from the operand instead of
15276 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
15277 // we prove that the arithmetic won't overflow, we can't use OF or CF.
15278 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
15279 // Emit a CMP with 0, which is the TEST pattern.
15280 //if (Op.getValueType() == MVT::i1)
15281 // return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op,
15282 // DAG.getConstant(0, MVT::i1));
15283 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15284 DAG.getConstant(0, Op.getValueType()));
15286 unsigned Opcode = 0;
15287 unsigned NumOperands = 0;
15289 // Truncate operations may prevent the merge of the SETCC instruction
15290 // and the arithmetic instruction before it. Attempt to truncate the operands
15291 // of the arithmetic instruction and use a reduced bit-width instruction.
15292 bool NeedTruncation = false;
15293 SDValue ArithOp = Op;
15294 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
15295 SDValue Arith = Op->getOperand(0);
15296 // Both the trunc and the arithmetic op need to have one user each.
15297 if (Arith->hasOneUse())
15298 switch (Arith.getOpcode()) {
15305 NeedTruncation = true;
15311 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
15312 // which may be the result of a CAST. We use the variable 'Op', which is the
15313 // non-casted variable when we check for possible users.
15314 switch (ArithOp.getOpcode()) {
15316 // Due to an isel shortcoming, be conservative if this add is likely to be
15317 // selected as part of a load-modify-store instruction. When the root node
15318 // in a match is a store, isel doesn't know how to remap non-chain non-flag
15319 // uses of other nodes in the match, such as the ADD in this case. This
15320 // leads to the ADD being left around and reselected, with the result being
15321 // two adds in the output. Alas, even if none of our users are stores, that
15322 // doesn't prove we're O.K. Ergo, if we have any parents that aren't
15323 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
15324 // climbing the DAG back to the root, and it doesn't seem to be worth the effort.
15326 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15327 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15328 if (UI->getOpcode() != ISD::CopyToReg &&
15329 UI->getOpcode() != ISD::SETCC &&
15330 UI->getOpcode() != ISD::STORE)
15333 if (ConstantSDNode *C =
15334 dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
15335 // An add of one will be selected as an INC.
15336 if (C->getAPIntValue() == 1 && !Subtarget->slowIncDec()) {
15337 Opcode = X86ISD::INC;
15342 // An add of negative one (subtract of one) will be selected as a DEC.
15343 if (C->getAPIntValue().isAllOnesValue() && !Subtarget->slowIncDec()) {
15344 Opcode = X86ISD::DEC;
15350 // Otherwise use a regular EFLAGS-setting add.
15351 Opcode = X86ISD::ADD;
15356 // If we have a constant logical shift that's only used in a comparison
15357 // against zero, turn it into an equivalent AND. This allows turning it into
15358 // a TEST instruction later.
15359 if ((X86CC == X86::COND_E || X86CC == X86::COND_NE) && Op->hasOneUse() &&
15360 isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
15361 EVT VT = Op.getValueType();
15362 unsigned BitWidth = VT.getSizeInBits();
15363 unsigned ShAmt = Op->getConstantOperandVal(1);
15364 if (ShAmt >= BitWidth) // Avoid undefined shifts.
15366 APInt Mask = ArithOp.getOpcode() == ISD::SRL
15367 ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
15368 : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
15369 if (!Mask.isSignedIntN(32)) // Avoid large immediates.
15371 SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
15372 DAG.getConstant(Mask, VT));
15373 DAG.ReplaceAllUsesWith(Op, New);
15379 // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
15380 // because a TEST instruction will be better.
15381 if (!hasNonFlagsUse(Op))
15387 // Due to the ISEL shortcoming noted above, be conservative if this op is
15388 // likely to be selected as part of a load-modify-store instruction.
15389 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15390 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15391 if (UI->getOpcode() == ISD::STORE)
15394 // Otherwise use a regular EFLAGS-setting instruction.
15395 switch (ArithOp.getOpcode()) {
15396 default: llvm_unreachable("unexpected operator!");
15397 case ISD::SUB: Opcode = X86ISD::SUB; break;
15398 case ISD::XOR: Opcode = X86ISD::XOR; break;
15399 case ISD::AND: Opcode = X86ISD::AND; break;
15401 if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
15402 SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG);
15403 if (EFLAGS.getNode())
15406 Opcode = X86ISD::OR;
15420 return SDValue(Op.getNode(), 1);
15426 // If we found that truncation is beneficial, perform the truncation and
15428 if (NeedTruncation) {
15429 EVT VT = Op.getValueType();
15430 SDValue WideVal = Op->getOperand(0);
15431 EVT WideVT = WideVal.getValueType();
15432 unsigned ConvertedOp = 0;
15433 // Use a target machine opcode to prevent further DAGCombine
15434 // optimizations that may separate the arithmetic operations
15435 // from the setcc node.
15436 switch (WideVal.getOpcode()) {
15438 case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
15439 case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
15440 case ISD::AND: ConvertedOp = X86ISD::AND; break;
15441 case ISD::OR: ConvertedOp = X86ISD::OR; break;
15442 case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
15446 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15447 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
15448 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
15449 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
15450 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1);
15456 // Emit a CMP with 0, which is the TEST pattern.
15457 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15458 DAG.getConstant(0, Op.getValueType()));
15460 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
15461 SmallVector<SDValue, 4> Ops;
15462 for (unsigned i = 0; i != NumOperands; ++i)
15463 Ops.push_back(Op.getOperand(i));
15465 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
15466 DAG.ReplaceAllUsesWith(Op, New);
15467 return SDValue(New.getNode(), 1);
15470 /// Emit nodes that will be selected as "cmp Op0,Op1", or something equivalent.
15472 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
15473 SDLoc dl, SelectionDAG &DAG) const {
15474 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) {
15475 if (C->getAPIntValue() == 0)
15476 return EmitTest(Op0, X86CC, dl, DAG);
15478 if (Op0.getValueType() == MVT::i1)
15479 llvm_unreachable("Unexpected comparison operation for MVT::i1 operands");
15482 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
15483 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
15484 // Do the comparison at i32 if it's smaller, except on Atom.
15485 // This avoids subregister aliasing issues. Keep the smaller reference
15486 // if we're optimizing for size, however, as that'll allow better folding
15487 // of memory operations.
15488 if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
15489 !DAG.getMachineFunction().getFunction()->hasFnAttribute(
15490 Attribute::MinSize) &&
15491 !Subtarget->isAtom()) {
15492 unsigned ExtendOp =
15493 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
15494 Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
15495 Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
15497 // Use SUB instead of CMP to enable CSE between SUB and CMP.
15498 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
15499 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs,
15501 return SDValue(Sub.getNode(), 1);
15503 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
15506 /// Convert a comparison if required by the subtarget.
15507 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
15508 SelectionDAG &DAG) const {
15509 // If the subtarget does not support the FUCOMI instruction, floating-point
15510 // comparisons have to be converted.
15511 if (Subtarget->hasCMov() ||
15512 Cmp.getOpcode() != X86ISD::CMP ||
15513 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
15514 !Cmp.getOperand(1).getValueType().isFloatingPoint())
15517 // The instruction selector will select an FUCOM instruction instead of
15518 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
15519 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
15520 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
15522 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
15523 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
15524 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
15525 DAG.getConstant(8, MVT::i8));
15526 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
15527 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
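// At the assembly level the node sequence above is the classic pre-FUCOMI
// idiom (sketch):
//   fucomp                ; compare; C0/C2/C3 land in the FP status word
//   fnstsw %ax            ; copy FPSW into AX
//   sahf                  ; move AH (the condition bits) into EFLAGS
// after which ordinary SETcc/Jcc lowering can test EFLAGS as usual.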
15530 /// The minimum architected relative accuracy is 2^-12. We need one
15531 /// Newton-Raphson step to have a good float result (24 bits of precision).
15532 SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
15533 DAGCombinerInfo &DCI,
15534 unsigned &RefinementSteps,
15535 bool &UseOneConstNR) const {
15536 // FIXME: We should use instruction latency models to calculate the cost of
15537 // each potential sequence, but this is very hard to do reliably because
15538 // at least Intel's Core* chips have variable timing based on the number of
15539 // significant digits in the divisor and/or sqrt operand.
15540 if (!Subtarget->useSqrtEst())
15541 return SDValue();
15543 EVT VT = Op.getValueType();
15545 // SSE1 has rsqrtss and rsqrtps.
15546 // TODO: Add support for AVX512 (v16f32).
15547 // It is likely not profitable to do this for f64 because a double-precision
15548 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
15549 // instructions: convert to single, rsqrtss, convert back to double, refine
15550 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
15551 // along with FMA, this could be a throughput win.
15552 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15553 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15554 RefinementSteps = 1;
15555 UseOneConstNR = false;
15556 return DCI.DAG.getNode(X86ISD::FRSQRT, SDLoc(Op), VT, Op);
15561 /// The minimum architected relative accuracy is 2^-12. We need one
15562 /// Newton-Raphson step to have a good float result (24 bits of precision).
15563 SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
15564 DAGCombinerInfo &DCI,
15565 unsigned &RefinementSteps) const {
15566 // FIXME: We should use instruction latency models to calculate the cost of
15567 // each potential sequence, but this is very hard to do reliably because
15568 // at least Intel's Core* chips have variable timing based on the number of
15569 // significant digits in the divisor.
15570 if (!Subtarget->useReciprocalEst())
15571 return SDValue();
15573 EVT VT = Op.getValueType();
15575 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
15576 // TODO: Add support for AVX512 (v16f32).
15577 // It is likely not profitable to do this for f64 because a double-precision
15578 // reciprocal estimate with refinement on x86 prior to FMA requires
15579 // 15 instructions: convert to single, rcpss, convert back to double, refine
15580 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
15581 // along with FMA, this could be a throughput win.
15582 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15583 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15584 RefinementSteps = ReciprocalEstimateRefinementSteps;
15585 return DCI.DAG.getNode(X86ISD::FRCP, SDLoc(Op), VT, Op);
15586 }
15587 return SDValue();
15588 }
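// isAllOnes - Return true if the value is a constant scalar with all bits set.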
15590 static bool isAllOnes(SDValue V) {
15591 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
15592 return C && C->isAllOnesValue();
15595 /// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
15596 /// if it's possible.
15597 SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
15598 SDLoc dl, SelectionDAG &DAG) const {
15599 SDValue Op0 = And.getOperand(0);
15600 SDValue Op1 = And.getOperand(1);
15601 if (Op0.getOpcode() == ISD::TRUNCATE)
15602 Op0 = Op0.getOperand(0);
15603 if (Op1.getOpcode() == ISD::TRUNCATE)
15604 Op1 = Op1.getOperand(0);
15606 SDValue LHS, RHS;
15607 if (Op1.getOpcode() == ISD::SHL)
15608 std::swap(Op0, Op1);
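// If the AND operand is (1 << N), the bit being tested is N: use the AND's
// other operand as the value and the shift amount as the bit index for BT.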
15609 if (Op0.getOpcode() == ISD::SHL) {
15610 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0)))
15611 if (And00C->getZExtValue() == 1) {
15612 // If we looked past a truncate, check that it's only truncating away
15613 // known-zero bits.
15614 unsigned BitWidth = Op0.getValueSizeInBits();
15615 unsigned AndBitWidth = And.getValueSizeInBits();
15616 if (BitWidth > AndBitWidth) {
15617 APInt Zeros, Ones;
15618 DAG.computeKnownBits(Op0, Zeros, Ones);
15619 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
15620 return SDValue();
15621 }
15622 LHS = Op1;
15623 RHS = Op0.getOperand(1);
15624 }
15625 } else if (Op1.getOpcode() == ISD::Constant) {
15626 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
15627 uint64_t AndRHSVal = AndRHS->getZExtValue();
15628 SDValue AndLHS = Op0;
15630 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
15631 LHS = AndLHS.getOperand(0);
15632 RHS = AndLHS.getOperand(1);
15633 }
15635 // Use BT if the immediate can't be encoded in a TEST instruction.
15636 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
15637 LHS = AndLHS;
15638 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType());
15639 }
15640 }
15642 if (LHS.getNode()) {
15643 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
15644 // instruction. Since the shift amount is in-range-or-undefined, we know
15645 // that doing a bittest on the i32 value is ok. We extend to i32 because
15646 // the encoding for the i16 version is larger than the i32 version.
15647 // Also promote i16 to i32 for performance / code size reason.
15648 if (LHS.getValueType() == MVT::i8 ||
15649 LHS.getValueType() == MVT::i16)
15650 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
15652 // If the operand types disagree, extend the shift amount to match. Since
15653 // BT ignores high bits (like shifts) we can use anyextend.
15654 if (LHS.getValueType() != RHS.getValueType())
15655 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
15657 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
15658 X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
15659 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15660 DAG.getConstant(Cond, MVT::i8), BT);
15661 }
15663 return SDValue();
15664 }
15666 /// \brief - Turns an ISD::CondCode into a value suitable for SSE floating point
15667 /// mask CC.
15668 static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
15669 SDValue &Op1) {
15670 unsigned SSECC;
15671 bool Swap = false;
15673 // SSE Condition code mapping:
15674 //  0 - EQ
15675 //  1 - LT
15676 //  2 - LE
15677 //  3 - UNORD
15678 //  4 - NEQ
15679 //  5 - NLT
15680 //  6 - NLE
15681 //  7 - ORD
15682 switch (SetCCOpcode) {
15683 default: llvm_unreachable("Unexpected SETCC condition");
15684 case ISD::SETOEQ:
15685 case ISD::SETEQ: SSECC = 0; break;
15686 case ISD::SETOGT:
15687 case ISD::SETGT: Swap = true; // Fallthrough
15688 case ISD::SETLT:
15689 case ISD::SETOLT: SSECC = 1; break;
15690 case ISD::SETOGE:
15691 case ISD::SETGE: Swap = true; // Fallthrough
15692 case ISD::SETLE:
15693 case ISD::SETOLE: SSECC = 2; break;
15694 case ISD::SETUO: SSECC = 3; break;
15695 case ISD::SETUNE:
15696 case ISD::SETNE: SSECC = 4; break;
15697 case ISD::SETULE: Swap = true; // Fallthrough
15698 case ISD::SETUGE: SSECC = 5; break;
15699 case ISD::SETULT: Swap = true; // Fallthrough
15700 case ISD::SETUGT: SSECC = 6; break;
15701 case ISD::SETO: SSECC = 7; break;
15702 case ISD::SETUEQ:
15703 case ISD::SETONE: SSECC = 8; break;
15704 }
15705 if (Swap)
15706 std::swap(Op0, Op1);
15707 return SSECC;
15708 }
15711 // Lower256IntVSETCC - Break a 256-bit integer VSETCC into two new 128-bit
15712 // ones, and then concatenate the result back.
15713 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
15714 MVT VT = Op.getSimpleValueType();
15716 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
15717 "Unsupported value type for operation");
15719 unsigned NumElems = VT.getVectorNumElements();
15720 SDLoc dl(Op);
15721 SDValue CC = Op.getOperand(2);
15723 // Extract the LHS vectors
15724 SDValue LHS = Op.getOperand(0);
15725 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
15726 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
15728 // Extract the RHS vectors
15729 SDValue RHS = Op.getOperand(1);
15730 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
15731 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
15733 // Issue the operation on the smaller types and concatenate the result back
15734 MVT EltVT = VT.getVectorElementType();
15735 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
15736 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
15737 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
15738 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
15739 }
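// LowerIntVSETCC_AVX512 - Lower an integer VSETCC that produces an i1 mask
// vector, using PCMPEQM/PCMPGTM where possible and the immediate-CC
// CMPM/CMPMU forms otherwise.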
15741 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
15742 const X86Subtarget *Subtarget) {
15743 SDValue Op0 = Op.getOperand(0);
15744 SDValue Op1 = Op.getOperand(1);
15745 SDValue CC = Op.getOperand(2);
15746 MVT VT = Op.getSimpleValueType();
15748 SDLoc dl(Op);
15749 assert(Op0.getValueType().getVectorElementType().getSizeInBits() >= 8 &&
15750 Op.getValueType().getScalarType() == MVT::i1 &&
15751 "Cannot set masked compare for this operation");
15753 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15754 unsigned Opc = 0;
15755 bool Unsigned = false;
15756 bool Swap = false;
15757 unsigned SSECC = 0;
15758 switch (SetCCOpcode) {
15759 default: llvm_unreachable("Unexpected SETCC condition");
15760 case ISD::SETNE: SSECC = 4; break;
15761 case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
15762 case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
15763 case ISD::SETLT: Swap = true; //fall-through
15764 case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
15765 case ISD::SETULT: SSECC = 1; Unsigned = true; break;
15766 case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
15767 case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
15768 case ISD::SETULE: Unsigned = true; //fall-through
15769 case ISD::SETLE: SSECC = 2; break;
15770 }
15772 if (Swap)
15773 std::swap(Op0, Op1);
15774 if (Opc)
15775 return DAG.getNode(Opc, dl, VT, Op0, Op1);
15776 Opc = Unsigned ? X86ISD::CMPMU: X86ISD::CMPM;
15777 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15778 DAG.getConstant(SSECC, MVT::i8));
15781 /// \brief Try to turn a VSETULT into a VSETULE by modifying its second
15782 /// operand \p Op1. If non-trivial (for example because it's not constant)
15783 /// return an empty value.
15784 static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
15785 {
15786 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
15787 if (!BV)
15788 return SDValue();
15790 MVT VT = Op1.getSimpleValueType();
15791 MVT EVT = VT.getVectorElementType();
15792 unsigned n = VT.getVectorNumElements();
15793 SmallVector<SDValue, 8> ULTOp1;
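// Rebuild the constant operand with every element decremented by one; bail
// out on any element that is not a plain constant or that would underflow.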
15795 for (unsigned i = 0; i < n; ++i) {
15796 ConstantSDNode *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
15797 if (!Elt || Elt->isOpaque() || Elt->getValueType(0) != EVT)
15798 return SDValue();
15800 // Avoid underflow.
15801 APInt Val = Elt->getAPIntValue();
15802 if (Val == 0)
15803 return SDValue();
15805 ULTOp1.push_back(DAG.getConstant(Val - 1, EVT));
15806 }
15808 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
15809 }
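// LowerVSETCC - Lower an ISD::SETCC with vector operands to the matching X86
// vector compare nodes.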
15811 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
15812 SelectionDAG &DAG) {
15813 SDValue Op0 = Op.getOperand(0);
15814 SDValue Op1 = Op.getOperand(1);
15815 SDValue CC = Op.getOperand(2);
15816 MVT VT = Op.getSimpleValueType();
15817 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15818 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
15819 SDLoc dl(Op);
15821 if (isFP) {
15822 #ifndef NDEBUG
15823 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
15824 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
15825 #endif
15827 unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
15828 unsigned Opc = X86ISD::CMPP;
15829 if (Subtarget->hasAVX512() && VT.getVectorElementType() == MVT::i1) {
15830 assert(VT.getVectorNumElements() <= 16);
15831 Opc = X86ISD::CMPM;
15832 }
15833 // In the two special cases we can't handle, emit two comparisons.
15834 if (SSECC == 8) {
15835 unsigned CC0, CC1;
15836 unsigned CombineOpc;
15837 if (SetCCOpcode == ISD::SETUEQ) {
15838 CC0 = 3; CC1 = 0; CombineOpc = ISD::OR;
15839 } else {
15840 assert(SetCCOpcode == ISD::SETONE);
15841 CC0 = 7; CC1 = 4; CombineOpc = ISD::AND;
15842 }
15844 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15845 DAG.getConstant(CC0, MVT::i8));
15846 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15847 DAG.getConstant(CC1, MVT::i8));
15848 return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
15850 // Handle all other FP comparisons here.
15851 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15852 DAG.getConstant(SSECC, MVT::i8));
15853 }
15855 // Break 256-bit integer vector compare into smaller ones.
15856 if (VT.is256BitVector() && !Subtarget->hasInt256())
15857 return Lower256IntVSETCC(Op, DAG);
15859 bool MaskResult = (VT.getVectorElementType() == MVT::i1);
15860 EVT OpVT = Op1.getValueType();
15861 if (Subtarget->hasAVX512()) {
15862 if (Op1.getValueType().is512BitVector() ||
15863 (Subtarget->hasBWI() && Subtarget->hasVLX()) ||
15864 (MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32))
15865 return LowerIntVSETCC_AVX512(Op, DAG, Subtarget);
15867 // In AVX-512 architecture setcc returns mask with i1 elements,
15868 // But there is no compare instruction for i8 and i16 elements in KNL.
15869 // We are not talking about 512-bit operands in this case, these
15870 // types are illegal.
15871 if (MaskResult &&
15872 (OpVT.getVectorElementType().getSizeInBits() < 32 &&
15873 OpVT.getVectorElementType().getSizeInBits() >= 8))
15874 return DAG.getNode(ISD::TRUNCATE, dl, VT,
15875 DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
15876 }
15878 // We are handling one of the integer comparisons here. Since SSE only has
15879 // GT and EQ comparisons for integer, swapping operands and multiple
15880 // operations may be required for some comparisons.
15881 unsigned Opc;
15882 bool Swap = false, Invert = false, FlipSigns = false, MinMax = false;
15883 bool Subus = false;
15885 switch (SetCCOpcode) {
15886 default: llvm_unreachable("Unexpected SETCC condition");
15887 case ISD::SETNE: Invert = true;
15888 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
15889 case ISD::SETLT: Swap = true;
15890 case ISD::SETGT: Opc = X86ISD::PCMPGT; break;
15891 case ISD::SETGE: Swap = true;
15892 case ISD::SETLE: Opc = X86ISD::PCMPGT;
15893 Invert = true; break;
15894 case ISD::SETULT: Swap = true;
15895 case ISD::SETUGT: Opc = X86ISD::PCMPGT;
15896 FlipSigns = true; break;
15897 case ISD::SETUGE: Swap = true;
15898 case ISD::SETULE: Opc = X86ISD::PCMPGT;
15899 FlipSigns = true; Invert = true; break;
15902 // Special case: Use min/max operations for SETULE/SETUGE
15903 MVT VET = VT.getVectorElementType();
15904 bool hasMinMax =
15905 (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
15906 || (Subtarget->hasSSE2() && (VET == MVT::i8));
15908 if (hasMinMax) {
15909 switch (SetCCOpcode) {
15910 default: break;
15911 case ISD::SETULE: Opc = X86ISD::UMIN; MinMax = true; break;
15912 case ISD::SETUGE: Opc = X86ISD::UMAX; MinMax = true; break;
15913 }
15915 if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
15916 }
15918 bool hasSubus = Subtarget->hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
15919 if (!MinMax && hasSubus) {
15920 // As another special case, use PSUBUS[BW] when it's profitable. E.g. for
15922 // t = psubus Op0, Op1
15923 // pcmpeq t, <0..0>
15924 switch (SetCCOpcode) {
15926 case ISD::SETULT: {
15927 // If the comparison is against a constant we can turn this into a
15928 // setule. With psubus, setule does not require a swap. This is
15929 // beneficial because the constant in the register is no longer
15930 // destructed as the destination so it can be hoisted out of a loop.
15931 // Only do this pre-AVX since vpcmp* is no longer destructive.
15932 if (Subtarget->hasAVX())
15933 break;
15934 SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
15935 if (ULEOp1.getNode()) {
15936 Op1 = ULEOp1;
15937 Subus = true; Invert = false; Swap = false;
15938 }
15939 break;
15940 }
15941 // Psubus is better than flip-sign because it requires no inversion.
15942 case ISD::SETUGE: Subus = true; Invert = false; Swap = true; break;
15943 case ISD::SETULE: Subus = true; Invert = false; Swap = false; break;
15944 }
15946 if (Subus) {
15947 Opc = X86ISD::SUBUS;
15948 FlipSigns = false;
15949 Invert = false;
15950 }
15951 }
15952 if (Swap)
15953 std::swap(Op0, Op1);
15955 // Check that the operation in question is available (most are plain SSE2,
15956 // but PCMPGTQ and PCMPEQQ have different requirements).
15957 if (VT == MVT::v2i64) {
15958 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) {
15959 assert(Subtarget->hasSSE2() && "Don't know how to lower!");
15961 // First cast everything to the right type.
15962 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15963 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15965 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15966 // bits of the inputs before performing those operations. The lower
15967 // compare is always unsigned.
15968 SDValue SB;
15969 if (FlipSigns) {
15970 SB = DAG.getConstant(0x80000000U, MVT::v4i32);
15971 } else {
15972 SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32);
15973 SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32);
15974 SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
15975 Sign, Zero, Sign, Zero);
15976 }
15977 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
15978 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
15980 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
15981 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
15982 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
15984 // Create masks for only the low parts/high parts of the 64 bit integers.
15985 static const int MaskHi[] = { 1, 1, 3, 3 };
15986 static const int MaskLo[] = { 0, 0, 2, 2 };
15987 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
15988 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
15989 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
15991 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
15992 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
15994 if (Invert)
15995 Result = DAG.getNOT(dl, Result, MVT::v4i32);
15997 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
15998 }
16000 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) {
16001 // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
16002 // pcmpeqd + pshufd + pand.
16003 assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!");
16005 // First cast everything to the right type.
16006 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
16007 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
16010 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
16012 // Make sure the lower and upper halves are both all-ones.
16013 static const int Mask[] = { 1, 0, 3, 2 };
16014 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
16015 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
16017 if (Invert)
16018 Result = DAG.getNOT(dl, Result, MVT::v4i32);
16020 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
16021 }
16022 }
16024 // Since SSE has no unsigned integer comparisons, we need to flip the sign
16025 // bits of the inputs before performing those operations.
16026 if (FlipSigns) {
16027 EVT EltVT = VT.getVectorElementType();
16028 SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT);
16029 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB);
16030 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB);
16031 }
16033 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
16035 // If the logical-not of the result is required, perform that now.
16036 if (Invert)
16037 Result = DAG.getNOT(dl, Result, VT);
16039 if (MinMax)
16040 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
16042 if (Subus)
16043 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
16044 getZeroVector(VT, Subtarget, DAG, dl));
16046 return Result;
16047 }
16049 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
16051 MVT VT = Op.getSimpleValueType();
16053 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
16055 assert(((!Subtarget->hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
16056 && "SetCC type must be 8-bit or 1-bit integer");
16057 SDValue Op0 = Op.getOperand(0);
16058 SDValue Op1 = Op.getOperand(1);
16059 SDLoc dl(Op);
16060 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
16062 // Optimize to BT if possible.
16063 // Lower (X & (1 << N)) == 0 to BT(X, N).
16064 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
16065 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
16066 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
16067 Op1.getOpcode() == ISD::Constant &&
16068 cast<ConstantSDNode>(Op1)->isNullValue() &&
16069 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16070 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
16071 if (NewSetCC.getNode()) {
16072 if (VT == MVT::i1)
16073 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
16074 return NewSetCC;
16075 }
16076 }
16078 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
16080 if (Op1.getOpcode() == ISD::Constant &&
16081 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
16082 cast<ConstantSDNode>(Op1)->isNullValue()) &&
16083 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16085 // If the input is a setcc, then reuse the input setcc or use a new one with
16086 // the inverted condition.
16087 if (Op0.getOpcode() == X86ISD::SETCC) {
16088 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
16089 bool Invert = (CC == ISD::SETNE) ^
16090 cast<ConstantSDNode>(Op1)->isNullValue();
16091 if (!Invert)
16092 return Op0;
16094 CCode = X86::GetOppositeBranchCondition(CCode);
16095 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
16096 DAG.getConstant(CCode, MVT::i8),
16097 Op0.getOperand(1));
16098 if (VT == MVT::i1)
16099 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
16100 return SetCC;
16101 }
16102 }
16103 if ((Op0.getValueType() == MVT::i1) && (Op1.getOpcode() == ISD::Constant) &&
16104 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1) &&
16105 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16107 ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
16108 return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, MVT::i1), NewCC);
16111 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
16112 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
16113 if (X86CC == X86::COND_INVALID)
16114 return SDValue();
16116 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
16117 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
16118 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
16119 DAG.getConstant(X86CC, MVT::i8), EFLAGS);
16120 if (VT == MVT::i1)
16121 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
16122 return SetCC;
16123 }
16125 // isX86LogicalCmp - Return true if opcode is a X86 logical comparison.
16126 static bool isX86LogicalCmp(SDValue Op) {
16127 unsigned Opc = Op.getNode()->getOpcode();
16128 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
16129 Opc == X86ISD::SAHF)
16130 return true;
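// Arithmetic X86 nodes expose their EFLAGS result as value #1 (value #2 for
// UMUL), so only that result number counts as a logical comparison.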
16131 if (Op.getResNo() == 1 &&
16132 (Opc == X86ISD::ADD ||
16133 Opc == X86ISD::SUB ||
16134 Opc == X86ISD::ADC ||
16135 Opc == X86ISD::SBB ||
16136 Opc == X86ISD::SMUL ||
16137 Opc == X86ISD::UMUL ||
16138 Opc == X86ISD::INC ||
16139 Opc == X86ISD::DEC ||
16140 Opc == X86ISD::OR ||
16141 Opc == X86ISD::XOR ||
16142 Opc == X86ISD::AND))
16143 return true;
16145 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
16146 return true;
16148 return false;
16149 }
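// isTruncWithZeroHighBitsInput - Return true if V is a truncate whose dropped
// high bits are known to be zero, so the truncate can safely be looked through.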
16151 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
16152 if (V.getOpcode() != ISD::TRUNCATE)
16153 return false;
16155 SDValue VOp0 = V.getOperand(0);
16156 unsigned InBits = VOp0.getValueSizeInBits();
16157 unsigned Bits = V.getValueSizeInBits();
16158 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
16161 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
16162 bool addTest = true;
16163 SDValue Cond = Op.getOperand(0);
16164 SDValue Op1 = Op.getOperand(1);
16165 SDValue Op2 = Op.getOperand(2);
16166 SDLoc DL(Op);
16167 EVT VT = Op1.getValueType();
16168 SDValue CC;
16170 // Lower fp selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
16171 // are available. Otherwise fp cmovs get lowered into a less efficient branch
16172 // sequence later on.
16173 if (Cond.getOpcode() == ISD::SETCC &&
16174 ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
16175 (Subtarget->hasSSE1() && VT == MVT::f32)) &&
16176 VT == Cond.getOperand(0).getValueType() && Cond->hasOneUse()) {
16177 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
16178 int SSECC = translateX86FSETCC(
16179 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
16181 if (SSECC != 8) {
16182 if (Subtarget->hasAVX512()) {
16183 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
16184 DAG.getConstant(SSECC, MVT::i8));
16185 return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
16186 }
16187 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
16188 DAG.getConstant(SSECC, MVT::i8));
16189 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
16190 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
16191 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
16192 }
16193 }
16195 if (Cond.getOpcode() == ISD::SETCC) {
16196 SDValue NewCond = LowerSETCC(Cond, DAG);
16197 if (NewCond.getNode())
16198 Cond = NewCond;
16199 }
16201 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
16202 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
16203 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
16204 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
16205 if (Cond.getOpcode() == X86ISD::SETCC &&
16206 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
16207 isZero(Cond.getOperand(1).getOperand(1))) {
16208 SDValue Cmp = Cond.getOperand(1);
16210 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
16212 if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
16213 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
16214 SDValue Y = isAllOnes(Op2) ? Op1 : Op2;
16216 SDValue CmpOp0 = Cmp.getOperand(0);
16217 // Apply further optimizations for special cases
16218 // (select (x != 0), -1, 0) -> neg & sbb
16219 // (select (x == 0), 0, -1) -> neg & sbb
16220 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y))
16221 if (YC->isNullValue() &&
16222 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) {
16223 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
16224 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
16225 DAG.getConstant(0, CmpOp0.getValueType()),
16226 CmpOp0);
16227 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16228 DAG.getConstant(X86::COND_B, MVT::i8),
16229 SDValue(Neg.getNode(), 1));
16230 return Res;
16231 }
16233 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
16234 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
16235 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16237 SDValue Res = // Res = 0 or -1.
16238 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16239 DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
16241 if (isAllOnes(Op1) != (CondCode == X86::COND_E))
16242 Res = DAG.getNOT(DL, Res, Res.getValueType());
16244 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
16245 if (!N2C || !N2C->isNullValue())
16246 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
16247 return Res;
16248 }
16249 }
16251 // Look past (and (setcc_carry (cmp ...)), 1).
16252 if (Cond.getOpcode() == ISD::AND &&
16253 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16254 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16255 if (C && C->getAPIntValue() == 1)
16256 Cond = Cond.getOperand(0);
16259 // If condition flag is set by a X86ISD::CMP, then use it as the condition
16260 // setting operand in place of the X86ISD::SETCC.
16261 unsigned CondOpcode = Cond.getOpcode();
16262 if (CondOpcode == X86ISD::SETCC ||
16263 CondOpcode == X86ISD::SETCC_CARRY) {
16264 CC = Cond.getOperand(0);
16266 SDValue Cmp = Cond.getOperand(1);
16267 unsigned Opc = Cmp.getOpcode();
16268 MVT VT = Op.getSimpleValueType();
16270 bool IllegalFPCMov = false;
16271 if (VT.isFloatingPoint() && !VT.isVector() &&
16272 !isScalarFPTypeInSSEReg(VT)) // FPStack?
16273 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
16275 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
16276 Opc == X86ISD::BT) { // FIXME
16277 Cond = Cmp;
16278 addTest = false;
16279 }
16280 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16281 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16282 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16283 Cond.getOperand(0).getValueType() != MVT::i8)) {
16284 SDValue LHS = Cond.getOperand(0);
16285 SDValue RHS = Cond.getOperand(1);
16286 unsigned X86Opcode;
16287 unsigned X86Cond;
16288 SDVTList VTs;
16289 switch (CondOpcode) {
16290 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16291 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16292 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16293 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16294 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16295 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16296 default: llvm_unreachable("unexpected overflowing operator");
16297 }
16298 if (CondOpcode == ISD::UMULO)
16299 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16300 MVT::i32);
16301 else
16302 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16304 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
16306 if (CondOpcode == ISD::UMULO)
16307 Cond = X86Op.getValue(2);
16308 else
16309 Cond = X86Op.getValue(1);
16311 CC = DAG.getConstant(X86Cond, MVT::i8);
16312 addTest = false;
16313 }
16315 if (addTest) {
16316 // Look past the truncate if the high bits are known zero.
16317 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16318 Cond = Cond.getOperand(0);
16320 // We know the result of AND is compared against zero. Try to match
16321 // it to BT.
16322 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16323 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG);
16324 if (NewSetCC.getNode()) {
16325 CC = NewSetCC.getOperand(0);
16326 Cond = NewSetCC.getOperand(1);
16327 addTest = false;
16328 }
16329 }
16330 }
16332 if (addTest) {
16333 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16334 Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
16335 }
16337 // a < b ? -1 : 0 -> RES = ~setcc_carry
16338 // a < b ? 0 : -1 -> RES = setcc_carry
16339 // a >= b ? -1 : 0 -> RES = setcc_carry
16340 // a >= b ? 0 : -1 -> RES = ~setcc_carry
16341 if (Cond.getOpcode() == X86ISD::SUB) {
16342 Cond = ConvertCmpIfNecessary(Cond, DAG);
16343 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
16345 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
16346 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
16347 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16348 DAG.getConstant(X86::COND_B, MVT::i8), Cond);
16349 if (isAllOnes(Op1) != (CondCode == X86::COND_B))
16350 return DAG.getNOT(DL, Res, Res.getValueType());
16351 return Res;
16352 }
16353 }
16355 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate
16356 // widen the cmov and push the truncate through. This avoids introducing a new
16357 // branch during isel and doesn't add any extensions.
16358 if (Op.getValueType() == MVT::i8 &&
16359 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
16360 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
16361 if (T1.getValueType() == T2.getValueType() &&
16362 // Blacklist CopyFromReg to avoid partial register stalls.
16363 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
16364 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
16365 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond);
16366 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
16370 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
16371 // condition is true.
16372 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
16373 SDValue Ops[] = { Op2, Op1, CC, Cond };
16374 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops);
16375 }
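// LowerSIGN_EXTEND_AVX512 - Lower a vector sign extension when AVX-512 is
// available or when the source elements are i1 mask bits.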
16377 static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, const X86Subtarget *Subtarget,
16378 SelectionDAG &DAG) {
16379 MVT VT = Op->getSimpleValueType(0);
16380 SDValue In = Op->getOperand(0);
16381 MVT InVT = In.getSimpleValueType();
16382 MVT VTElt = VT.getVectorElementType();
16383 MVT InVTElt = InVT.getVectorElementType();
16384 SDLoc dl(Op);
16387 if ((InVTElt == MVT::i1) &&
16388 (((Subtarget->hasBWI() && Subtarget->hasVLX() &&
16389 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() <= 16)) ||
16391 ((Subtarget->hasBWI() && VT.is512BitVector() &&
16392 VTElt.getSizeInBits() <= 16)) ||
16394 ((Subtarget->hasDQI() && Subtarget->hasVLX() &&
16395 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) ||
16397 ((Subtarget->hasDQI() && VT.is512BitVector() &&
16398 VTElt.getSizeInBits() >= 32))))
16399 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16401 unsigned int NumElts = VT.getVectorNumElements();
16403 if (NumElts != 8 && NumElts != 16)
16404 return SDValue();
16406 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) {
16407 if (In.getOpcode() == X86ISD::VSEXT || In.getOpcode() == X86ISD::VZEXT)
16408 return DAG.getNode(In.getOpcode(), dl, VT, In.getOperand(0));
16409 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16412 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16413 assert (InVT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
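// For an i1 mask source, load an all-ones scalar from the constant pool and
// use VBROADCASTM so that lanes with a set mask bit become all ones, then
// truncate down to the requested element width if necessary.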
16415 MVT ExtVT = (NumElts == 8) ? MVT::v8i64 : MVT::v16i32;
16416 Constant *C = ConstantInt::get(*DAG.getContext(),
16417 APInt::getAllOnesValue(ExtVT.getScalarType().getSizeInBits()));
16419 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
16420 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
16421 SDValue Ld = DAG.getLoad(ExtVT.getScalarType(), dl, DAG.getEntryNode(), CP,
16422 MachinePointerInfo::getConstantPool(),
16423 false, false, false, Alignment);
16424 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, dl, ExtVT, In, Ld);
16425 if (VT.is512BitVector())
16426 return Brcst;
16427 return DAG.getNode(X86ISD::VTRUNC, dl, VT, Brcst);
16428 }
16430 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
16431 SelectionDAG &DAG) {
16432 MVT VT = Op->getSimpleValueType(0);
16433 SDValue In = Op->getOperand(0);
16434 MVT InVT = In.getSimpleValueType();
16435 SDLoc dl(Op);
16437 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
16438 return LowerSIGN_EXTEND_AVX512(Op, Subtarget, DAG);
16440 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) &&
16441 (VT != MVT::v8i32 || InVT != MVT::v8i16) &&
16442 (VT != MVT::v16i16 || InVT != MVT::v16i8))
16443 return SDValue();
16445 if (Subtarget->hasInt256())
16446 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16448 // Optimize vectors in AVX mode
16449 // Sign extend v8i16 to v8i32 and
16450 // v4i32 to v4i64
16452 // Divide input vector into two parts
16453 // for v4i32 the shuffle mask will be { 0, 1, -1, -1} {2, 3, -1, -1}
16454 // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32
16455 // concat the vectors to original VT
16457 unsigned NumElems = InVT.getVectorNumElements();
16458 SDValue Undef = DAG.getUNDEF(InVT);
16460 SmallVector<int,8> ShufMask1(NumElems, -1);
16461 for (unsigned i = 0; i != NumElems/2; ++i)
16462 ShufMask1[i] = i;
16464 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]);
16466 SmallVector<int,8> ShufMask2(NumElems, -1);
16467 for (unsigned i = 0; i != NumElems/2; ++i)
16468 ShufMask2[i] = i + NumElems/2;
16470 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]);
16472 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(),
16473 VT.getVectorNumElements()/2);
16475 OpLo = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpLo);
16476 OpHi = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpHi);
16478 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
16481 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
16482 // may emit an illegal shuffle but the expansion is still better than scalar
16483 // code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
16484 // we'll emit a shuffle and an arithmetic shift.
16485 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
16486 // TODO: It is possible to support ZExt by zeroing the undef values during
16487 // the shuffle phase or after the shuffle.
16488 static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
16489 SelectionDAG &DAG) {
16490 MVT RegVT = Op.getSimpleValueType();
16491 assert(RegVT.isVector() && "We only custom lower vector sext loads.");
16492 assert(RegVT.isInteger() &&
16493 "We only custom lower integer vector sext loads.");
16495 // Nothing useful we can do without SSE2 shuffles.
16496 assert(Subtarget->hasSSE2() && "We only custom lower sext loads with SSE2.");
16498 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
16499 SDLoc dl(Ld);
16500 EVT MemVT = Ld->getMemoryVT();
16501 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16502 unsigned RegSz = RegVT.getSizeInBits();
16504 ISD::LoadExtType Ext = Ld->getExtensionType();
16506 assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
16507 && "Only anyext and sext are currently implemented.");
16508 assert(MemVT != RegVT && "Cannot extend to the same type");
16509 assert(MemVT.isVector() && "Must load a vector from memory");
16511 unsigned NumElems = RegVT.getVectorNumElements();
16512 unsigned MemSz = MemVT.getSizeInBits();
16513 assert(RegSz > MemSz && "Register size must be greater than the mem size");
16515 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256()) {
16516 // The only way in which we have a legal 256-bit vector result but not the
16517 // integer 256-bit operations needed to directly lower a sextload is if we
16518 // have AVX1 but not AVX2. In that case, we can always emit a sextload to
16519 // a 128-bit vector and a normal sign_extend to 256-bits that should get
16520 // correctly legalized. We do this late to allow the canonical form of
16521 // sextload to persist throughout the rest of the DAG combiner -- it wants
16522 // to fold together any extensions it can, and so will fuse a sign_extend
16523 // of an sextload into a sextload targeting a wider value.
16524 SDValue Load;
16525 if (MemSz == 128) {
16526 // Just switch this to a normal load.
16527 assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
16528 "it must be a legal 128-bit vector "
16529 "type!");
16530 Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
16531 Ld->getPointerInfo(), Ld->isVolatile(), Ld->isNonTemporal(),
16532 Ld->isInvariant(), Ld->getAlignment());
16533 } else {
16534 assert(MemSz < 128 &&
16535 "Can't extend a type wider than 128 bits to a 256 bit vector!");
16536 // Do an sext load to a 128-bit vector type. We want to use the same
16537 // number of elements, but elements half as wide. This will end up being
16538 // recursively lowered by this routine, but will succeed as we definitely
16539 // have all the necessary features if we're using AVX1.
16540 EVT HalfEltVT =
16541 EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
16542 EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
16543 Load =
16544 DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
16545 Ld->getPointerInfo(), MemVT, Ld->isVolatile(),
16546 Ld->isNonTemporal(), Ld->isInvariant(),
16547 Ld->getAlignment());
16550 // Replace chain users with the new chain.
16551 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
16552 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
16554 // Finally, do a normal sign-extend to the desired register.
16555 return DAG.getSExtOrTrunc(Load, dl, RegVT);
16558 // All sizes must be a power of two.
16559 assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
16560 "Non-power-of-two elements are not custom lowered!");
16562 // Attempt to load the original value using scalar loads.
16563 // Find the largest scalar type that divides the total loaded size.
16564 MVT SclrLoadTy = MVT::i8;
16565 for (MVT Tp : MVT::integer_valuetypes()) {
16566 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
16567 SclrLoadTy = Tp;
16568 }
16569 }
16571 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64.
16572 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
16573 (64 <= MemSz))
16574 SclrLoadTy = MVT::f64;
16576 // Calculate the number of scalar loads that we need to perform
16577 // in order to load our vector from memory.
16578 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
16580 assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
16581 "Can only lower sext loads with a single scalar load!");
16583 unsigned loadRegZize = RegSz;
16584 if (Ext == ISD::SEXTLOAD && RegSz == 256)
16585 loadRegZize = 128;
16587 // Represent our vector as a sequence of elements which are the
16588 // largest scalar that we can load.
16589 EVT LoadUnitVecVT = EVT::getVectorVT(
16590 *DAG.getContext(), SclrLoadTy, loadRegZize / SclrLoadTy.getSizeInBits());
16592 // Represent the data using the same element type that is stored in
16593 // memory. In practice, we "widen" MemVT.
16594 EVT WideVecVT =
16595 EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
16596 loadRegZize / MemVT.getScalarType().getSizeInBits());
16598 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
16599 "Invalid vector type");
16601 // We can't shuffle using an illegal type.
16602 assert(TLI.isTypeLegal(WideVecVT) &&
16603 "We only lower types that form legal widened vector types");
16605 SmallVector<SDValue, 8> Chains;
16606 SDValue Ptr = Ld->getBasePtr();
16607 SDValue Increment =
16608 DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, TLI.getPointerTy());
16609 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
16611 for (unsigned i = 0; i < NumLoads; ++i) {
16612 // Perform a single load.
16613 SDValue ScalarLoad =
16614 DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
16615 Ld->isVolatile(), Ld->isNonTemporal(), Ld->isInvariant(),
16616 Ld->getAlignment());
16617 Chains.push_back(ScalarLoad.getValue(1));
16618 // Create the first element type using SCALAR_TO_VECTOR in order to avoid
16619 // another round of DAGCombining.
16620 if (i == 0)
16621 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
16622 else
16623 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
16624 ScalarLoad, DAG.getIntPtrConstant(i));
16626 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
16629 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
16631 // Bitcast the loaded value to a vector of the original element type, in
16632 // the size of the target vector type.
16633 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res);
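// SizeRatio is how many times each memory element must be widened to fill a
// register element, e.g. a sextload from v4i8 to v4i32 gives a ratio of 4.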
16634 unsigned SizeRatio = RegSz / MemSz;
16636 if (Ext == ISD::SEXTLOAD) {
16637 // If we have SSE4.1, we can directly emit a VSEXT node.
16638 if (Subtarget->hasSSE41()) {
16639 SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec);
16640 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16641 return Sext;
16642 }
16644 // Otherwise we'll shuffle the small elements in the high bits of the
16645 // larger type and perform an arithmetic shift. If the shift is not legal
16646 // it's better to scalarize.
16647 assert(TLI.isOperationLegalOrCustom(ISD::SRA, RegVT) &&
16648 "We can't implement a sext load without an arithmetic right shift!");
16650 // Redistribute the loaded elements into the different locations.
16651 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16652 for (unsigned i = 0; i != NumElems; ++i)
16653 ShuffleVec[i * SizeRatio + SizeRatio - 1] = i;
16655 SDValue Shuff = DAG.getVectorShuffle(
16656 WideVecVT, dl, SlicedVec, DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16658 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16660 // Build the arithmetic shift.
16661 unsigned Amt = RegVT.getVectorElementType().getSizeInBits() -
16662 MemVT.getVectorElementType().getSizeInBits();
16663 Shuff =
16664 DAG.getNode(ISD::SRA, dl, RegVT, Shuff, DAG.getConstant(Amt, RegVT));
16666 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16668 return Shuff;
16669 }
16670 // Redistribute the loaded elements into the different locations.
16671 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16672 for (unsigned i = 0; i != NumElems; ++i)
16673 ShuffleVec[i * SizeRatio] = i;
16675 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
16676 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16678 // Bitcast to the requested type.
16679 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16680 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16681 return Shuff;
16682 }
16684 // isAndOrOfSetCCs - Return true if node is an ISD::AND or
16685 // ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart
16686 // from the AND / OR.
16687 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
16688 Opc = Op.getOpcode();
16689 if (Opc != ISD::OR && Opc != ISD::AND)
16690 return false;
16691 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16692 Op.getOperand(0).hasOneUse() &&
16693 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
16694 Op.getOperand(1).hasOneUse());
16697 // isXor1OfSetCC - Return true if node is an ISD::XOR of a X86ISD::SETCC and
16698 // 1 and that the SETCC node has a single use.
16699 static bool isXor1OfSetCC(SDValue Op) {
16700 if (Op.getOpcode() != ISD::XOR)
16702 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
16703 if (N1C && N1C->getAPIntValue() == 1) {
16704 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16705 Op.getOperand(0).hasOneUse();
16706 }
16708 return false;
16709 }
16710 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
16711 bool addTest = true;
16712 SDValue Chain = Op.getOperand(0);
16713 SDValue Cond = Op.getOperand(1);
16714 SDValue Dest = Op.getOperand(2);
16715 SDLoc dl(Op);
16716 SDValue CC;
16717 bool Inverted = false;
16719 if (Cond.getOpcode() == ISD::SETCC) {
16720 // Check for setcc([su]{add,sub,mul}o == 0).
16721 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
16722 isa<ConstantSDNode>(Cond.getOperand(1)) &&
16723 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() &&
16724 Cond.getOperand(0).getResNo() == 1 &&
16725 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
16726 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
16727 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
16728 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
16729 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
16730 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
16731 Inverted = true;
16732 Cond = Cond.getOperand(0);
16733 } else {
16734 SDValue NewCond = LowerSETCC(Cond, DAG);
16735 if (NewCond.getNode())
16736 Cond = NewCond;
16737 }
16738 }
16739 #if 0
16740 // FIXME: LowerXALUO doesn't handle these!!
16741 else if (Cond.getOpcode() == X86ISD::ADD ||
16742 Cond.getOpcode() == X86ISD::SUB ||
16743 Cond.getOpcode() == X86ISD::SMUL ||
16744 Cond.getOpcode() == X86ISD::UMUL)
16745 Cond = LowerXALUO(Cond, DAG);
16746 #endif
16748 // Look past (and (setcc_carry (cmp ...)), 1).
16749 if (Cond.getOpcode() == ISD::AND &&
16750 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16751 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16752 if (C && C->getAPIntValue() == 1)
16753 Cond = Cond.getOperand(0);
16756 // If condition flag is set by a X86ISD::CMP, then use it as the condition
16757 // setting operand in place of the X86ISD::SETCC.
16758 unsigned CondOpcode = Cond.getOpcode();
16759 if (CondOpcode == X86ISD::SETCC ||
16760 CondOpcode == X86ISD::SETCC_CARRY) {
16761 CC = Cond.getOperand(0);
16763 SDValue Cmp = Cond.getOperand(1);
16764 unsigned Opc = Cmp.getOpcode();
16765 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
16766 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
16767 Cond = Cmp;
16768 addTest = false;
16769 } else {
16770 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
16771 default: break;
16772 case X86::COND_O:
16773 case X86::COND_B:
16774 // These can only come from an arithmetic instruction with overflow,
16775 // e.g. SADDO, UADDO.
16776 Cond = Cond.getNode()->getOperand(1);
16777 addTest = false;
16778 break;
16779 }
16780 }
16781 }
16782 CondOpcode = Cond.getOpcode();
16783 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16784 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16785 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16786 Cond.getOperand(0).getValueType() != MVT::i8)) {
16787 SDValue LHS = Cond.getOperand(0);
16788 SDValue RHS = Cond.getOperand(1);
16789 unsigned X86Opcode;
16790 unsigned X86Cond;
16791 SDVTList VTs;
16792 // Keep this in sync with LowerXALUO, otherwise we might create redundant
16793 // instructions that can't be removed afterwards (i.e. X86ISD::ADD and
16794 // X86ISD::INC).
16795 switch (CondOpcode) {
16796 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16797 case ISD::SADDO:
16798 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16799 if (C->isOne()) {
16800 X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
16801 break;
16802 }
16803 X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16804 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16805 case ISD::SSUBO:
16806 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16807 if (C->isOne()) {
16808 X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
16809 break;
16810 }
16811 X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16812 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16813 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16814 default: llvm_unreachable("unexpected overflowing operator");
16815 }
16816 if (Inverted)
16817 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
16818 if (CondOpcode == ISD::UMULO)
16819 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16820 MVT::i32);
16821 else
16822 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16824 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);
16826 if (CondOpcode == ISD::UMULO)
16827 Cond = X86Op.getValue(2);
16828 else
16829 Cond = X86Op.getValue(1);
16831 CC = DAG.getConstant(X86Cond, MVT::i8);
16832 addTest = false;
16833 } else {
16834 unsigned CondOpc;
16835 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
16836 SDValue Cmp = Cond.getOperand(0).getOperand(1);
16837 if (CondOpc == ISD::OR) {
16838 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
16839 // two branches instead of an explicit OR instruction with a
16840 // separate test.
16841 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16842 isX86LogicalCmp(Cmp)) {
16843 CC = Cond.getOperand(0).getOperand(0);
16844 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16845 Chain, Dest, CC, Cmp);
16846 CC = Cond.getOperand(1).getOperand(0);
16847 Cond = Cmp;
16848 addTest = false;
16849 }
16850 } else { // ISD::AND
16851 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
16852 // two branches instead of an explicit AND instruction with a
16853 // separate test. However, we only do this if this block doesn't
16854 // have a fall-through edge, because this requires an explicit
16855 // jmp when the condition is false.
16856 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16857 isX86LogicalCmp(Cmp) &&
16858 Op.getNode()->hasOneUse()) {
16859 X86::CondCode CCode =
16860 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16861 CCode = X86::GetOppositeBranchCondition(CCode);
16862 CC = DAG.getConstant(CCode, MVT::i8);
16863 SDNode *User = *Op.getNode()->use_begin();
16864 // Look for an unconditional branch following this conditional branch.
16865 // We need this because we need to reverse the successors in order
16866 // to implement FCMP_OEQ.
16867 if (User->getOpcode() == ISD::BR) {
16868 SDValue FalseBB = User->getOperand(1);
16869 SDNode *NewBR =
16870 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16871 assert(NewBR == User);
16872 (void)NewBR;
16873 Dest = FalseBB;
16875 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16876 Chain, Dest, CC, Cmp);
16877 X86::CondCode CCode =
16878 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
16879 CCode = X86::GetOppositeBranchCondition(CCode);
16880 CC = DAG.getConstant(CCode, MVT::i8);
16881 Cond = Cmp;
16882 addTest = false;
16883 }
16884 }
16885 }
16886 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
16887 // Recognize for xorb (setcc), 1 patterns. The xor inverts the condition.
16888 // It should be transformed during dag combiner except when the condition
16889 // is set by an arithmetic-with-overflow node.
16890 X86::CondCode CCode =
16891 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16892 CCode = X86::GetOppositeBranchCondition(CCode);
16893 CC = DAG.getConstant(CCode, MVT::i8);
16894 Cond = Cond.getOperand(0).getOperand(1);
16895 addTest = false;
16896 } else if (Cond.getOpcode() == ISD::SETCC &&
16897 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
16898 // For FCMP_OEQ, we can emit
16899 // two branches instead of an explicit AND instruction with a
16900 // separate test. However, we only do this if this block doesn't
16901 // have a fall-through edge, because this requires an explicit
16902 // jmp when the condition is false.
16903 if (Op.getNode()->hasOneUse()) {
16904 SDNode *User = *Op.getNode()->use_begin();
16905 // Look for an unconditional branch following this conditional branch.
16906 // We need this because we need to reverse the successors in order
16907 // to implement FCMP_OEQ.
16908 if (User->getOpcode() == ISD::BR) {
16909 SDValue FalseBB = User->getOperand(1);
16910 SDNode *NewBR =
16911 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16912 assert(NewBR == User);
16913 (void)NewBR;
16914 Dest = FalseBB;
16916 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16917 Cond.getOperand(0), Cond.getOperand(1));
16918 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16919 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16920 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16921 Chain, Dest, CC, Cmp);
16922 CC = DAG.getConstant(X86::COND_P, MVT::i8);
16923 Cond = Cmp;
16924 addTest = false;
16925 }
16926 }
16927 } else if (Cond.getOpcode() == ISD::SETCC &&
16928 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
16929 // For FCMP_UNE, we can emit
16930 // two branches instead of an explicit AND instruction with a
16931 // separate test. However, we only do this if this block doesn't
16932 // have a fall-through edge, because this requires an explicit
16933 // jmp when the condition is false.
16934 if (Op.getNode()->hasOneUse()) {
16935 SDNode *User = *Op.getNode()->use_begin();
16936 // Look for an unconditional branch following this conditional branch.
16937 // We need this because we need to reverse the successors in order
16938 // to implement FCMP_UNE.
16939 if (User->getOpcode() == ISD::BR) {
16940 SDValue FalseBB = User->getOperand(1);
16941 SDNode *NewBR =
16942 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16943 assert(NewBR == User);
16944 (void)NewBR;
16946 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16947 Cond.getOperand(0), Cond.getOperand(1));
16948 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16949 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16950 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16951 Chain, Dest, CC, Cmp);
16952 CC = DAG.getConstant(X86::COND_NP, MVT::i8);
16953 Cond = Cmp;
16954 addTest = false;
16955 Dest = FalseBB;
16956 }
16957 }
16958 }
16959 }
16961 if (addTest) {
16962 // Look past the truncate if the high bits are known zero.
16963 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16964 Cond = Cond.getOperand(0);
16966 // We know the result of AND is compared against zero. Try to match
16967 // it to BT.
16968 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16969 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
16970 if (NewSetCC.getNode()) {
16971 CC = NewSetCC.getOperand(0);
16972 Cond = NewSetCC.getOperand(1);
16973 addTest = false;
16974 }
16975 }
16976 }
16978 if (addTest) {
16979 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
16980 CC = DAG.getConstant(X86Cond, MVT::i8);
16981 Cond = EmitTest(Cond, X86Cond, dl, DAG);
16982 }
16983 Cond = ConvertCmpIfNecessary(Cond, DAG);
16984 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16985 Chain, Dest, CC, Cond);
16988 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
16989 // Calls to _alloca are needed to probe the stack when allocating more than 4k
16990 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
16991 // that the guard pages used by the OS virtual memory manager are allocated in
16992 // correct sequence.
16994 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
16995 SelectionDAG &DAG) const {
16996 MachineFunction &MF = DAG.getMachineFunction();
16997 bool SplitStack = MF.shouldSplitStack();
16998 bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMachO()) ||
16999 SplitStack;
17000 SDLoc dl(Op);
17002 if (!Lower) {
17003 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
17004 SDNode* Node = Op.getNode();
17006 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
17007 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
17008 " not tell us which reg is the stack pointer!");
17009 EVT VT = Node->getValueType(0);
17010 SDValue Tmp1 = SDValue(Node, 0);
17011 SDValue Tmp2 = SDValue(Node, 1);
17012 SDValue Tmp3 = Node->getOperand(2);
17013 SDValue Chain = Tmp1.getOperand(0);
17015 // Chain the dynamic stack allocation so that it doesn't modify the stack
17016 // pointer when other instructions are using the stack.
17017 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true),
17018 SDLoc(Node));
17020 SDValue Size = Tmp2.getOperand(1);
17021 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
17022 Chain = SP.getValue(1);
17023 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
17024 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
17025 unsigned StackAlign = TFI.getStackAlignment();
17026 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
17027 if (Align > StackAlign)
17028 Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
17029 DAG.getConstant(-(uint64_t)Align, VT));
17030 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
17032 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
17033 DAG.getIntPtrConstant(0, true), SDValue(),
17034 SDLoc(Node));
17036 SDValue Ops[2] = { Tmp1, Tmp2 };
17037 return DAG.getMergeValues(Ops, dl);
17038 }
17041 SDValue Chain = Op.getOperand(0);
17042 SDValue Size = Op.getOperand(1);
17043 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
17044 EVT VT = Op.getNode()->getValueType(0);
17046 bool Is64Bit = Subtarget->is64Bit();
17047 EVT SPTy = getPointerTy();
17049 if (SplitStack) {
17050 MachineRegisterInfo &MRI = MF.getRegInfo();
17052 if (Is64Bit) {
17053 // The 64 bit implementation of segmented stacks needs to clobber both r10
17054 // r11. This makes it impossible to use it along with nested parameters.
17055 const Function *F = MF.getFunction();
17057 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
17058 I != E; ++I)
17059 if (I->hasNestAttr())
17060 report_fatal_error("Cannot use segmented stacks with functions that "
17061 "have nested arguments.");
17062 }
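// Segmented stacks: pass the allocation size in a virtual register and emit
// X86ISD::SEG_ALLOCA, which is expanded later into the split-stack
// allocation sequence.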
17064 const TargetRegisterClass *AddrRegClass =
17065 getRegClassFor(getPointerTy());
17066 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
17067 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
17068 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
17069 DAG.getRegister(Vreg, SPTy));
17070 SDValue Ops1[2] = { Value, Chain };
17071 return DAG.getMergeValues(Ops1, dl);
17072 } else {
17073 SDValue Flag;
17074 const unsigned Reg = (Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX);
17076 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
17077 Flag = Chain.getValue(1);
17078 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
17080 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
17082 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
17083 unsigned SPReg = RegInfo->getStackRegister();
17084 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
17085 Chain = SP.getValue(1);
17087 if (Align) {
17088 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
17089 DAG.getConstant(-(uint64_t)Align, VT));
17090 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
17091 }
17093 SDValue Ops1[2] = { SP, Chain };
17094 return DAG.getMergeValues(Ops1, dl);
17098 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
17099 MachineFunction &MF = DAG.getMachineFunction();
17100 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
17102 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
17105 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
17106 // vastart just stores the address of the VarArgsFrameIndex slot into the
17107 // memory location argument.
17108 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
17110 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
17111 MachinePointerInfo(SV), false, false, 0);
17115 // gp_offset (0 - 6 * 8)
17116 // fp_offset (48 - 48 + 8 * 16)
17117 // overflow_arg_area (point to parameters coming in memory).
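  // A sketch of the x86-64 __va_list_tag being initialized here:
  //   offset  0: i32 gp_offset
  //   offset  4: i32 fp_offset
  //   offset  8: i8* overflow_arg_area
  //   offset 16: i8* reg_save_area
  // which matches the four stores below at offsets 0, 4, 8 and 16.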
17119 SmallVector<SDValue, 8> MemOps;
17120 SDValue FIN = Op.getOperand(1);
17122 SDValue Store = DAG.getStore(Op.getOperand(0), DL,
17123 DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
17125 FIN, MachinePointerInfo(SV), false, false, 0);
17126 MemOps.push_back(Store);
17129 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17130 FIN, DAG.getIntPtrConstant(4));
17131 Store = DAG.getStore(Op.getOperand(0), DL,
17132 DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
17134 FIN, MachinePointerInfo(SV, 4), false, false, 0);
17135 MemOps.push_back(Store);
17137 // Store ptr to overflow_arg_area
17138 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17139 FIN, DAG.getIntPtrConstant(4));
17140 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
17142 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
17143 MachinePointerInfo(SV, 8),
17145 MemOps.push_back(Store);
17147 // Store ptr to reg_save_area.
17148 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17149 FIN, DAG.getIntPtrConstant(8));
17150 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
17152 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN,
17153 MachinePointerInfo(SV, 16), false, false, 0);
17154 MemOps.push_back(Store);
17155 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
17158 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
17159 assert(Subtarget->is64Bit() &&
17160 "LowerVAARG only handles 64-bit va_arg!");
17161 assert((Subtarget->isTargetLinux() ||
17162 Subtarget->isTargetDarwin()) &&
17163 "Unhandled target in LowerVAARG");
17164 assert(Op.getNode()->getNumOperands() == 4);
17165 SDValue Chain = Op.getOperand(0);
17166 SDValue SrcPtr = Op.getOperand(1);
17167 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
17168 unsigned Align = Op.getConstantOperandVal(3);
17171 EVT ArgVT = Op.getNode()->getValueType(0);
17172 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
17173 uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy);
17176 // Decide which area this value should be read from.
17177 // TODO: Implement the AMD64 ABI in its entirety. This simple
17178 // selection mechanism works only for the basic types.
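  // For example, an f64 argument (8 bytes, floating point) selects ArgMode 2
  // and is fetched via fp_offset from the XMM save area, while an i32 or i64
  // argument selects ArgMode 1 and is fetched via gp_offset from the GPR save
  // area.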
17179 if (ArgVT == MVT::f80) {
17180 llvm_unreachable("va_arg for f80 not yet implemented");
17181 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
17182 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
17183 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
17184 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
17186 llvm_unreachable("Unhandled argument type in LowerVAARG");
17189 if (ArgMode == 2) {
17190 // Sanity Check: Make sure using fp_offset makes sense.
17191 assert(!DAG.getTarget().Options.UseSoftFloat &&
17192 !(DAG.getMachineFunction().getFunction()->hasFnAttribute(
17193 Attribute::NoImplicitFloat)) &&
17194 Subtarget->hasSSE1());
17197 // Insert VAARG_64 node into the DAG
17198 // VAARG_64 returns two values: Variable Argument Address, Chain
17199 SmallVector<SDValue, 11> InstOps;
17200 InstOps.push_back(Chain);
17201 InstOps.push_back(SrcPtr);
17202 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32));
17203 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8));
17204 InstOps.push_back(DAG.getConstant(Align, MVT::i32));
17205 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
17206 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
17207 VTs, InstOps, MVT::i64,
17208 MachinePointerInfo(SV),
17210 /*Volatile=*/false,
17212 /*WriteMem=*/true);
17213 Chain = VAARG.getValue(1);
17215 // Load the next argument and return it
17216 return DAG.getLoad(ArgVT, dl,
17219 MachinePointerInfo(),
17220 false, false, false, 0);
17223 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
17224 SelectionDAG &DAG) {
17225 // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
17226 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
17227 SDValue Chain = Op.getOperand(0);
17228 SDValue DstPtr = Op.getOperand(1);
17229 SDValue SrcPtr = Op.getOperand(2);
17230 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
17231 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
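  // The struct above is 4 + 4 + 8 + 8 = 24 bytes on x86-64, hence the
  // fixed 24-byte copy below.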
17234 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
17235 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
17237 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
17240 // getTargetVShiftByConstNode - Handle vector element shifts where the shift
17241 // amount is a constant. Takes immediate version of shift as input.
17242 static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
17243 SDValue SrcOp, uint64_t ShiftAmt,
17244 SelectionDAG &DAG) {
17245 MVT ElementType = VT.getVectorElementType();
17247 // Fold this packed shift into its first operand if ShiftAmt is 0.
17251 // Check for ShiftAmt >= element width
17252 if (ShiftAmt >= ElementType.getSizeInBits()) {
17253 if (Opc == X86ISD::VSRAI)
17254 ShiftAmt = ElementType.getSizeInBits() - 1;
17256 return DAG.getConstant(0, VT);
17259 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
17260 && "Unknown target vector shift-by-constant node");
17262 // Fold this packed vector shift into a build vector if SrcOp is a
17263 // vector of Constants or UNDEFs, and SrcOp valuetype is the same as VT.
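  // For example, (VSHLI v4i32 <1, 2, 3, 4>, 3) folds to the constant
  // build_vector <8, 16, 24, 32>.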
17264 if (VT == SrcOp.getSimpleValueType() &&
17265 ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
17266 SmallVector<SDValue, 8> Elts;
17267 unsigned NumElts = SrcOp->getNumOperands();
17268 ConstantSDNode *ND;
17271 default: llvm_unreachable(nullptr);
17272 case X86ISD::VSHLI:
17273 for (unsigned i=0; i!=NumElts; ++i) {
17274 SDValue CurrentOp = SrcOp->getOperand(i);
17275 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17276 Elts.push_back(CurrentOp);
17279 ND = cast<ConstantSDNode>(CurrentOp);
17280 const APInt &C = ND->getAPIntValue();
17281 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), ElementType));
17284 case X86ISD::VSRLI:
17285 for (unsigned i=0; i!=NumElts; ++i) {
17286 SDValue CurrentOp = SrcOp->getOperand(i);
17287 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17288 Elts.push_back(CurrentOp);
17291 ND = cast<ConstantSDNode>(CurrentOp);
17292 const APInt &C = ND->getAPIntValue();
17293 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), ElementType));
17296 case X86ISD::VSRAI:
17297 for (unsigned i=0; i!=NumElts; ++i) {
17298 SDValue CurrentOp = SrcOp->getOperand(i);
17299 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17300 Elts.push_back(CurrentOp);
17303 ND = cast<ConstantSDNode>(CurrentOp);
17304 const APInt &C = ND->getAPIntValue();
17305 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), ElementType));
17310 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
17313 return DAG.getNode(Opc, dl, VT, SrcOp, DAG.getConstant(ShiftAmt, MVT::i8));
17316 // getTargetVShiftNode - Handle vector element shifts where the shift amount
17317 // may or may not be a constant. Takes immediate version of shift as input.
17318 static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
17319 SDValue SrcOp, SDValue ShAmt,
17320 SelectionDAG &DAG) {
17321 MVT SVT = ShAmt.getSimpleValueType();
17322 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
17324 // Catch shift-by-constant.
17325 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
17326 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
17327 CShAmt->getZExtValue(), DAG);
17329 // Change opcode to non-immediate version
17331 default: llvm_unreachable("Unknown target vector shift node");
17332 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
17333 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
17334 case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
17337 const X86Subtarget &Subtarget =
17338 static_cast<const X86Subtarget &>(DAG.getSubtarget());
17339 if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
17340 ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
17341 // Let the shuffle legalizer expand this shift amount node.
17342 SDValue Op0 = ShAmt.getOperand(0);
17343 Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
17344 ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, &Subtarget, DAG);
17346 // Need to build a vector containing shift amount.
17347 // SSE/AVX packed shifts only use the lower 64 bits of the shift count.
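  // For example, an i32 amount c is built as the v4i32 vector
  // <c, 0, undef, undef>, so the low 64 bits of the count operand hold c
  // zero-extended to 64 bits.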
17348 SmallVector<SDValue, 4> ShOps;
17349 ShOps.push_back(ShAmt);
17350 if (SVT == MVT::i32) {
17351 ShOps.push_back(DAG.getConstant(0, SVT));
17352 ShOps.push_back(DAG.getUNDEF(SVT));
17354 ShOps.push_back(DAG.getUNDEF(SVT));
17356 MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
17357 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
17360 // The return type has to be a 128-bit type with the same element
17361 // type as the input type.
17362 MVT EltVT = VT.getVectorElementType();
17363 EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());
17365 ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt);
17366 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
17369 /// \brief Return (and \p Op, \p Mask) for compare instructions or
17370 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
17371 /// necessary casting for \p Mask when lowering masking intrinsics.
17372 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
17373 SDValue PreservedSrc,
17374 const X86Subtarget *Subtarget,
17375 SelectionDAG &DAG) {
17376 EVT VT = Op.getValueType();
17377 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(),
17378 MVT::i1, VT.getVectorNumElements());
17379 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17380 Mask.getValueType().getSizeInBits());
17383 assert(MaskVT.isSimple() && "invalid mask type");
17385 if (isAllOnes(Mask))
17388 // In the case when MaskVT equals v2i1 or v4i1, the low 2 or 4 elements
17389 // are extracted by EXTRACT_SUBVECTOR.
17390 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17391 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17392 DAG.getIntPtrConstant(0));
17394 switch (Op.getOpcode()) {
17396 case X86ISD::PCMPEQM:
17397 case X86ISD::PCMPGTM:
17399 case X86ISD::CMPMU:
17400 return DAG.getNode(ISD::AND, dl, VT, Op, VMask);
17402 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17403 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17404 return DAG.getNode(ISD::VSELECT, dl, VT, VMask, Op, PreservedSrc);
17407 /// \brief Creates an SDNode for a predicated scalar operation.
17408 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
17409 /// The mask is coming in as MVT::i8 and it should be truncated
17410 /// to MVT::i1 while lowering masking intrinsics.
17411 /// The main difference between ScalarMaskingNode and VectorMaskingNode is that
17412 /// this one uses "X86select" instead of "vselect". We just can't create the
17413 /// "vselect" node for a scalar instruction.
17414 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
17415 SDValue PreservedSrc,
17416 const X86Subtarget *Subtarget,
17417 SelectionDAG &DAG) {
17418 if (isAllOnes(Mask))
17421 EVT VT = Op.getValueType();
17423 // The mask should be of type MVT::i1
17424 SDValue IMask = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Mask);
17426 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17427 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17428 return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc);
17431 static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17432 SelectionDAG &DAG) {
17434 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17435 EVT VT = Op.getValueType();
17436 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
17438 switch(IntrData->Type) {
17439 case INTR_TYPE_1OP:
17440 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
17441 case INTR_TYPE_2OP:
17442 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17444 case INTR_TYPE_3OP:
17445 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17446 Op.getOperand(2), Op.getOperand(3));
17447 case INTR_TYPE_1OP_MASK_RM: {
17448 SDValue Src = Op.getOperand(1);
17449 SDValue Src0 = Op.getOperand(2);
17450 SDValue Mask = Op.getOperand(3);
17451 SDValue RoundingMode = Op.getOperand(4);
17452 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
17454 Mask, Src0, Subtarget, DAG);
17456 case INTR_TYPE_SCALAR_MASK_RM: {
17457 SDValue Src1 = Op.getOperand(1);
17458 SDValue Src2 = Op.getOperand(2);
17459 SDValue Src0 = Op.getOperand(3);
17460 SDValue Mask = Op.getOperand(4);
17461 SDValue RoundingMode = Op.getOperand(5);
17462 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
17464 Mask, Src0, Subtarget, DAG);
17466 case INTR_TYPE_2OP_MASK: {
17467 SDValue Mask = Op.getOperand(4);
17468 SDValue PassThru = Op.getOperand(3);
17469 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17470 if (IntrWithRoundingModeOpcode != 0) {
17471 unsigned Round = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
17472 if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION) {
17473 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17474 dl, Op.getValueType(),
17475 Op.getOperand(1), Op.getOperand(2),
17476 Op.getOperand(3), Op.getOperand(5)),
17477 Mask, PassThru, Subtarget, DAG);
17480 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
17483 Mask, PassThru, Subtarget, DAG);
17485 case FMA_OP_MASK: {
17486 SDValue Src1 = Op.getOperand(1);
17487 SDValue Src2 = Op.getOperand(2);
17488 SDValue Src3 = Op.getOperand(3);
17489 SDValue Mask = Op.getOperand(4);
17490 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17491 if (IntrWithRoundingModeOpcode != 0) {
17492 SDValue Rnd = Op.getOperand(5);
17493 if (cast<ConstantSDNode>(Rnd)->getZExtValue() !=
17494 X86::STATIC_ROUNDING::CUR_DIRECTION)
17495 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17496 dl, Op.getValueType(),
17497 Src1, Src2, Src3, Rnd),
17498 Mask, Src1, Subtarget, DAG);
17500 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
17501 dl, Op.getValueType(),
17503 Mask, Src1, Subtarget, DAG);
17506 case CMP_MASK_CC: {
17507 // Comparison intrinsics with masks.
17508 // Example of transformation:
17509 // (i8 (int_x86_avx512_mask_pcmpeq_q_128
17510 // (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
17511 // (i8 (bitcast
17512 //  (v8i1 (insert_subvector undef,
17513 // (v2i1 (and (PCMPEQM %a, %b),
17514 // (extract_subvector
17515 // (v8i1 (bitcast %mask)), 0))), 0))))
17516 EVT VT = Op.getOperand(1).getValueType();
17517 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17518 VT.getVectorNumElements());
17519 SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3);
17520 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17521 Mask.getValueType().getSizeInBits());
17523 if (IntrData->Type == CMP_MASK_CC) {
17524 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17525 Op.getOperand(2), Op.getOperand(3));
17527 assert(IntrData->Type == CMP_MASK && "Unexpected intrinsic type!");
17528 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17531 SDValue CmpMask = getVectorMaskingNode(Cmp, Mask,
17532 DAG.getTargetConstant(0, MaskVT),
17534 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
17535 DAG.getUNDEF(BitcastVT), CmpMask,
17536 DAG.getIntPtrConstant(0));
17537 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
17539 case COMI: { // Comparison intrinsics
17540 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
17541 SDValue LHS = Op.getOperand(1);
17542 SDValue RHS = Op.getOperand(2);
17543 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
17544 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
17545 SDValue Cond = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
17546 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17547 DAG.getConstant(X86CC, MVT::i8), Cond);
17548 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17551 return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
17552 Op.getOperand(1), Op.getOperand(2), DAG);
17554 return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl,
17555 Op.getSimpleValueType(),
17557 Op.getOperand(2), DAG),
17558 Op.getOperand(4), Op.getOperand(3), Subtarget,
17560 case COMPRESS_EXPAND_IN_REG: {
17561 SDValue Mask = Op.getOperand(3);
17562 SDValue DataToCompress = Op.getOperand(1);
17563 SDValue PassThru = Op.getOperand(2);
17564 if (isAllOnes(Mask)) // return data as is
17565 return Op.getOperand(1);
17566 EVT VT = Op.getValueType();
17567 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17568 VT.getVectorNumElements());
17569 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17570 Mask.getValueType().getSizeInBits());
17572 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17573 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17574 DAG.getIntPtrConstant(0));
17576 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToCompress,
17580 SDValue Mask = Op.getOperand(3);
17581 EVT VT = Op.getValueType();
17582 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17583 VT.getVectorNumElements());
17584 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17585 Mask.getValueType().getSizeInBits());
17587 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17588 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17589 DAG.getIntPtrConstant(0));
17590 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1),
17599 default: return SDValue(); // Don't custom lower most intrinsics.
17601 case Intrinsic::x86_avx512_mask_valign_q_512:
17602 case Intrinsic::x86_avx512_mask_valign_d_512:
17603 // Vector source operands are swapped.
17604 return getVectorMaskingNode(DAG.getNode(X86ISD::VALIGN, dl,
17605 Op.getValueType(), Op.getOperand(2),
17608 Op.getOperand(5), Op.getOperand(4),
17611 // ptest and testp intrinsics. The intrinsics these come from are designed to
17612 // return an integer value, not just an instruction, so lower it to the ptest
17613 // or testp pattern and a setcc for the result.
17614 case Intrinsic::x86_sse41_ptestz:
17615 case Intrinsic::x86_sse41_ptestc:
17616 case Intrinsic::x86_sse41_ptestnzc:
17617 case Intrinsic::x86_avx_ptestz_256:
17618 case Intrinsic::x86_avx_ptestc_256:
17619 case Intrinsic::x86_avx_ptestnzc_256:
17620 case Intrinsic::x86_avx_vtestz_ps:
17621 case Intrinsic::x86_avx_vtestc_ps:
17622 case Intrinsic::x86_avx_vtestnzc_ps:
17623 case Intrinsic::x86_avx_vtestz_pd:
17624 case Intrinsic::x86_avx_vtestc_pd:
17625 case Intrinsic::x86_avx_vtestnzc_pd:
17626 case Intrinsic::x86_avx_vtestz_ps_256:
17627 case Intrinsic::x86_avx_vtestc_ps_256:
17628 case Intrinsic::x86_avx_vtestnzc_ps_256:
17629 case Intrinsic::x86_avx_vtestz_pd_256:
17630 case Intrinsic::x86_avx_vtestc_pd_256:
17631 case Intrinsic::x86_avx_vtestnzc_pd_256: {
17632 bool IsTestPacked = false;
17635 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
17636 case Intrinsic::x86_avx_vtestz_ps:
17637 case Intrinsic::x86_avx_vtestz_pd:
17638 case Intrinsic::x86_avx_vtestz_ps_256:
17639 case Intrinsic::x86_avx_vtestz_pd_256:
17640 IsTestPacked = true; // Fallthrough
17641 case Intrinsic::x86_sse41_ptestz:
17642 case Intrinsic::x86_avx_ptestz_256:
17644 X86CC = X86::COND_E;
17646 case Intrinsic::x86_avx_vtestc_ps:
17647 case Intrinsic::x86_avx_vtestc_pd:
17648 case Intrinsic::x86_avx_vtestc_ps_256:
17649 case Intrinsic::x86_avx_vtestc_pd_256:
17650 IsTestPacked = true; // Fallthrough
17651 case Intrinsic::x86_sse41_ptestc:
17652 case Intrinsic::x86_avx_ptestc_256:
17654 X86CC = X86::COND_B;
17656 case Intrinsic::x86_avx_vtestnzc_ps:
17657 case Intrinsic::x86_avx_vtestnzc_pd:
17658 case Intrinsic::x86_avx_vtestnzc_ps_256:
17659 case Intrinsic::x86_avx_vtestnzc_pd_256:
17660 IsTestPacked = true; // Fallthrough
17661 case Intrinsic::x86_sse41_ptestnzc:
17662 case Intrinsic::x86_avx_ptestnzc_256:
17664 X86CC = X86::COND_A;
17668 SDValue LHS = Op.getOperand(1);
17669 SDValue RHS = Op.getOperand(2);
17670 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
17671 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
17672 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17673 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
17674 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17676 case Intrinsic::x86_avx512_kortestz_w:
17677 case Intrinsic::x86_avx512_kortestc_w: {
17678 unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz_w)? X86::COND_E: X86::COND_B;
17679 SDValue LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(1));
17680 SDValue RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(2));
17681 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17682 SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
17683 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test);
17684 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17687 case Intrinsic::x86_sse42_pcmpistria128:
17688 case Intrinsic::x86_sse42_pcmpestria128:
17689 case Intrinsic::x86_sse42_pcmpistric128:
17690 case Intrinsic::x86_sse42_pcmpestric128:
17691 case Intrinsic::x86_sse42_pcmpistrio128:
17692 case Intrinsic::x86_sse42_pcmpestrio128:
17693 case Intrinsic::x86_sse42_pcmpistris128:
17694 case Intrinsic::x86_sse42_pcmpestris128:
17695 case Intrinsic::x86_sse42_pcmpistriz128:
17696 case Intrinsic::x86_sse42_pcmpestriz128: {
17700 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
17701 case Intrinsic::x86_sse42_pcmpistria128:
17702 Opcode = X86ISD::PCMPISTRI;
17703 X86CC = X86::COND_A;
17705 case Intrinsic::x86_sse42_pcmpestria128:
17706 Opcode = X86ISD::PCMPESTRI;
17707 X86CC = X86::COND_A;
17709 case Intrinsic::x86_sse42_pcmpistric128:
17710 Opcode = X86ISD::PCMPISTRI;
17711 X86CC = X86::COND_B;
17713 case Intrinsic::x86_sse42_pcmpestric128:
17714 Opcode = X86ISD::PCMPESTRI;
17715 X86CC = X86::COND_B;
17717 case Intrinsic::x86_sse42_pcmpistrio128:
17718 Opcode = X86ISD::PCMPISTRI;
17719 X86CC = X86::COND_O;
17721 case Intrinsic::x86_sse42_pcmpestrio128:
17722 Opcode = X86ISD::PCMPESTRI;
17723 X86CC = X86::COND_O;
17725 case Intrinsic::x86_sse42_pcmpistris128:
17726 Opcode = X86ISD::PCMPISTRI;
17727 X86CC = X86::COND_S;
17729 case Intrinsic::x86_sse42_pcmpestris128:
17730 Opcode = X86ISD::PCMPESTRI;
17731 X86CC = X86::COND_S;
17733 case Intrinsic::x86_sse42_pcmpistriz128:
17734 Opcode = X86ISD::PCMPISTRI;
17735 X86CC = X86::COND_E;
17737 case Intrinsic::x86_sse42_pcmpestriz128:
17738 Opcode = X86ISD::PCMPESTRI;
17739 X86CC = X86::COND_E;
17742 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17743 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17744 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps);
17745 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17746 DAG.getConstant(X86CC, MVT::i8),
17747 SDValue(PCMP.getNode(), 1));
17748 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17751 case Intrinsic::x86_sse42_pcmpistri128:
17752 case Intrinsic::x86_sse42_pcmpestri128: {
17754 if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
17755 Opcode = X86ISD::PCMPISTRI;
17757 Opcode = X86ISD::PCMPESTRI;
17759 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17760 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17761 return DAG.getNode(Opcode, dl, VTs, NewOps);
17766 static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17767 SDValue Src, SDValue Mask, SDValue Base,
17768 SDValue Index, SDValue ScaleOp, SDValue Chain,
17769 const X86Subtarget * Subtarget) {
17771 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17772 assert(C && "Invalid scale type");
17773 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17774 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17775 Index.getSimpleValueType().getVectorNumElements());
17777 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17779 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17781 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17782 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
17783 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17784 SDValue Segment = DAG.getRegister(0, MVT::i32);
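  // The operands below describe a standard x86 memory reference,
  // base + index*scale + disp, with a zero displacement and no segment
  // override; the mask selects which lanes are actually gathered.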
17785 if (Src.getOpcode() == ISD::UNDEF)
17786 Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
17787 SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17788 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17789 SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
17790 return DAG.getMergeValues(RetOps, dl);
17793 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17794 SDValue Src, SDValue Mask, SDValue Base,
17795 SDValue Index, SDValue ScaleOp, SDValue Chain) {
17797 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17798 assert(C && "Invalid scale type");
17799 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17800 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17801 SDValue Segment = DAG.getRegister(0, MVT::i32);
17802 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17803 Index.getSimpleValueType().getVectorNumElements());
17805 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17807 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17809 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17810 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
17811 SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
17812 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17813 return SDValue(Res, 1);
17816 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17817 SDValue Mask, SDValue Base, SDValue Index,
17818 SDValue ScaleOp, SDValue Chain) {
17820 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17821 assert(C && "Invalid scale type");
17822 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17823 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17824 SDValue Segment = DAG.getRegister(0, MVT::i32);
17826 MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
17828 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17830 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17832 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17833 //SDVTList VTs = DAG.getVTList(MVT::Other);
17834 SDValue Ops[] = {MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17835 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
17836 return SDValue(Res, 0);
17839 // getReadPerformanceCounter - Handles the lowering of builtin intrinsics that
17840 // read performance monitor counters (x86_rdpmc).
17841 static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
17842 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17843 SmallVectorImpl<SDValue> &Results) {
17844 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17845 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
17848 // The ECX register is used to select the index of the performance counter
17850 SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
17852 SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);
17854 // Reads the content of a 64-bit performance counter and returns it in the
17855 // registers EDX:EAX.
17856 if (Subtarget->is64Bit()) {
17857 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17858 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17861 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17862 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17865 Chain = HI.getValue(1);
17867 if (Subtarget->is64Bit()) {
17868 // The EAX register is loaded with the low-order 32 bits. The EDX register
17869 // is loaded with the supported high-order bits of the counter.
17870 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17871 DAG.getConstant(32, MVT::i8));
17872 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17873 Results.push_back(Chain);
17877 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17878 SDValue Ops[] = { LO, HI };
17879 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17880 Results.push_back(Pair);
17881 Results.push_back(Chain);
17884 // getReadTimeStampCounter - Handles the lowering of builtin intrinsics that
17885 // read the time stamp counter (x86_rdtsc and x86_rdtscp). This function is
17886 // also used to custom lower READCYCLECOUNTER nodes.
17887 static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
17888 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17889 SmallVectorImpl<SDValue> &Results) {
17890 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
17891 SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
17894 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
17895 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
17896 // and the EAX register is loaded with the low-order 32 bits.
17897 if (Subtarget->is64Bit()) {
17898 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17899 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17902 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17903 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17906 SDValue Chain = HI.getValue(1);
17908 if (Opcode == X86ISD::RDTSCP_DAG) {
17909 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17911 // Instruction RDTSCP loads the IA32_TSC_AUX MSR (address C000_0103H) into
17912 // the ECX register. Add 'ecx' explicitly to the chain.
17913 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
17915 // Explicitly store the content of ECX at the location passed as input
17916 // to the 'rdtscp' intrinsic.
17917 Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
17918 MachinePointerInfo(), false, false, 0);
17921 if (Subtarget->is64Bit()) {
17922 // The EDX register is loaded with the high-order 32 bits of the MSR, and
17923 // the EAX register is loaded with the low-order 32 bits.
17924 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17925 DAG.getConstant(32, MVT::i8));
17926 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17927 Results.push_back(Chain);
17931 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17932 SDValue Ops[] = { LO, HI };
17933 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17934 Results.push_back(Pair);
17935 Results.push_back(Chain);
17938 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
17939 SelectionDAG &DAG) {
17940 SmallVector<SDValue, 2> Results;
17942 getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
17944 return DAG.getMergeValues(Results, DL);
17948 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17949 SelectionDAG &DAG) {
17950 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
17952 const IntrinsicData* IntrData = getIntrinsicWithChain(IntNo);
17957 switch(IntrData->Type) {
17959 llvm_unreachable("Unknown Intrinsic Type");
17963 // Emit the node with the right value type.
17964 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
17965 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
17967 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
17968 // Otherwise return the value from Rand, which is always 0, casted to i32.
17969 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
17970 DAG.getConstant(1, Op->getValueType(1)),
17971 DAG.getConstant(X86::COND_B, MVT::i32),
17972 SDValue(Result.getNode(), 1) };
17973 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
17974 DAG.getVTList(Op->getValueType(1), MVT::Glue),
17977 // Return { result, isValid, chain }.
17978 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
17979 SDValue(Result.getNode(), 2));
17982 // gather(v1, mask, index, base, scale);
17983 SDValue Chain = Op.getOperand(0);
17984 SDValue Src = Op.getOperand(2);
17985 SDValue Base = Op.getOperand(3);
17986 SDValue Index = Op.getOperand(4);
17987 SDValue Mask = Op.getOperand(5);
17988 SDValue Scale = Op.getOperand(6);
17989 return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain,
17993 // scatter(base, mask, index, v1, scale);
17994 SDValue Chain = Op.getOperand(0);
17995 SDValue Base = Op.getOperand(2);
17996 SDValue Mask = Op.getOperand(3);
17997 SDValue Index = Op.getOperand(4);
17998 SDValue Src = Op.getOperand(5);
17999 SDValue Scale = Op.getOperand(6);
18000 return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain);
18003 SDValue Hint = Op.getOperand(6);
18005 if (dyn_cast<ConstantSDNode> (Hint) == nullptr ||
18006 (HintVal = dyn_cast<ConstantSDNode> (Hint)->getZExtValue()) > 1)
18007 llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
18008 unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
18009 SDValue Chain = Op.getOperand(0);
18010 SDValue Mask = Op.getOperand(2);
18011 SDValue Index = Op.getOperand(3);
18012 SDValue Base = Op.getOperand(4);
18013 SDValue Scale = Op.getOperand(5);
18014 return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain);
18016 // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
18018 SmallVector<SDValue, 2> Results;
18019 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget, Results);
18020 return DAG.getMergeValues(Results, dl);
18022 // Read Performance Monitoring Counters.
18024 SmallVector<SDValue, 2> Results;
18025 getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
18026 return DAG.getMergeValues(Results, dl);
18028 // XTEST intrinsics.
18030 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
18031 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
18032 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
18033 DAG.getConstant(X86::COND_NE, MVT::i8),
18035 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
18036 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
18037 Ret, SDValue(InTrans.getNode(), 1));
18041 SmallVector<SDValue, 2> Results;
18042 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
18043 SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
18044 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
18045 DAG.getConstant(-1, MVT::i8));
18046 SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
18047 Op.getOperand(4), GenCF.getValue(1));
18048 SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
18049 Op.getOperand(5), MachinePointerInfo(),
18051 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
18052 DAG.getConstant(X86::COND_B, MVT::i8),
18054 Results.push_back(SetCC);
18055 Results.push_back(Store);
18056 return DAG.getMergeValues(Results, dl);
18058 case COMPRESS_TO_MEM: {
18060 SDValue Mask = Op.getOperand(4);
18061 SDValue DataToCompress = Op.getOperand(3);
18062 SDValue Addr = Op.getOperand(2);
18063 SDValue Chain = Op.getOperand(0);
18065 if (isAllOnes(Mask)) // return just a store
18066 return DAG.getStore(Chain, dl, DataToCompress, Addr,
18067 MachinePointerInfo(), false, false, 0);
18069 EVT VT = DataToCompress.getValueType();
18070 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18071 VT.getVectorNumElements());
18072 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18073 Mask.getValueType().getSizeInBits());
18074 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
18075 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
18076 DAG.getIntPtrConstant(0));
18078 SDValue Compressed = DAG.getNode(IntrData->Opc0, dl, VT, VMask,
18079 DataToCompress, DAG.getUNDEF(VT));
18080 return DAG.getStore(Chain, dl, Compressed, Addr,
18081 MachinePointerInfo(), false, false, 0);
18083 case EXPAND_FROM_MEM: {
18085 SDValue Mask = Op.getOperand(4);
18086 SDValue PathThru = Op.getOperand(3);
18087 SDValue Addr = Op.getOperand(2);
18088 SDValue Chain = Op.getOperand(0);
18089 EVT VT = Op.getValueType();
18091 if (isAllOnes(Mask)) // return just a load
18092 return DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(), false, false,
18094 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18095 VT.getVectorNumElements());
18096 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18097 Mask.getValueType().getSizeInBits());
18098 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
18099 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
18100 DAG.getIntPtrConstant(0));
18102 SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(),
18103 false, false, false, 0);
18105 SmallVector<SDValue, 2> Results;
18106 Results.push_back(DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToExpand,
18108 Results.push_back(Chain);
18109 return DAG.getMergeValues(Results, dl);
18114 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
18115 SelectionDAG &DAG) const {
18116 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
18117 MFI->setReturnAddressIsTaken(true);
18119 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
18122 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
18124 EVT PtrVT = getPointerTy();
18127 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
18128 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18129 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
18130 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
18131 DAG.getNode(ISD::ADD, dl, PtrVT,
18132 FrameAddr, Offset),
18133 MachinePointerInfo(), false, false, false, 0);
18136 // Just load the return address.
18137 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
18138 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
18139 RetAddrFI, MachinePointerInfo(), false, false, false, 0);
18142 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
18143 MachineFunction &MF = DAG.getMachineFunction();
18144 MachineFrameInfo *MFI = MF.getFrameInfo();
18145 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
18146 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18147 EVT VT = Op.getValueType();
18149 MFI->setFrameAddressIsTaken(true);
18151 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
18152 // Depth > 0 makes no sense on targets which use Windows unwind codes. It
18153 // is not possible to crawl up the stack without looking at the unwind codes.
18155 int FrameAddrIndex = FuncInfo->getFAIndex();
18156 if (!FrameAddrIndex) {
18157 // Set up a frame object for the return address.
18158 unsigned SlotSize = RegInfo->getSlotSize();
18159 FrameAddrIndex = MF.getFrameInfo()->CreateFixedObject(
18160 SlotSize, /*Offset=*/INT64_MIN, /*IsImmutable=*/false);
18161 FuncInfo->setFAIndex(FrameAddrIndex);
18163 return DAG.getFrameIndex(FrameAddrIndex, VT);
18166 unsigned FrameReg =
18167 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
18168 SDLoc dl(Op); // FIXME probably not meaningful
18169 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
18170 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
18171 (FrameReg == X86::EBP && VT == MVT::i32)) &&
18172 "Invalid Frame Register!");
18173 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
18175 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
18176 MachinePointerInfo(),
18177 false, false, false, 0);
18181 // FIXME? Maybe this could be a TableGen attribute on some registers and
18182 // this table could be generated automatically from RegInfo.
18183 unsigned X86TargetLowering::getRegisterByName(const char* RegName,
18185 unsigned Reg = StringSwitch<unsigned>(RegName)
18186 .Case("esp", X86::ESP)
18187 .Case("rsp", X86::RSP)
18191 report_fatal_error("Invalid register name global variable");
18194 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
18195 SelectionDAG &DAG) const {
18196 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18197 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
18200 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
18201 SDValue Chain = Op.getOperand(0);
18202 SDValue Offset = Op.getOperand(1);
18203 SDValue Handler = Op.getOperand(2);
18206 EVT PtrVT = getPointerTy();
18207 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18208 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
18209 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
18210 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
18211 "Invalid Frame Register!");
18212 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
18213 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
18215 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
18216 DAG.getIntPtrConstant(RegInfo->getSlotSize()));
18217 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
18218 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
18220 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
18222 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
18223 DAG.getRegister(StoreAddrReg, PtrVT));
18226 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
18227 SelectionDAG &DAG) const {
18229 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
18230 DAG.getVTList(MVT::i32, MVT::Other),
18231 Op.getOperand(0), Op.getOperand(1));
18234 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
18235 SelectionDAG &DAG) const {
18237 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
18238 Op.getOperand(0), Op.getOperand(1));
18241 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
18242 return Op.getOperand(0);
18245 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
18246 SelectionDAG &DAG) const {
18247 SDValue Root = Op.getOperand(0);
18248 SDValue Trmp = Op.getOperand(1); // trampoline
18249 SDValue FPtr = Op.getOperand(2); // nested function
18250 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
18253 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
18254 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
18256 if (Subtarget->is64Bit()) {
18257 SDValue OutChains[6];
18259 // Large code-model.
18260 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
18261 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
18263 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
18264 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
18266 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
18268 // Load the pointer to the nested function into R11.
18269 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
18270 SDValue Addr = Trmp;
18271 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18272 Addr, MachinePointerInfo(TrmpAddr),
18275 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18276 DAG.getConstant(2, MVT::i64));
18277 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
18278 MachinePointerInfo(TrmpAddr, 2),
18281 // Load the 'nest' parameter value into R10.
18282 // R10 is specified in X86CallingConv.td
18283 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
18284 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18285 DAG.getConstant(10, MVT::i64));
18286 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18287 Addr, MachinePointerInfo(TrmpAddr, 10),
18290 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18291 DAG.getConstant(12, MVT::i64));
18292 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
18293 MachinePointerInfo(TrmpAddr, 12),
18296 // Jump to the nested function.
18297 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
18298 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18299 DAG.getConstant(20, MVT::i64));
18300 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18301 Addr, MachinePointerInfo(TrmpAddr, 20),
18304 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
18305 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18306 DAG.getConstant(22, MVT::i64));
18307 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
18308 MachinePointerInfo(TrmpAddr, 22),
18311 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18313 const Function *Func =
18314 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
18315 CallingConv::ID CC = Func->getCallingConv();
18320 llvm_unreachable("Unsupported calling convention");
18321 case CallingConv::C:
18322 case CallingConv::X86_StdCall: {
18323 // Pass 'nest' parameter in ECX.
18324 // Must be kept in sync with X86CallingConv.td
18325 NestReg = X86::ECX;
18327 // Check that ECX wasn't needed by an 'inreg' parameter.
18328 FunctionType *FTy = Func->getFunctionType();
18329 const AttributeSet &Attrs = Func->getAttributes();
18331 if (!Attrs.isEmpty() && !Func->isVarArg()) {
18332 unsigned InRegCount = 0;
18335 for (FunctionType::param_iterator I = FTy->param_begin(),
18336 E = FTy->param_end(); I != E; ++I, ++Idx)
18337 if (Attrs.hasAttribute(Idx, Attribute::InReg))
18338 // FIXME: should only count parameters that are lowered to integers.
18339 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
18341 if (InRegCount > 2) {
18342 report_fatal_error("Nest register in use - reduce number of inreg"
18348 case CallingConv::X86_FastCall:
18349 case CallingConv::X86_ThisCall:
18350 case CallingConv::Fast:
18351 // Pass 'nest' parameter in EAX.
18352 // Must be kept in sync with X86CallingConv.td
18353 NestReg = X86::EAX;
18357 SDValue OutChains[4];
18358 SDValue Addr, Disp;
18360 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18361 DAG.getConstant(10, MVT::i32));
18362 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
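  // The four stores below assemble a 10-byte trampoline at Trmp (a sketch,
  // with B9 shown for NestReg == ECX; it is B8 for EAX):
  //   0: B9 <Nest, 4 bytes>   movl $Nest, %ecx
  //   5: E9 <Disp, 4 bytes>   jmp  FPtr
  // The jmp displacement is relative to the end of the jmp instruction,
  // i.e. Trmp + 10, which is why Disp above is FPtr - (Trmp + 10).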
18364 // This is storing the opcode for MOV32ri.
18365 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
18366 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
18367 OutChains[0] = DAG.getStore(Root, dl,
18368 DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
18369 Trmp, MachinePointerInfo(TrmpAddr),
18372 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18373 DAG.getConstant(1, MVT::i32));
18374 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
18375 MachinePointerInfo(TrmpAddr, 1),
18378 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
18379 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18380 DAG.getConstant(5, MVT::i32));
18381 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
18382 MachinePointerInfo(TrmpAddr, 5),
18385 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18386 DAG.getConstant(6, MVT::i32));
18387 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
18388 MachinePointerInfo(TrmpAddr, 6),
18391 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18395 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
18396 SelectionDAG &DAG) const {
18398 The rounding mode is in bits 11:10 of FPSR, and has the following
18400 00 Round to nearest
18405 FLT_ROUNDS, on the other hand, expects the following:
18412 To perform the conversion, we do:
18413 (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
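   As a quick check of the formula (using the standard x87 RC encodings
   00 = nearest, 01 = -inf, 10 = +inf, 11 = zero): for RC = 01 only bit 10
   is set, so ((0x000 >> 11) | (0x400 >> 9)) = 2 and (2 + 1) & 3 = 3, which
   is FLT_ROUNDS' value for "round toward -inf".  Overall the mapping is
   00 -> 1, 01 -> 3, 10 -> 2, 11 -> 0.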
18416 MachineFunction &MF = DAG.getMachineFunction();
18417 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
18418 unsigned StackAlignment = TFI.getStackAlignment();
18419 MVT VT = Op.getSimpleValueType();
18422 // Save FP Control Word to stack slot
18423 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
18424 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
18426 MachineMemOperand *MMO =
18427 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
18428 MachineMemOperand::MOStore, 2, 2);
18430 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
18431 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
18432 DAG.getVTList(MVT::Other),
18433 Ops, MVT::i16, MMO);
18435 // Load FP Control Word from stack slot
18436 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
18437 MachinePointerInfo(), false, false, false, 0);
18439 // Transform as necessary
18441 DAG.getNode(ISD::SRL, DL, MVT::i16,
18442 DAG.getNode(ISD::AND, DL, MVT::i16,
18443 CWD, DAG.getConstant(0x800, MVT::i16)),
18444 DAG.getConstant(11, MVT::i8));
18446 DAG.getNode(ISD::SRL, DL, MVT::i16,
18447 DAG.getNode(ISD::AND, DL, MVT::i16,
18448 CWD, DAG.getConstant(0x400, MVT::i16)),
18449 DAG.getConstant(9, MVT::i8));
18452 DAG.getNode(ISD::AND, DL, MVT::i16,
18453 DAG.getNode(ISD::ADD, DL, MVT::i16,
18454 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
18455 DAG.getConstant(1, MVT::i16)),
18456 DAG.getConstant(3, MVT::i16));
18458 return DAG.getNode((VT.getSizeInBits() < 16 ?
18459 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
18462 static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
18463 MVT VT = Op.getSimpleValueType();
18465 unsigned NumBits = VT.getSizeInBits();
18468 Op = Op.getOperand(0);
18469 if (VT == MVT::i8) {
18470 // Zero extend to i32 since there is not an i8 bsr.
18472 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18475 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
18476 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18477 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18479 // If src is zero (i.e. bsr sets ZF), returns NumBits.
18482 DAG.getConstant(NumBits+NumBits-1, OpVT),
18483 DAG.getConstant(X86::COND_E, MVT::i8),
18486 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
18488 // Finally xor with NumBits-1.
18489 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
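  // BSR yields the index of the most significant set bit, and for a
  // power-of-two bit width, index ^ (NumBits-1) == (NumBits-1) - index, which
  // is the leading-zero count.  For example, for i8 input 0x13 (binary
  // 00010011): BSR = 4, and 4 ^ 7 = 3 = ctlz.  The CMOV above substitutes
  // 2*NumBits-1 when the input is zero, so the XOR then yields NumBits.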
18492 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18496 static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
18497 MVT VT = Op.getSimpleValueType();
18499 unsigned NumBits = VT.getSizeInBits();
18502 Op = Op.getOperand(0);
18503 if (VT == MVT::i8) {
18504 // Zero extend to i32 since there is not an i8 bsr.
18506 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18509 // Issue a bsr (scan bits in reverse).
18510 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18511 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18513 // And xor with NumBits-1.
18514 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
18517 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18521 static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
18522 MVT VT = Op.getSimpleValueType();
18523 unsigned NumBits = VT.getSizeInBits();
18525 Op = Op.getOperand(0);
18527 // Issue a bsf (scan bits forward) which also sets EFLAGS.
18528 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
18529 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);
18531 // If src is zero (i.e. bsf sets ZF), returns NumBits.
18534 DAG.getConstant(NumBits, VT),
18535 DAG.getConstant(X86::COND_E, MVT::i8),
18538 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
18541 // Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
18542 // ones, and then concatenate the result back.
18543 static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
18544 MVT VT = Op.getSimpleValueType();
18546 assert(VT.is256BitVector() && VT.isInteger() &&
18547 "Unsupported value type for operation");
18549 unsigned NumElems = VT.getVectorNumElements();
18552 // Extract the LHS vectors
18553 SDValue LHS = Op.getOperand(0);
18554 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
18555 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
18557 // Extract the RHS vectors
18558 SDValue RHS = Op.getOperand(1);
18559 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
18560 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
18562 MVT EltVT = VT.getVectorElementType();
18563 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
18565 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
18566 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
18567 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
18570 static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) {
18571 assert(Op.getSimpleValueType().is256BitVector() &&
18572 Op.getSimpleValueType().isInteger() &&
18573 "Only handle AVX 256-bit vector integer operation");
18574 return Lower256IntArith(Op, DAG);
18577 static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
18578 assert(Op.getSimpleValueType().is256BitVector() &&
18579 Op.getSimpleValueType().isInteger() &&
18580 "Only handle AVX 256-bit vector integer operation");
18581 return Lower256IntArith(Op, DAG);
18584 static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
18585 SelectionDAG &DAG) {
18587 MVT VT = Op.getSimpleValueType();
18589 // Decompose 256-bit ops into smaller 128-bit ops.
18590 if (VT.is256BitVector() && !Subtarget->hasInt256())
18591 return Lower256IntArith(Op, DAG);
18593 SDValue A = Op.getOperand(0);
18594 SDValue B = Op.getOperand(1);
18596 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
18597 if (VT == MVT::v4i32) {
18598 assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() &&
18599 "Should not custom lower when pmuldq is available!");
18601 // Extract the odd parts.
18602 static const int UnpackMask[] = { 1, -1, 3, -1 };
18603 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
18604 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
18606 // Multiply the even parts.
18607 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B);
18608 // Now multiply odd parts.
18609 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds);
18611 Evens = DAG.getNode(ISD::BITCAST, dl, VT, Evens);
18612 Odds = DAG.getNode(ISD::BITCAST, dl, VT, Odds);
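    // A sketch of the lanes at this point, writing A = <a0,a1,a2,a3> and
    // B = <b0,b1,b2,b3> (little-endian lane order):
    //   Evens as v4i32 = <lo(a0*b0), hi(a0*b0), lo(a2*b2), hi(a2*b2)>
    //   Odds  as v4i32 = <lo(a1*b1), hi(a1*b1), lo(a3*b3), hi(a3*b3)>
    // The <0,4,2,6> shuffle below therefore selects the four low halves,
    // i.e. <a0*b0, a1*b1, a2*b2, a3*b3> modulo 2^32, which is the v4i32 mul.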
18614 // Merge the two vectors back together with a shuffle. This expands into 2
18616 static const int ShufMask[] = { 0, 4, 2, 6 };
18617 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
18620 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
18621 "Only know how to lower V2I64/V4I64/V8I64 multiply");
18623 // Ahi = psrlqi(a, 32);
18624 // Bhi = psrlqi(b, 32);
18626 // AloBlo = pmuludq(a, b);
18627 // AloBhi = pmuludq(a, Bhi);
18628 // AhiBlo = pmuludq(Ahi, b);
18630 // AloBhi = psllqi(AloBhi, 32);
18631 // AhiBlo = psllqi(AhiBlo, 32);
18632 // return AloBlo + AloBhi + AhiBlo;
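  // Why this works (per 64-bit lane): writing a = 2^32*Ahi + Alo and
  // b = 2^32*Bhi + Blo,
  //   a*b = Alo*Blo + 2^32*(Alo*Bhi + Ahi*Blo) + 2^64*Ahi*Bhi,
  // and the last term vanishes modulo 2^64, so only the three 32x32->64
  // pmuludq products below are needed.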
18634 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
18635 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
18637 // Bit cast to 32-bit vectors for MULUDQ
18638 EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 :
18639 (VT == MVT::v4i64) ? MVT::v8i32 : MVT::v16i32;
18640 A = DAG.getNode(ISD::BITCAST, dl, MulVT, A);
18641 B = DAG.getNode(ISD::BITCAST, dl, MulVT, B);
18642 Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi);
18643 Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi);
18645 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
18646 SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
18647 SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
18649 AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG);
18650 AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG);
18652 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
18653 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
18656 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
18657 assert(Subtarget->isTargetWin64() && "Unexpected target");
18658 EVT VT = Op.getValueType();
18659 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
18660 "Unexpected return type for lowering");
18664 switch (Op->getOpcode()) {
18665 default: llvm_unreachable("Unexpected request for libcall!");
18666 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
18667 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
18668 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
18669 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
18670 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
18671 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
SDLoc dl(Op);
SDValue InChain = DAG.getEntryNode();
18677 TargetLowering::ArgListTy Args;
18678 TargetLowering::ArgListEntry Entry;
18679 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
18680 EVT ArgVT = Op->getOperand(i).getValueType();
18681 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
18682 "Unexpected argument type for lowering");
18683 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
18684 Entry.Node = StackPtr;
InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MachinePointerInfo(), false, false, 16);
18687 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
18688 Entry.Ty = PointerType::get(ArgTy,0);
18689 Entry.isSExt = false;
18690 Entry.isZExt = false;
18691 Args.push_back(Entry);
SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC), getPointerTy());
18697 TargetLowering::CallLoweringInfo CLI(DAG);
18698 CLI.setDebugLoc(dl).setChain(InChain)
18699 .setCallee(getLibcallCallingConv(LC),
18700 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
18701 Callee, std::move(Args), 0)
18702 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
18704 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
18705 return DAG.getNode(ISD::BITCAST, dl, VT, CallInfo.first);
18708 static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
18709 SelectionDAG &DAG) {
18710 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
18711 EVT VT = Op0.getValueType();
18714 assert((VT == MVT::v4i32 && Subtarget->hasSSE2()) ||
18715 (VT == MVT::v8i32 && Subtarget->hasInt256()));
// PMULxD operations multiply each even value (starting at 0) of LHS with
// the related value of RHS and produce a widened result.
// E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
// => <2 x i64> <ae|cg>
// In other words, to have all the results, we need to perform two PMULxD:
// 1. one with the even values.
// 2. one with the odd values.
// To achieve #2, we need to place the odd values at an even position.
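// Illustrative note (added commentary): with <a|b|c|d> * <e|f|g|h>, the
// first PMULxD yields <a*e|c*g> and the second (on the shifted operands)
// yields <b*f|d*h>, each as 64-bit products. The interleaving shuffles
// below then gather the low halves into Lows = <lo(ae)|lo(bf)|lo(cg)|lo(dh)>
// and the high halves into Highs = <hi(ae)|hi(bf)|hi(cg)|hi(dh)>.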
18727 // Place the odd value at an even position (basically, shift all values 1
18728 // step to the left):
18729 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
18730 // <a|b|c|d> => <b|undef|d|undef>
18731 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0, Mask);
18732 // <e|f|g|h> => <f|undef|h|undef>
18733 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1, Mask);
// Emit two multiplies, one for the lower 2 ints and one for the higher 2 ints.
18737 MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
18738 bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
unsigned Opcode = (!IsSigned || !Subtarget->hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
18741 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18742 // => <2 x i64> <ae|cg>
18743 SDValue Mul1 = DAG.getNode(ISD::BITCAST, dl, VT,
18744 DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
18745 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
18746 // => <2 x i64> <bf|dh>
18747 SDValue Mul2 = DAG.getNode(ISD::BITCAST, dl, VT,
18748 DAG.getNode(Opcode, dl, MulVT, Odd0, Odd1));
18750 // Shuffle it back into the right order.
18751 SDValue Highs, Lows;
18752 if (VT == MVT::v8i32) {
18753 const int HighMask[] = {1, 9, 3, 11, 5, 13, 7, 15};
18754 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18755 const int LowMask[] = {0, 8, 2, 10, 4, 12, 6, 14};
18756 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18758 const int HighMask[] = {1, 5, 3, 7};
18759 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18760 const int LowMask[] = {0, 4, 2, 6};
18761 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
// If we have a signed multiply but no PMULDQ, fix up the high parts of the unsigned multiply.
18766 if (IsSigned && !Subtarget->hasSSE41()) {
SDValue ShAmt = DAG.getConstant(31, DAG.getTargetLoweringInfo().getShiftAmountTy(VT));
18769 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
18770 DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
18771 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
18772 DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0);
18774 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
18775 Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup);
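// Illustrative note (added commentary): the fixup relies on the identity
//   mulhs(a, b) = mulhu(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0)
// The arithmetic shift by 31 turns each negative lane into an all-ones mask,
// so the two ANDs compute exactly the (a < 0 ? b : 0) and (b < 0 ? a : 0)
// terms that get subtracted from the unsigned high half.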
// The first result of MUL_LOHI is actually the low value, followed by the high value.
18780 SDValue Ops[] = {Lows, Highs};
18781 return DAG.getMergeValues(Ops, dl);
18784 static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
18785 const X86Subtarget *Subtarget) {
18786 MVT VT = Op.getSimpleValueType();
18788 SDValue R = Op.getOperand(0);
18789 SDValue Amt = Op.getOperand(1);
18791 // Optimize shl/srl/sra with constant shift amount.
18792 if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
18793 if (auto *ShiftConst = BVAmt->getConstantSplatNode()) {
18794 uint64_t ShiftAmt = ShiftConst->getZExtValue();
18796 if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
18797 (Subtarget->hasInt256() &&
18798 (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18799 (Subtarget->hasAVX512() &&
18800 (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18801 if (Op.getOpcode() == ISD::SHL)
18802 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
18804 if (Op.getOpcode() == ISD::SRL)
18805 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
18807 if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64)
18808 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
18812 if (VT == MVT::v16i8) {
18813 if (Op.getOpcode() == ISD::SHL) {
18814 // Make a large shift.
18815 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18816 MVT::v8i16, R, ShiftAmt,
18818 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18819 // Zero out the rightmost bits.
18820 SmallVector<SDValue, 16> V(16,
18821 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18823 return DAG.getNode(ISD::AND, dl, VT, SHL,
18824 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18826 if (Op.getOpcode() == ISD::SRL) {
18827 // Make a large shift.
18828 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18829 MVT::v8i16, R, ShiftAmt,
18831 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18832 // Zero out the leftmost bits.
18833 SmallVector<SDValue, 16> V(16,
18834 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18836 return DAG.getNode(ISD::AND, dl, VT, SRL,
18837 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18839 if (Op.getOpcode() == ISD::SRA) {
18840 if (ShiftAmt == 7) {
18841 // R s>> 7 === R s< 0
18842 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18843 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18846 // R s>> a === ((R u>> a) ^ m) - m
18847 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18848 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt,
18850 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18851 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18852 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
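// Illustrative note (added commentary): with m = 0x80 >> ShiftAmt (the
// shifted-down sign bit), the identity
//   (x s>> a) == (((x u>> a) ^ m) - m)
// holds per 8-bit lane: XOR-ing with m flips the shifted-down sign bit, and
// subtracting m then propagates it through the high bits, i.e. performs the
// sign extension that the logical shift left out.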
18855 llvm_unreachable("Unknown shift opcode.");
18858 if (Subtarget->hasInt256() && VT == MVT::v32i8) {
18859 if (Op.getOpcode() == ISD::SHL) {
18860 // Make a large shift.
18861 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18862 MVT::v16i16, R, ShiftAmt,
18864 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18865 // Zero out the rightmost bits.
18866 SmallVector<SDValue, 32> V(32,
18867 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18869 return DAG.getNode(ISD::AND, dl, VT, SHL,
18870 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18872 if (Op.getOpcode() == ISD::SRL) {
18873 // Make a large shift.
18874 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18875 MVT::v16i16, R, ShiftAmt,
18877 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18878 // Zero out the leftmost bits.
18879 SmallVector<SDValue, 32> V(32,
18880 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18882 return DAG.getNode(ISD::AND, dl, VT, SRL,
18883 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18885 if (Op.getOpcode() == ISD::SRA) {
18886 if (ShiftAmt == 7) {
18887 // R s>> 7 === R s< 0
18888 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18889 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18892 // R s>> a === ((R u>> a) ^ m) - m
18893 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18894 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt,
18896 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18897 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18898 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18901 llvm_unreachable("Unknown shift opcode.");
18906 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
18907 if (!Subtarget->is64Bit() &&
18908 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) &&
18909 Amt.getOpcode() == ISD::BITCAST &&
18910 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
18911 Amt = Amt.getOperand(0);
18912 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
18913 VT.getVectorNumElements();
18914 unsigned RatioInLog2 = Log2_32_Ceil(Ratio);
18915 uint64_t ShiftAmt = 0;
18916 for (unsigned i = 0; i != Ratio; ++i) {
18917 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i));
18921 ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2)));
18923 // Check remaining shift amounts.
18924 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
18925 uint64_t ShAmt = 0;
18926 for (unsigned j = 0; j != Ratio; ++j) {
18927 ConstantSDNode *C =
18928 dyn_cast<ConstantSDNode>(Amt.getOperand(i + j));
18932 ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2)));
18934 if (ShAmt != ShiftAmt)
18937 switch (Op.getOpcode()) {
18939 llvm_unreachable("Unknown shift opcode!");
18941 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
18944 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
18947 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
18955 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
18956 const X86Subtarget* Subtarget) {
18957 MVT VT = Op.getSimpleValueType();
18959 SDValue R = Op.getOperand(0);
18960 SDValue Amt = Op.getOperand(1);
18962 if ((VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) ||
18963 VT == MVT::v4i32 || VT == MVT::v8i16 ||
18964 (Subtarget->hasInt256() &&
18965 ((VT == MVT::v4i64 && Op.getOpcode() != ISD::SRA) ||
18966 VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18967 (Subtarget->hasAVX512() && (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18969 EVT EltVT = VT.getVectorElementType();
18971 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Amt)) {
18972 // Check if this build_vector node is doing a splat.
18973 // If so, then set BaseShAmt equal to the splat value.
18974 BaseShAmt = BV->getSplatValue();
18975 if (BaseShAmt && BaseShAmt.getOpcode() == ISD::UNDEF)
18976 BaseShAmt = SDValue();
18978 if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
18979 Amt = Amt.getOperand(0);
18981 ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt);
18982 if (SVN && SVN->isSplat()) {
18983 unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
18984 SDValue InVec = Amt.getOperand(0);
18985 if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
18986 assert((SplatIdx < InVec.getValueType().getVectorNumElements()) &&
18987 "Unexpected shuffle index found!");
18988 BaseShAmt = InVec.getOperand(SplatIdx);
18989 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
18990 if (ConstantSDNode *C =
18991 dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
18992 if (C->getZExtValue() == SplatIdx)
18993 BaseShAmt = InVec.getOperand(1);
18998 // Avoid introducing an extract element from a shuffle.
18999 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec,
19000 DAG.getIntPtrConstant(SplatIdx));
19004 if (BaseShAmt.getNode()) {
19005 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
19006 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
19007 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
19008 else if (EltVT.bitsLT(MVT::i32))
19009 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
19011 switch (Op.getOpcode()) {
19013 llvm_unreachable("Unknown shift opcode!");
19015 switch (VT.SimpleTy) {
19016 default: return SDValue();
19025 return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG);
19028 switch (VT.SimpleTy) {
19029 default: return SDValue();
19036 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG);
19039 switch (VT.SimpleTy) {
19040 default: return SDValue();
19049 return getTargetVShiftNode(X86ISD::VSRLI, dl, VT, R, BaseShAmt, DAG);
19055 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
19056 if (!Subtarget->is64Bit() &&
19057 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64) ||
19058 (Subtarget->hasAVX512() && VT == MVT::v8i64)) &&
19059 Amt.getOpcode() == ISD::BITCAST &&
19060 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
19061 Amt = Amt.getOperand(0);
19062 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
19063 VT.getVectorNumElements();
19064 std::vector<SDValue> Vals(Ratio);
19065 for (unsigned i = 0; i != Ratio; ++i)
19066 Vals[i] = Amt.getOperand(i);
19067 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
19068 for (unsigned j = 0; j != Ratio; ++j)
19069 if (Vals[j] != Amt.getOperand(i + j))
19072 switch (Op.getOpcode()) {
19074 llvm_unreachable("Unknown shift opcode!");
19076 return DAG.getNode(X86ISD::VSHL, dl, VT, R, Op.getOperand(1));
19078 return DAG.getNode(X86ISD::VSRL, dl, VT, R, Op.getOperand(1));
19080 return DAG.getNode(X86ISD::VSRA, dl, VT, R, Op.getOperand(1));
19087 static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
19088 SelectionDAG &DAG) {
19089 MVT VT = Op.getSimpleValueType();
19091 SDValue R = Op.getOperand(0);
19092 SDValue Amt = Op.getOperand(1);
19095 assert(VT.isVector() && "Custom lowering only for vector shifts!");
19096 assert(Subtarget->hasSSE2() && "Only custom lower when we have SSE2!");
19098 V = LowerScalarImmediateShift(Op, DAG, Subtarget);
19102 V = LowerScalarVariableShift(Op, DAG, Subtarget);
19106 if (Subtarget->hasAVX512() && (VT == MVT::v16i32 || VT == MVT::v8i64))
19108 // AVX2 has VPSLLV/VPSRAV/VPSRLV.
19109 if (Subtarget->hasInt256()) {
19110 if (Op.getOpcode() == ISD::SRL &&
19111 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
19112 VT == MVT::v4i64 || VT == MVT::v8i32))
19114 if (Op.getOpcode() == ISD::SHL &&
19115 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
19116 VT == MVT::v4i64 || VT == MVT::v8i32))
19118 if (Op.getOpcode() == ISD::SRA && (VT == MVT::v4i32 || VT == MVT::v8i32))
19122 // If possible, lower this packed shift into a vector multiply instead of
19123 // expanding it into a sequence of scalar shifts.
19124 // Do this only if the vector shift count is a constant build_vector.
19125 if (Op.getOpcode() == ISD::SHL &&
19126 (VT == MVT::v8i16 || VT == MVT::v4i32 ||
19127 (Subtarget->hasInt256() && VT == MVT::v16i16)) &&
19128 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
19129 SmallVector<SDValue, 8> Elts;
19130 EVT SVT = VT.getScalarType();
19131 unsigned SVTBits = SVT.getSizeInBits();
19132 const APInt &One = APInt(SVTBits, 1);
19133 unsigned NumElems = VT.getVectorNumElements();
19135 for (unsigned i=0; i !=NumElems; ++i) {
19136 SDValue Op = Amt->getOperand(i);
19137 if (Op->getOpcode() == ISD::UNDEF) {
19138 Elts.push_back(Op);
19142 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
19143 const APInt &C = APInt(SVTBits, ND->getAPIntValue().getZExtValue());
19144 uint64_t ShAmt = C.getZExtValue();
19145 if (ShAmt >= SVTBits) {
19146 Elts.push_back(DAG.getUNDEF(SVT));
19149 Elts.push_back(DAG.getConstant(One.shl(ShAmt), SVT));
19151 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
19152 return DAG.getNode(ISD::MUL, dl, VT, R, BV);
19155 // Lower SHL with variable shift amount.
19156 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
19157 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT));
19159 Op = DAG.getNode(ISD::ADD, dl, VT, Op, DAG.getConstant(0x3f800000U, VT));
19160 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
19161 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
19162 return DAG.getNode(ISD::MUL, dl, VT, Op, R);
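// Illustrative note (added commentary): the sequence above builds 2^amt per
// lane using the IEEE-754 single-precision layout. Shifting the shift amount
// left by 23 moves it into the exponent field, adding 0x3f800000 (the bit
// pattern of 1.0f) biases the exponent, and the bitcast-to-float followed by
// FP_TO_SINT yields exactly 2^amt (e.g. amt = 5 gives exponent 132, i.e. the
// float 32.0). Multiplying R by that power of two is the left shift.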
19165 // If possible, lower this shift as a sequence of two shifts by
19166 // constant plus a MOVSS/MOVSD instead of scalarizing it.
19168 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
19170 // Could be rewritten as:
19171 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
19173 // The advantage is that the two shifts from the example would be
19174 // lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
// the vector shift into four scalar shifts plus four pairs of vector insert/extract.
19177 if ((VT == MVT::v8i16 || VT == MVT::v4i32) &&
19178 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
19179 unsigned TargetOpcode = X86ISD::MOVSS;
19180 bool CanBeSimplified;
19181 // The splat value for the first packed shift (the 'X' from the example).
19182 SDValue Amt1 = Amt->getOperand(0);
19183 // The splat value for the second packed shift (the 'Y' from the example).
19184 SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) :
19185 Amt->getOperand(2);
19187 // See if it is possible to replace this node with a sequence of
19188 // two shifts followed by a MOVSS/MOVSD
19189 if (VT == MVT::v4i32) {
19190 // Check if it is legal to use a MOVSS.
19191 CanBeSimplified = Amt2 == Amt->getOperand(2) &&
19192 Amt2 == Amt->getOperand(3);
19193 if (!CanBeSimplified) {
19194 // Otherwise, check if we can still simplify this node using a MOVSD.
19195 CanBeSimplified = Amt1 == Amt->getOperand(1) &&
19196 Amt->getOperand(2) == Amt->getOperand(3);
19197 TargetOpcode = X86ISD::MOVSD;
19198 Amt2 = Amt->getOperand(2);
// Do similar checks for the case where the machine value type is MVT::v8i16.
19203 CanBeSimplified = Amt1 == Amt->getOperand(1);
19204 for (unsigned i=3; i != 8 && CanBeSimplified; ++i)
19205 CanBeSimplified = Amt2 == Amt->getOperand(i);
19207 if (!CanBeSimplified) {
19208 TargetOpcode = X86ISD::MOVSD;
19209 CanBeSimplified = true;
19210 Amt2 = Amt->getOperand(4);
19211 for (unsigned i=0; i != 4 && CanBeSimplified; ++i)
19212 CanBeSimplified = Amt1 == Amt->getOperand(i);
19213 for (unsigned j=4; j != 8 && CanBeSimplified; ++j)
19214 CanBeSimplified = Amt2 == Amt->getOperand(j);
19218 if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
19219 isa<ConstantSDNode>(Amt2)) {
19220 // Replace this node with two shifts followed by a MOVSS/MOVSD.
19221 EVT CastVT = MVT::v4i32;
SDValue Splat1 = DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), VT);
19224 SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
SDValue Splat2 = DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), VT);
19227 SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
19228 if (TargetOpcode == X86ISD::MOVSD)
19229 CastVT = MVT::v2i64;
19230 SDValue BitCast1 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift1);
19231 SDValue BitCast2 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift2);
19232 SDValue Result = getTargetShuffleNode(TargetOpcode, dl, CastVT, BitCast2,
19234 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
19238 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
19239 assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq.");
19242 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, VT));
19243 Op = DAG.getNode(ISD::BITCAST, dl, VT, Op);
19245 // Turn 'a' into a mask suitable for VSELECT
19246 SDValue VSelM = DAG.getConstant(0x80, VT);
19247 SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19248 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19250 SDValue CM1 = DAG.getConstant(0x0f, VT);
19251 SDValue CM2 = DAG.getConstant(0x3f, VT);
19253 // r = VSELECT(r, psllw(r & (char16)15, 4), a);
19254 SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1);
19255 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 4, DAG);
19256 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19257 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19260 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19261 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19262 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19264 // r = VSELECT(r, psllw(r & (char16)63, 2), a);
19265 M = DAG.getNode(ISD::AND, dl, VT, R, CM2);
19266 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 2, DAG);
19267 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19268 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19271 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19272 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19273 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19275 // return VSELECT(r, r+r, a);
19276 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel,
19277 DAG.getNode(ISD::ADD, dl, VT, R, R), R);
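// Illustrative note (added commentary): this emulates a per-byte variable
// left shift by testing the three significant bits of the shift amount one
// at a time. The amount is first shifted left by 5 so that its bit 2 lands
// in bit 7 of each byte; each PCMPEQ-against-0x80 round builds a lane mask
// from that bit, the VSELECT conditionally applies a shift by 4, then 2,
// then 1 (the final r+r), and adding Op to itself moves the next amount bit
// into position for the following round.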
19281 // It's worth extending once and using the v8i32 shifts for 16-bit types, but
19282 // the extra overheads to get from v16i8 to v8i32 make the existing SSE
19283 // solution better.
19284 if (Subtarget->hasInt256() && VT == MVT::v8i16) {
19285 MVT NewVT = VT == MVT::v8i16 ? MVT::v8i32 : MVT::v16i16;
unsigned ExtOpc = Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
19288 R = DAG.getNode(ExtOpc, dl, NewVT, R);
19289 Amt = DAG.getNode(ISD::ANY_EXTEND, dl, NewVT, Amt);
19290 return DAG.getNode(ISD::TRUNCATE, dl, VT,
19291 DAG.getNode(Op.getOpcode(), dl, NewVT, R, Amt));
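// Illustrative note (added commentary): SRA needs a sign extension here so
// the copied sign bits shift back in correctly, while SHL/SRL can use zero
// extension; the shift amount itself is any-extended because only its low
// bits matter.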
19294 // Decompose 256-bit shifts into smaller 128-bit shifts.
19295 if (VT.is256BitVector()) {
19296 unsigned NumElems = VT.getVectorNumElements();
19297 MVT EltVT = VT.getVectorElementType();
19298 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19300 // Extract the two vectors
19301 SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
19302 SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);
19304 // Recreate the shift amount vectors
19305 SDValue Amt1, Amt2;
19306 if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
19307 // Constant shift amount
19308 SmallVector<SDValue, 4> Amt1Csts;
19309 SmallVector<SDValue, 4> Amt2Csts;
19310 for (unsigned i = 0; i != NumElems/2; ++i)
19311 Amt1Csts.push_back(Amt->getOperand(i));
19312 for (unsigned i = NumElems/2; i != NumElems; ++i)
19313 Amt2Csts.push_back(Amt->getOperand(i));
19315 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
19316 Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt2Csts);
19318 // Variable shift amount
19319 Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
19320 Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
19323 // Issue new vector shifts for the smaller types
19324 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
19325 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);
19327 // Concatenate the result back
19328 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
19334 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
19335 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
19336 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
19337 // looks for this combo and may remove the "setcc" instruction if the "setcc"
19338 // has only one use.
19339 SDNode *N = Op.getNode();
19340 SDValue LHS = N->getOperand(0);
19341 SDValue RHS = N->getOperand(1);
19342 unsigned BaseOp = 0;
19345 switch (Op.getOpcode()) {
19346 default: llvm_unreachable("Unknown ovf instruction!");
// An add of one will be selected as an INC. Note that INC doesn't
// set CF, so we can't do this for UADDO.
19350 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19352 BaseOp = X86ISD::INC;
19353 Cond = X86::COND_O;
19356 BaseOp = X86ISD::ADD;
19357 Cond = X86::COND_O;
19360 BaseOp = X86ISD::ADD;
19361 Cond = X86::COND_B;
19364 // A subtract of one will be selected as a DEC. Note that DEC doesn't
19365 // set CF, so we can't do this for USUBO.
19366 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19368 BaseOp = X86ISD::DEC;
19369 Cond = X86::COND_O;
19372 BaseOp = X86ISD::SUB;
19373 Cond = X86::COND_O;
19376 BaseOp = X86ISD::SUB;
19377 Cond = X86::COND_B;
19380 BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL;
19381 Cond = X86::COND_O;
19383 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
19384 if (N->getValueType(0) == MVT::i8) {
19385 BaseOp = X86ISD::UMUL8;
19386 Cond = X86::COND_O;
SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0), MVT::i32);
19391 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);
SDValue SetCC = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
19395 DAG.getConstant(X86::COND_O, MVT::i32),
19396 SDValue(Sum.getNode(), 2));
19398 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19402 // Also sets EFLAGS.
19403 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
19404 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
SDValue SetCC = DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
19408 DAG.getConstant(Cond, MVT::i32),
19409 SDValue(Sum.getNode(), 1));
19411 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19414 // Sign extension of the low part of vector elements. This may be used either
19415 // when sign extend instructions are not available or if the vector element
19416 // sizes already match the sign-extended size. If the vector elements are in
19417 // their pre-extended size and sign extend instructions are available, that will
19418 // be handled by LowerSIGN_EXTEND.
19419 SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
19420 SelectionDAG &DAG) const {
19422 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
19423 MVT VT = Op.getSimpleValueType();
19425 if (!Subtarget->hasSSE2() || !VT.isVector())
19428 unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
19429 ExtraVT.getScalarType().getSizeInBits();
19431 switch (VT.SimpleTy) {
19432 default: return SDValue();
19435 if (!Subtarget->hasFp256())
19437 if (!Subtarget->hasInt256()) {
19438 // needs to be split
19439 unsigned NumElems = VT.getVectorNumElements();
19441 // Extract the LHS vectors
19442 SDValue LHS = Op.getOperand(0);
19443 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
19444 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
19446 MVT EltVT = VT.getVectorElementType();
19447 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19449 EVT ExtraEltVT = ExtraVT.getVectorElementType();
19450 unsigned ExtraNumElems = ExtraVT.getVectorNumElements();
19451 ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT,
19453 SDValue Extra = DAG.getValueType(ExtraVT);
19455 LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra);
19456 LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra);
19458 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);
19463 SDValue Op0 = Op.getOperand(0);
19465 // This is a sign extension of some low part of vector elements without
19466 // changing the size of the vector elements themselves:
19467 // Shift-Left + Shift-Right-Algebraic.
19468 SDValue Shl = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Op0,
19470 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Shl, BitsDiff,
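// Illustrative note (added commentary): per lane this is the familiar scalar
// idiom for sign-extending the low (width - BitsDiff) bits in place, e.g. for
// a 32-bit lane:  (int32_t)(x << d) >> d  with d = BitsDiff.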
19476 /// Returns true if the operand type is exactly twice the native width, and
19477 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
19478 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
19479 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
19480 bool X86TargetLowering::needsCmpXchgNb(const Type *MemType) const {
19481 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
19484 return !Subtarget->is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
19485 else if (OpWidth == 128)
19486 return Subtarget->hasCmpxchg16b();
19491 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
19492 return needsCmpXchgNb(SI->getValueOperand()->getType());
19495 // Note: this turns large loads into lock cmpxchg8b/16b.
19496 // FIXME: On 32 bits x86, fild/movq might be faster than lock cmpxchg8b.
19497 bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
19498 auto PTy = cast<PointerType>(LI->getPointerOperand()->getType());
19499 return needsCmpXchgNb(PTy->getElementType());
19502 bool X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
19503 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19504 const Type *MemType = AI->getType();
19506 // If the operand is too big, we must see if cmpxchg8/16b is available
19507 // and default to library calls otherwise.
19508 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19509 return needsCmpXchgNb(MemType);
19511 AtomicRMWInst::BinOp Op = AI->getOperation();
19514 llvm_unreachable("Unknown atomic operation");
19515 case AtomicRMWInst::Xchg:
19516 case AtomicRMWInst::Add:
19517 case AtomicRMWInst::Sub:
19518 // It's better to use xadd, xsub or xchg for these in all cases.
19520 case AtomicRMWInst::Or:
19521 case AtomicRMWInst::And:
19522 case AtomicRMWInst::Xor:
19523 // If the atomicrmw's result isn't actually used, we can just add a "lock"
19524 // prefix to a normal instruction for these operations.
19525 return !AI->use_empty();
19526 case AtomicRMWInst::Nand:
19527 case AtomicRMWInst::Max:
19528 case AtomicRMWInst::Min:
19529 case AtomicRMWInst::UMax:
19530 case AtomicRMWInst::UMin:
19531 // These always require a non-trivial set of data operations on x86. We must
19532 // use a cmpxchg loop.
19537 static bool hasMFENCE(const X86Subtarget& Subtarget) {
// Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
// no-sse2). There isn't any reason to disable it if the target processor
// supports it.
19541 return Subtarget.hasSSE2() || Subtarget.is64Bit();
19545 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
19546 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19547 const Type *MemType = AI->getType();
19548 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
19549 // there is no benefit in turning such RMWs into loads, and it is actually
// harmful as it introduces an mfence.
19551 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19554 auto Builder = IRBuilder<>(AI);
19555 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
19556 auto SynchScope = AI->getSynchScope();
19557 // We must restrict the ordering to avoid generating loads with Release or
19558 // ReleaseAcquire orderings.
19559 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
19560 auto Ptr = AI->getPointerOperand();
// Before the load we need a fence. Here is an example lifted from
// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
// is required:
// Thread 0:
//   x.store(1, relaxed);
//   r1 = y.fetch_add(0, release);
// Thread 1:
//   y.fetch_add(42, acquire);
//   r2 = x.load(relaxed);
19571 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
19572 // lowered to just a load without a fence. A mfence flushes the store buffer,
19573 // making the optimization clearly correct.
19574 // FIXME: it is required if isAtLeastRelease(Order) but it is not clear
// otherwise, we might be able to be more aggressive on relaxed idempotent
19576 // rmw. In practice, they do not look useful, so we don't try to be
19577 // especially clever.
19578 if (SynchScope == SingleThread) {
19579 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
19580 // the IR level, so we must wrap it in an intrinsic.
19582 } else if (hasMFENCE(*Subtarget)) {
19583 Function *MFence = llvm::Intrinsic::getDeclaration(M,
19584 Intrinsic::x86_sse2_mfence);
19585 Builder.CreateCall(MFence);
19587 // FIXME: it might make sense to use a locked operation here but on a
19588 // different cache-line to prevent cache-line bouncing. In practice it
19589 // is probably a small win, and x86 processors without mfence are rare
19590 // enough that we do not bother.
19594 // Finally we can emit the atomic load.
19595 LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
19596 AI->getType()->getPrimitiveSizeInBits());
19597 Loaded->setAtomic(Order, SynchScope);
19598 AI->replaceAllUsesWith(Loaded);
19599 AI->eraseFromParent();
19603 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
19604 SelectionDAG &DAG) {
19606 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
19607 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
19608 SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
19609 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
19611 // The only fence that needs an instruction is a sequentially-consistent
19612 // cross-thread fence.
19613 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
19614 if (hasMFENCE(*Subtarget))
19615 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
19617 SDValue Chain = Op.getOperand(0);
19618 SDValue Zero = DAG.getConstant(0, MVT::i32);
SDValue Ops[] = {
DAG.getRegister(X86::ESP, MVT::i32), // Base
19621 DAG.getTargetConstant(1, MVT::i8), // Scale
19622 DAG.getRegister(0, MVT::i32), // Index
19623 DAG.getTargetConstant(0, MVT::i32), // Disp
DAG.getRegister(0, MVT::i32), // Segment.
Zero,
Chain
};
19628 SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
19629 return SDValue(Res, 0);
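// Illustrative note (added commentary): on targets without MFENCE, any
// LOCK-prefixed read-modify-write acts as a full barrier, so the node built
// above is effectively "lock or dword ptr [esp], 0": it drains the store
// buffer like a fence while leaving the stack slot unchanged.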
19632 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
19633 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
19636 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
19637 SelectionDAG &DAG) {
19638 MVT T = Op.getSimpleValueType();
19642 switch(T.SimpleTy) {
19643 default: llvm_unreachable("Invalid value type!");
19644 case MVT::i8: Reg = X86::AL; size = 1; break;
19645 case MVT::i16: Reg = X86::AX; size = 2; break;
19646 case MVT::i32: Reg = X86::EAX; size = 4; break;
19648 assert(Subtarget->is64Bit() && "Node not type legal!");
19649 Reg = X86::RAX; size = 8;
19652 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
19653 Op.getOperand(2), SDValue());
19654 SDValue Ops[] = { cpIn.getValue(0),
19657 DAG.getTargetConstant(size, MVT::i8),
19658 cpIn.getValue(1) };
19659 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
19660 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
19661 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
SDValue cpOut = DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
19666 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
19667 MVT::i32, cpOut.getValue(2));
19668 SDValue Success = DAG.getNode(X86ISD::SETCC, DL, Op->getValueType(1),
19669 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
19671 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
19672 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
19673 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
19677 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
19678 SelectionDAG &DAG) {
19679 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
19680 MVT DstVT = Op.getSimpleValueType();
19682 if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) {
19683 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
19684 if (DstVT != MVT::f64)
19685 // This conversion needs to be expanded.
19688 SDValue InVec = Op->getOperand(0);
19690 unsigned NumElts = SrcVT.getVectorNumElements();
19691 EVT SVT = SrcVT.getVectorElementType();
// Widen the input vector in the case of MVT::v2i32.
// Example: from MVT::v2i32 to MVT::v4i32.
19695 SmallVector<SDValue, 16> Elts;
19696 for (unsigned i = 0, e = NumElts; i != e; ++i)
19697 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
19698 DAG.getIntPtrConstant(i)));
19700 // Explicitly mark the extra elements as Undef.
19701 SDValue Undef = DAG.getUNDEF(SVT);
19702 for (unsigned i = NumElts, e = NumElts * 2; i != e; ++i)
19703 Elts.push_back(Undef);
19705 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
19706 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
19707 SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
19708 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
19709 DAG.getIntPtrConstant(0));
19712 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
19713 Subtarget->hasMMX() && "Unexpected custom BITCAST");
19714 assert((DstVT == MVT::i64 ||
19715 (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
19716 "Unexpected custom BITCAST");
19717 // i64 <=> MMX conversions are Legal.
19718 if (SrcVT==MVT::i64 && DstVT.isVector())
19720 if (DstVT==MVT::i64 && SrcVT.isVector())
19722 // MMX <=> MMX conversions are Legal.
19723 if (SrcVT.isVector() && DstVT.isVector())
19725 // All other conversions need to be expanded.
19729 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
19730 SelectionDAG &DAG) {
19731 SDNode *Node = Op.getNode();
19734 Op = Op.getOperand(0);
19735 EVT VT = Op.getValueType();
19736 assert((VT.is128BitVector() || VT.is256BitVector()) &&
19737 "CTPOP lowering only implemented for 128/256-bit wide vector types");
19739 unsigned NumElts = VT.getVectorNumElements();
19740 EVT EltVT = VT.getVectorElementType();
19741 unsigned Len = EltVT.getSizeInBits();
19743 // This is the vectorized version of the "best" algorithm from
19744 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
19745 // with a minor tweak to use a series of adds + shifts instead of vector
19746 // multiplications. Implemented for the v2i64, v4i64, v4i32, v8i32 types:
19748 // v2i64, v4i64, v4i32 => Only profitable w/ popcnt disabled
19749 // v8i32 => Always profitable
// FIXME: There are a couple of possible improvements:
19753 // 1) Support for i8 and i16 vectors (needs measurements if popcnt enabled).
19754 // 2) Use strategies from http://wm.ite.pl/articles/sse-popcount.html
19756 assert(EltVT.isInteger() && (Len == 32 || Len == 64) && Len % 8 == 0 &&
19757 "CTPOP not implemented for this vector element type.");
// X86 canonicalizes ANDs to vXi64; generate the appropriate bitcasts to avoid
// extra legalization.
19761 bool NeedsBitcast = EltVT == MVT::i32;
19762 MVT BitcastVT = VT.is256BitVector() ? MVT::v4i64 : MVT::v2i64;
19764 SDValue Cst55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), EltVT);
19765 SDValue Cst33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), EltVT);
19766 SDValue Cst0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), EltVT);
19768 // v = v - ((v >> 1) & 0x55555555...)
19769 SmallVector<SDValue, 8> Ones(NumElts, DAG.getConstant(1, EltVT));
19770 SDValue OnesV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ones);
19771 SDValue Srl = DAG.getNode(ISD::SRL, dl, VT, Op, OnesV);
19773 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19775 SmallVector<SDValue, 8> Mask55(NumElts, Cst55);
19776 SDValue M55 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask55);
19778 M55 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M55);
19780 SDValue And = DAG.getNode(ISD::AND, dl, Srl.getValueType(), Srl, M55);
19781 if (VT != And.getValueType())
19782 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19783 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Op, And);
19785 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
19786 SmallVector<SDValue, 8> Mask33(NumElts, Cst33);
19787 SDValue M33 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask33);
19788 SmallVector<SDValue, 8> Twos(NumElts, DAG.getConstant(2, EltVT));
19789 SDValue TwosV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Twos);
19791 Srl = DAG.getNode(ISD::SRL, dl, VT, Sub, TwosV);
19792 if (NeedsBitcast) {
19793 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19794 M33 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M33);
19795 Sub = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Sub);
19798 SDValue AndRHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Srl, M33);
19799 SDValue AndLHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Sub, M33);
19800 if (VT != AndRHS.getValueType()) {
19801 AndRHS = DAG.getNode(ISD::BITCAST, dl, VT, AndRHS);
19802 AndLHS = DAG.getNode(ISD::BITCAST, dl, VT, AndLHS);
19804 SDValue Add = DAG.getNode(ISD::ADD, dl, VT, AndLHS, AndRHS);
19806 // v = (v + (v >> 4)) & 0x0F0F0F0F...
19807 SmallVector<SDValue, 8> Fours(NumElts, DAG.getConstant(4, EltVT));
19808 SDValue FoursV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Fours);
19809 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, FoursV);
19810 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19812 SmallVector<SDValue, 8> Mask0F(NumElts, Cst0F);
19813 SDValue M0F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask0F);
19814 if (NeedsBitcast) {
19815 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19816 M0F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M0F);
19818 And = DAG.getNode(ISD::AND, dl, M0F.getValueType(), Add, M0F);
19819 if (VT != And.getValueType())
19820 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19822 // The algorithm mentioned above uses:
19823 // v = (v * 0x01010101...) >> (Len - 8)
19825 // Change it to use vector adds + vector shifts which yield faster results on
19826 // Haswell than using vector integer multiplication.
19828 // For i32 elements:
19829 // v = v + (v >> 8)
19830 // v = v + (v >> 16)
19832 // For i64 elements:
19833 // v = v + (v >> 8)
19834 // v = v + (v >> 16)
19835 // v = v + (v >> 32)
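// Illustrative scalar reference (added commentary, i32 case) for the steps
// implemented below:
//   v = v - ((v >> 1) & 0x55555555);                  // 2-bit counts
//   v = (v & 0x33333333) + ((v >> 2) & 0x33333333);   // 4-bit counts
//   v = (v + (v >> 4)) & 0x0F0F0F0F;                  // byte counts
//   v = v + (v >> 8); v = v + (v >> 16);              // adds+shifts replace *0x01010101
//   return v & 0x3F;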
19838 SmallVector<SDValue, 8> Csts;
19839 for (unsigned i = 8; i <= Len/2; i *= 2) {
19840 Csts.assign(NumElts, DAG.getConstant(i, EltVT));
19841 SDValue CstsV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Csts);
19842 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, CstsV);
19843 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
// The result is in the least significant 6 bits for i32 and 7 bits for i64.
19848 SDValue Cst3F = DAG.getConstant(APInt(Len, Len == 32 ? 0x3F : 0x7F), EltVT);
19849 SmallVector<SDValue, 8> Cst3FV(NumElts, Cst3F);
19850 SDValue M3F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Cst3FV);
19851 if (NeedsBitcast) {
19852 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19853 M3F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M3F);
19855 And = DAG.getNode(ISD::AND, dl, M3F.getValueType(), Add, M3F);
19856 if (VT != And.getValueType())
19857 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19862 static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
19863 SDNode *Node = Op.getNode();
19865 EVT T = Node->getValueType(0);
19866 SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
19867 DAG.getConstant(0, T), Node->getOperand(2));
19868 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
19869 cast<AtomicSDNode>(Node)->getMemoryVT(),
19870 Node->getOperand(0),
19871 Node->getOperand(1), negOp,
19872 cast<AtomicSDNode>(Node)->getMemOperand(),
19873 cast<AtomicSDNode>(Node)->getOrdering(),
19874 cast<AtomicSDNode>(Node)->getSynchScope());
19877 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
19878 SDNode *Node = Op.getNode();
19880 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
19882 // Convert seq_cst store -> xchg
19883 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
19884 // FIXME: On 32-bit, store -> fist or movq would be more efficient
19885 // (The only way to get a 16-byte store is cmpxchg16b)
19886 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
19887 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
19888 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
19889 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
19890 cast<AtomicSDNode>(Node)->getMemoryVT(),
19891 Node->getOperand(0),
19892 Node->getOperand(1), Node->getOperand(2),
19893 cast<AtomicSDNode>(Node)->getMemOperand(),
19894 cast<AtomicSDNode>(Node)->getOrdering(),
19895 cast<AtomicSDNode>(Node)->getSynchScope());
19896 return Swap.getValue(1);
19898 // Other atomic stores have a simple pattern.
19902 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
19903 EVT VT = Op.getNode()->getSimpleValueType(0);
19905 // Let legalize expand this if it isn't a legal type yet.
19906 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
19909 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
19912 bool ExtraOp = false;
19913 switch (Op.getOpcode()) {
19914 default: llvm_unreachable("Invalid code");
19915 case ISD::ADDC: Opc = X86ISD::ADD; break;
19916 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
19917 case ISD::SUBC: Opc = X86ISD::SUB; break;
19918 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
19922 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19924 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19925 Op.getOperand(1), Op.getOperand(2));
19928 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
19929 SelectionDAG &DAG) {
19930 assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit());
19932 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
19933 // which returns the values as { float, float } (in XMM0) or
19934 // { double, double } (which is returned in XMM0, XMM1).
19936 SDValue Arg = Op.getOperand(0);
19937 EVT ArgVT = Arg.getValueType();
19938 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
19940 TargetLowering::ArgListTy Args;
19941 TargetLowering::ArgListEntry Entry;
19945 Entry.isSExt = false;
19946 Entry.isZExt = false;
19947 Args.push_back(Entry);
19949 bool isF64 = ArgVT == MVT::f64;
19950 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
19951 // the small struct {f32, f32} is returned in (eax, edx). For f64,
19952 // the results are returned via SRet in memory.
19953 const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret";
19954 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19955 SDValue Callee = DAG.getExternalSymbol(LibcallName, TLI.getPointerTy());
19957 Type *RetTy = isF64
19958 ? (Type*)StructType::get(ArgTy, ArgTy, nullptr)
19959 : (Type*)VectorType::get(ArgTy, 4);
19961 TargetLowering::CallLoweringInfo CLI(DAG);
19962 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
19963 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);
19965 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
19968 // Returned in xmm0 and xmm1.
19969 return CallResult.first;
// Returned in bits 0:31 and 32:64 of xmm0.
19972 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19973 CallResult.first, DAG.getIntPtrConstant(0));
19974 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19975 CallResult.first, DAG.getIntPtrConstant(1));
19976 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
19977 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
19980 /// LowerOperation - Provide custom lowering hooks for some operations.
19982 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
19983 switch (Op.getOpcode()) {
19984 default: llvm_unreachable("Should not custom lower this!");
19985 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG);
19986 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
19987 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
19988 return LowerCMP_SWAP(Op, Subtarget, DAG);
19989 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
19990 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
19991 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG);
19992 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
19993 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
19994 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
19995 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
19996 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
19997 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
19998 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
19999 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
20000 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
20001 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
20002 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
20003 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
20004 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
20005 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
20006 case ISD::SHL_PARTS:
20007 case ISD::SRA_PARTS:
20008 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
20009 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
20010 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
20011 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
20012 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
20013 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
20014 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
20015 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
20016 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
20017 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
20018 case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG);
20020 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
20021 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
20022 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
20023 case ISD::SETCC: return LowerSETCC(Op, DAG);
20024 case ISD::SELECT: return LowerSELECT(Op, DAG);
20025 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
20026 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
20027 case ISD::VASTART: return LowerVASTART(Op, DAG);
20028 case ISD::VAARG: return LowerVAARG(Op, DAG);
20029 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
20030 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, Subtarget, DAG);
20031 case ISD::INTRINSIC_VOID:
20032 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
20033 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
20034 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
20035 case ISD::FRAME_TO_ARGS_OFFSET:
20036 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
20037 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
20038 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
20039 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
20040 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
20041 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
20042 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
20043 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
20044 case ISD::CTLZ: return LowerCTLZ(Op, DAG);
20045 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG);
20046 case ISD::CTTZ: return LowerCTTZ(Op, DAG);
20047 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
20048 case ISD::UMUL_LOHI:
20049 case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
20052 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
20058 case ISD::UMULO: return LowerXALUO(Op, DAG);
20059 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
20060 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
20064 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
20065 case ISD::ADD: return LowerADD(Op, DAG);
20066 case ISD::SUB: return LowerSUB(Op, DAG);
20067 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
20071 /// ReplaceNodeResults - Replace a node with an illegal result type
20072 /// with a new node built out of custom code.
20073 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
20074 SmallVectorImpl<SDValue>&Results,
20075 SelectionDAG &DAG) const {
20077 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20078 switch (N->getOpcode()) {
20080 llvm_unreachable("Do not know how to custom type legalize this operation!");
20081 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
20082 case X86ISD::FMINC:
20084 case X86ISD::FMAXC:
20085 case X86ISD::FMAX: {
20086 EVT VT = N->getValueType(0);
20087 if (VT != MVT::v2f32)
20088 llvm_unreachable("Unexpected type (!= v2f32) on FMIN/FMAX.");
20089 SDValue UNDEF = DAG.getUNDEF(VT);
20090 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
20091 N->getOperand(0), UNDEF);
20092 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
20093 N->getOperand(1), UNDEF);
20094 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
20097 case ISD::SIGN_EXTEND_INREG:
20102 // We don't want to expand or promote these.
20109 case ISD::UDIVREM: {
20110 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
20111 Results.push_back(V);
20114 case ISD::FP_TO_SINT:
20115 case ISD::FP_TO_UINT: {
20116 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
20118 if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType()))
20121 std::pair<SDValue,SDValue> Vals =
20122 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
20123 SDValue FIST = Vals.first, StackSlot = Vals.second;
20124 if (FIST.getNode()) {
20125 EVT VT = N->getValueType(0);
20126 // Return a load from the stack slot.
20127 if (StackSlot.getNode())
20128 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
20129 MachinePointerInfo(),
20130 false, false, false, 0));
20132 Results.push_back(FIST);
20136 case ISD::UINT_TO_FP: {
20137 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
20138 if (N->getOperand(0).getValueType() != MVT::v2i32 ||
20139 N->getValueType(0) != MVT::v2f32)
20141 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
20143 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
20145 SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias);
20146 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
20147 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias));
20148 Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or);
20149 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
20150 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
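// Illustrative note (added commentary): this is the classic exponent trick
// for unsigned i32 -> double. OR-ing the zero-extended value into the low
// 52 bits of the bit pattern 0x4330000000000000 (the double 2^52) produces
// the double 2^52 + x exactly, so subtracting the 2^52 bias recovers x as a
// double, which VFPROUND then narrows to v2f32.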
20153 case ISD::FP_ROUND: {
20154 if (!TLI.isTypeLegal(N->getOperand(0).getValueType()))
20156 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
20157 Results.push_back(V);
20160 case ISD::INTRINSIC_W_CHAIN: {
20161 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
20163 default : llvm_unreachable("Do not know how to custom type "
20164 "legalize this intrinsic operation!");
20165 case Intrinsic::x86_rdtsc:
20166 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
20168 case Intrinsic::x86_rdtscp:
20169 return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
20171 case Intrinsic::x86_rdpmc:
20172 return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
20175 case ISD::READCYCLECOUNTER: {
20176 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
20179 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
20180 EVT T = N->getValueType(0);
20181 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
20182 bool Regs64bit = T == MVT::i128;
20183 EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
20184 SDValue cpInL, cpInH;
20185 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
20186 DAG.getConstant(0, HalfT));
20187 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
20188 DAG.getConstant(1, HalfT));
20189 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
20190 Regs64bit ? X86::RAX : X86::EAX,
20192 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
20193 Regs64bit ? X86::RDX : X86::EDX,
20194 cpInH, cpInL.getValue(1));
20195 SDValue swapInL, swapInH;
20196 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
20197 DAG.getConstant(0, HalfT));
20198 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
20199 DAG.getConstant(1, HalfT));
20200 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl,
20201 Regs64bit ? X86::RBX : X86::EBX,
20202 swapInL, cpInH.getValue(1));
20203 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl,
20204 Regs64bit ? X86::RCX : X86::ECX,
20205 swapInH, swapInL.getValue(1));
20206 SDValue Ops[] = { swapInH.getValue(0),
20208 swapInH.getValue(1) };
20209 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
20210 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
20211 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG :
20212 X86ISD::LCMPXCHG8_DAG;
20213 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
20214 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
20215 Regs64bit ? X86::RAX : X86::EAX,
20216 HalfT, Result.getValue(1));
20217 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
20218 Regs64bit ? X86::RDX : X86::EDX,
20219 HalfT, cpOutL.getValue(2));
20220 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
20222 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
20223 MVT::i32, cpOutH.getValue(2));
20225 DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
20226 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
20227 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
20229 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
20230 Results.push_back(Success);
20231 Results.push_back(EFLAGS.getValue(1));
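// To summarize the expansion above: the low/high halves of the expected value
// go in EAX/EDX (RAX/RDX for the i128 case), the low/high halves of the
// desired value in EBX/ECX (RBX/RCX), the LCMPXCHG8/16 node emits the locked
// cmpxchg8b/cmpxchg16b, the old value is read back from EAX/EDX (RAX/RDX),
// and the success bit is recovered from ZF via a SETCC of COND_E on EFLAGS.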
20234 case ISD::ATOMIC_SWAP:
20235 case ISD::ATOMIC_LOAD_ADD:
20236 case ISD::ATOMIC_LOAD_SUB:
20237 case ISD::ATOMIC_LOAD_AND:
20238 case ISD::ATOMIC_LOAD_OR:
20239 case ISD::ATOMIC_LOAD_XOR:
20240 case ISD::ATOMIC_LOAD_NAND:
20241 case ISD::ATOMIC_LOAD_MIN:
20242 case ISD::ATOMIC_LOAD_MAX:
20243 case ISD::ATOMIC_LOAD_UMIN:
20244 case ISD::ATOMIC_LOAD_UMAX:
20245 case ISD::ATOMIC_LOAD: {
20246 // Delegate to generic TypeLegalization. Situations we can really handle
20247 // should have already been dealt with by AtomicExpandPass.cpp.
20250 case ISD::BITCAST: {
20251 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
20252 EVT DstVT = N->getValueType(0);
20253 EVT SrcVT = N->getOperand(0)->getValueType(0);
20255 if (SrcVT != MVT::f64 ||
20256 (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
20259 unsigned NumElts = DstVT.getVectorNumElements();
20260 EVT SVT = DstVT.getVectorElementType();
20261 EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
20262 SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
20263 MVT::v2f64, N->getOperand(0));
20264 SDValue ToVecInt = DAG.getNode(ISD::BITCAST, dl, WiderVT, Expanded);
20266 if (ExperimentalVectorWideningLegalization) {
20267 // If we are legalizing vectors by widening, we already have the desired
20268 // legal vector type, just return it.
20269 Results.push_back(ToVecInt);
20273 SmallVector<SDValue, 8> Elts;
20274 for (unsigned i = 0, e = NumElts; i != e; ++i)
20275 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
20276 ToVecInt, DAG.getIntPtrConstant(i)));
20278 Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
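// In short: the f64 source is placed in the low element of a v2f64, bitcast
// to an integer vector with twice as many elements as the requested type,
// and then either returned directly (when widening legalization is enabled)
// or narrowed back to DstVT by extracting the low NumElts elements and
// rebuilding the vector.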
20283 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
20285 default: return nullptr;
20286 case X86ISD::BSF: return "X86ISD::BSF";
20287 case X86ISD::BSR: return "X86ISD::BSR";
20288 case X86ISD::SHLD: return "X86ISD::SHLD";
20289 case X86ISD::SHRD: return "X86ISD::SHRD";
20290 case X86ISD::FAND: return "X86ISD::FAND";
20291 case X86ISD::FANDN: return "X86ISD::FANDN";
20292 case X86ISD::FOR: return "X86ISD::FOR";
20293 case X86ISD::FXOR: return "X86ISD::FXOR";
20294 case X86ISD::FSRL: return "X86ISD::FSRL";
20295 case X86ISD::FILD: return "X86ISD::FILD";
20296 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
20297 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
20298 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
20299 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
20300 case X86ISD::FLD: return "X86ISD::FLD";
20301 case X86ISD::FST: return "X86ISD::FST";
20302 case X86ISD::CALL: return "X86ISD::CALL";
20303 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
20304 case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
20305 case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
20306 case X86ISD::BT: return "X86ISD::BT";
20307 case X86ISD::CMP: return "X86ISD::CMP";
20308 case X86ISD::COMI: return "X86ISD::COMI";
20309 case X86ISD::UCOMI: return "X86ISD::UCOMI";
20310 case X86ISD::CMPM: return "X86ISD::CMPM";
20311 case X86ISD::CMPMU: return "X86ISD::CMPMU";
20312 case X86ISD::SETCC: return "X86ISD::SETCC";
20313 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
20314 case X86ISD::FSETCC: return "X86ISD::FSETCC";
20315 case X86ISD::CMOV: return "X86ISD::CMOV";
20316 case X86ISD::BRCOND: return "X86ISD::BRCOND";
20317 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
20318 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
20319 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
20320 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
20321 case X86ISD::Wrapper: return "X86ISD::Wrapper";
20322 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
20323 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
20324 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
20325 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
20326 case X86ISD::PINSRB: return "X86ISD::PINSRB";
20327 case X86ISD::PINSRW: return "X86ISD::PINSRW";
20328 case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
20329 case X86ISD::ANDNP: return "X86ISD::ANDNP";
20330 case X86ISD::PSIGN: return "X86ISD::PSIGN";
20331 case X86ISD::BLENDI: return "X86ISD::BLENDI";
20332 case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
20333 case X86ISD::SUBUS: return "X86ISD::SUBUS";
20334 case X86ISD::HADD: return "X86ISD::HADD";
20335 case X86ISD::HSUB: return "X86ISD::HSUB";
20336 case X86ISD::FHADD: return "X86ISD::FHADD";
20337 case X86ISD::FHSUB: return "X86ISD::FHSUB";
20338 case X86ISD::UMAX: return "X86ISD::UMAX";
20339 case X86ISD::UMIN: return "X86ISD::UMIN";
20340 case X86ISD::SMAX: return "X86ISD::SMAX";
20341 case X86ISD::SMIN: return "X86ISD::SMIN";
20342 case X86ISD::FMAX: return "X86ISD::FMAX";
20343 case X86ISD::FMIN: return "X86ISD::FMIN";
20344 case X86ISD::FMAXC: return "X86ISD::FMAXC";
20345 case X86ISD::FMINC: return "X86ISD::FMINC";
20346 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
20347 case X86ISD::FRCP: return "X86ISD::FRCP";
20348 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
20349 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
20350 case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
20351 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
20352 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
20353 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
20354 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
20355 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
20356 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
20357 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
20358 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
20359 case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
20360 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
20361 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
20362 case X86ISD::VZEXT: return "X86ISD::VZEXT";
20363 case X86ISD::VSEXT: return "X86ISD::VSEXT";
20364 case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
20365 case X86ISD::VTRUNCM: return "X86ISD::VTRUNCM";
20366 case X86ISD::VINSERT: return "X86ISD::VINSERT";
20367 case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
20368 case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
20369 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
20370 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
20371 case X86ISD::VSHL: return "X86ISD::VSHL";
20372 case X86ISD::VSRL: return "X86ISD::VSRL";
20373 case X86ISD::VSRA: return "X86ISD::VSRA";
20374 case X86ISD::VSHLI: return "X86ISD::VSHLI";
20375 case X86ISD::VSRLI: return "X86ISD::VSRLI";
20376 case X86ISD::VSRAI: return "X86ISD::VSRAI";
20377 case X86ISD::CMPP: return "X86ISD::CMPP";
20378 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
20379 case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
20380 case X86ISD::PCMPEQM: return "X86ISD::PCMPEQM";
20381 case X86ISD::PCMPGTM: return "X86ISD::PCMPGTM";
20382 case X86ISD::ADD: return "X86ISD::ADD";
20383 case X86ISD::SUB: return "X86ISD::SUB";
20384 case X86ISD::ADC: return "X86ISD::ADC";
20385 case X86ISD::SBB: return "X86ISD::SBB";
20386 case X86ISD::SMUL: return "X86ISD::SMUL";
20387 case X86ISD::UMUL: return "X86ISD::UMUL";
20388 case X86ISD::SMUL8: return "X86ISD::SMUL8";
20389 case X86ISD::UMUL8: return "X86ISD::UMUL8";
20390 case X86ISD::SDIVREM8_SEXT_HREG: return "X86ISD::SDIVREM8_SEXT_HREG";
20391 case X86ISD::UDIVREM8_ZEXT_HREG: return "X86ISD::UDIVREM8_ZEXT_HREG";
20392 case X86ISD::INC: return "X86ISD::INC";
20393 case X86ISD::DEC: return "X86ISD::DEC";
20394 case X86ISD::OR: return "X86ISD::OR";
20395 case X86ISD::XOR: return "X86ISD::XOR";
20396 case X86ISD::AND: return "X86ISD::AND";
20397 case X86ISD::BEXTR: return "X86ISD::BEXTR";
20398 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
20399 case X86ISD::PTEST: return "X86ISD::PTEST";
20400 case X86ISD::TESTP: return "X86ISD::TESTP";
20401 case X86ISD::TESTM: return "X86ISD::TESTM";
20402 case X86ISD::TESTNM: return "X86ISD::TESTNM";
20403 case X86ISD::KORTEST: return "X86ISD::KORTEST";
20404 case X86ISD::PACKSS: return "X86ISD::PACKSS";
20405 case X86ISD::PACKUS: return "X86ISD::PACKUS";
20406 case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
20407 case X86ISD::VALIGN: return "X86ISD::VALIGN";
20408 case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
20409 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
20410 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
20411 case X86ISD::SHUFP: return "X86ISD::SHUFP";
20412 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
20413 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
20414 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
20415 case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
20416 case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
20417 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
20418 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
20419 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
20420 case X86ISD::MOVSD: return "X86ISD::MOVSD";
20421 case X86ISD::MOVSS: return "X86ISD::MOVSS";
20422 case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
20423 case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
20424 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
20425 case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
20426 case X86ISD::VEXTRACT: return "X86ISD::VEXTRACT";
20427 case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
20428 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
20429 case X86ISD::VPERMV: return "X86ISD::VPERMV";
20430 case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
20431 case X86ISD::VPERMIV3: return "X86ISD::VPERMIV3";
20432 case X86ISD::VPERMI: return "X86ISD::VPERMI";
20433 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
20434 case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
20435 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
20436 case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
20437 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
20438 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
20439 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
20440 case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL";
20441 case X86ISD::SAHF: return "X86ISD::SAHF";
20442 case X86ISD::RDRAND: return "X86ISD::RDRAND";
20443 case X86ISD::RDSEED: return "X86ISD::RDSEED";
20444 case X86ISD::FMADD: return "X86ISD::FMADD";
20445 case X86ISD::FMSUB: return "X86ISD::FMSUB";
20446 case X86ISD::FNMADD: return "X86ISD::FNMADD";
20447 case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
20448 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
20449 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
20450 case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI";
20451 case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI";
20452 case X86ISD::XTEST: return "X86ISD::XTEST";
20453 case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
20454 case X86ISD::EXPAND: return "X86ISD::EXPAND";
20455 case X86ISD::SELECT: return "X86ISD::SELECT";
20456 case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
20457 case X86ISD::RCP28: return "X86ISD::RCP28";
20458 case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
20462 // isLegalAddressingMode - Return true if the addressing mode represented
20463 // by AM is legal for this target, for a load/store of the specified type.
20464 bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
20466 // X86 supports extremely general addressing modes.
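// An address may combine a base register, a scaled index register, a 32-bit
// signed displacement and (for globals) a symbolic base, subject to the
// restrictions checked below. For example (illustrative):
//   movl 1234(%rax,%rbx,4), %ecx      ; base + 4*index + disp
//   movl  sym(,%rax,8), %ecx          ; GV + 8*index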
20467 CodeModel::Model M = getTargetMachine().getCodeModel();
20468 Reloc::Model R = getTargetMachine().getRelocationModel();
20470 // X86 allows a sign-extended 32-bit immediate field as a displacement.
20471 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
20476 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());
20478 // If a reference to this global requires an extra load, we can't fold it.
20479 if (isGlobalStubReference(GVFlags))
20482 // If BaseGV requires a register for the PIC base, we cannot also have a
20483 // BaseReg specified.
20484 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
20487 // If lower 4G is not available, then we must use rip-relative addressing.
20488 if ((M != CodeModel::Small || R != Reloc::Static) &&
20489 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
20493 switch (AM.Scale) {
20499 // These scales always work.
20504 // These scales are formed with basereg+scalereg. Only accept if there is
// no basereg yet.
20509 default: // Other stuff never works.
20516 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
20517 unsigned Bits = Ty->getScalarSizeInBits();
20519 // 8-bit shifts are always expensive, but versions with a scalar amount aren't
20520 // particularly cheaper than those without.
20524 // On AVX2 there are new vpsllv[dq] instructions (and other shifts), that make
20525 // variable shifts just as cheap as scalar ones.
20526 if (Subtarget->hasInt256() && (Bits == 32 || Bits == 64))
20529 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
20530 // fully general vector.
20534 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
20535 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20537 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
20538 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
20539 return NumBits1 > NumBits2;
20542 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
20543 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20546 if (!isTypeLegal(EVT::getEVT(Ty1)))
20549 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
20551 // Assuming the caller doesn't have a zeroext or signext return parameter,
20552 // truncation all the way down to i1 is valid.
20556 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
20557 return isInt<32>(Imm);
20560 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
20561 // Can also use sub to handle negated immediates.
20562 return isInt<32>(Imm);
20565 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
20566 if (!VT1.isInteger() || !VT2.isInteger())
20568 unsigned NumBits1 = VT1.getSizeInBits();
20569 unsigned NumBits2 = VT2.getSizeInBits();
20570 return NumBits1 > NumBits2;
20573 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
20574 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20575 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
20578 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
20579 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20580 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
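// (In both cases above: e.g. "movl %esi, %eax" already clears bits 63:32 of
// %rax, so no separate zero-extension instruction is required.)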
20583 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
20584 EVT VT1 = Val.getValueType();
20585 if (isZExtFree(VT1, VT2))
20588 if (Val.getOpcode() != ISD::LOAD)
20591 if (!VT1.isSimple() || !VT1.isInteger() ||
20592 !VT2.isSimple() || !VT2.isInteger())
20595 switch (VT1.getSimpleVT().SimpleTy) {
20600 // X86 has 8, 16, and 32-bit zero-extending loads.
20607 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue) const { return true; }
20610 X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
20611 if (!(Subtarget->hasFMA() || Subtarget->hasFMA4()))
20614 VT = VT.getScalarType();
20616 if (!VT.isSimple())
20619 switch (VT.getSimpleVT().SimpleTy) {
20630 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
20631 // i16 instructions are longer (0x66 prefix) and potentially slower.
20632 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
20635 /// isShuffleMaskLegal - Targets can use this to indicate that they only
20636 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
20637 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
20638 /// are assumed to be legal.
20640 X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
20642 if (!VT.isSimple())
20645 MVT SVT = VT.getSimpleVT();
20647 // Very little shuffling can be done for 64-bit vectors right now.
20648 if (VT.getSizeInBits() == 64)
20651 // This is an experimental legality test that is tailored to match the
20652 // legality test of the experimental lowering more closely. They are gated
20653 // separately to ease testing of performance differences.
20654 if (ExperimentalVectorShuffleLegality)
20655 // We only care that the types being shuffled are legal. The lowering can
20656 // handle any possible shuffle mask that results.
20657 return isTypeLegal(SVT);
20659 // If this is a single-input shuffle with no 128-bit lane crossings, we can
20660 // lower it into pshufb.
20661 if ((SVT.is128BitVector() && Subtarget->hasSSSE3()) ||
20662 (SVT.is256BitVector() && Subtarget->hasInt256())) {
20663 bool isLegal = true;
20664 for (unsigned I = 0, E = M.size(); I != E; ++I) {
20665 if (M[I] >= (int)SVT.getVectorNumElements() ||
20666 ShuffleCrosses128bitLane(SVT, I, M[I])) {
20675 // FIXME: blends, shifts.
20676 return (SVT.getVectorNumElements() == 2 ||
20677 ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
20678 isMOVLMask(M, SVT) ||
20679 isCommutedMOVLMask(M, SVT) ||
20680 isMOVHLPSMask(M, SVT) ||
20681 isSHUFPMask(M, SVT) ||
20682 isSHUFPMask(M, SVT, /* Commuted */ true) ||
20683 isPSHUFDMask(M, SVT) ||
20684 isPSHUFDMask(M, SVT, /* SecondOperand */ true) ||
20685 isPSHUFHWMask(M, SVT, Subtarget->hasInt256()) ||
20686 isPSHUFLWMask(M, SVT, Subtarget->hasInt256()) ||
20687 isPALIGNRMask(M, SVT, Subtarget) ||
20688 isUNPCKLMask(M, SVT, Subtarget->hasInt256()) ||
20689 isUNPCKHMask(M, SVT, Subtarget->hasInt256()) ||
20690 isUNPCKL_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20691 isUNPCKH_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20692 isBlendMask(M, SVT, Subtarget->hasSSE41(), Subtarget->hasInt256()) ||
20693 (Subtarget->hasSSE41() && isINSERTPSMask(M, SVT)));
20697 X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
20699 if (!VT.isSimple())
20702 MVT SVT = VT.getSimpleVT();
20704 // This is an experimental legality test that is tailored to match the
20705 // legality test of the experimental lowering more closely. They are gated
20706 // separately to ease testing of performance differences.
20707 if (ExperimentalVectorShuffleLegality)
20708 // The new vector shuffle lowering is very good at managing zero-inputs.
20709 return isShuffleMaskLegal(Mask, VT);
20711 unsigned NumElts = SVT.getVectorNumElements();
20712 // FIXME: This collection of masks seems suspect.
20715 if (NumElts == 4 && SVT.is128BitVector()) {
20716 return (isMOVLMask(Mask, SVT) ||
20717 isCommutedMOVLMask(Mask, SVT, true) ||
20718 isSHUFPMask(Mask, SVT) ||
20719 isSHUFPMask(Mask, SVT, /* Commuted */ true) ||
20720 isBlendMask(Mask, SVT, Subtarget->hasSSE41(),
20721 Subtarget->hasInt256()));
20726 //===----------------------------------------------------------------------===//
20727 // X86 Scheduler Hooks
20728 //===----------------------------------------------------------------------===//
20730 /// Utility function to emit xbegin specifying the start of an RTM region.
20731 static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
20732 const TargetInstrInfo *TII) {
20733 DebugLoc DL = MI->getDebugLoc();
20735 const BasicBlock *BB = MBB->getBasicBlock();
20736 MachineFunction::iterator I = MBB;
20739 // For the v = xbegin(), we generate
//
// thisMBB:
//  xbegin sinkMBB
//
// mainMBB:
//  eax = -1
//
// sinkMBB:
//  v = eax
20750 MachineBasicBlock *thisMBB = MBB;
20751 MachineFunction *MF = MBB->getParent();
20752 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
20753 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
20754 MF->insert(I, mainMBB);
20755 MF->insert(I, sinkMBB);
20757 // Transfer the remainder of BB and its successor edges to sinkMBB.
20758 sinkMBB->splice(sinkMBB->begin(), MBB,
20759 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
20760 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
20764 // # fallthrough to mainMBB
20765 // # on abort, control resumes at sinkMBB
20766 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB);
20767 thisMBB->addSuccessor(mainMBB);
20768 thisMBB->addSuccessor(sinkMBB);
20772 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1);
20773 mainMBB->addSuccessor(sinkMBB);
20776 // EAX is live into the sinkMBB
20777 sinkMBB->addLiveIn(X86::EAX);
20778 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
20779 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20782 MI->eraseFromParent();
20786 // FIXME: When we get size-specific XMM0 registers, i.e. XMM0_V16I8
20787 // or XMM0_V32I8 in AVX, all of this code can be replaced with definitions
20788 // in the .td file.
20789 static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
20790 const TargetInstrInfo *TII) {
20792 switch (MI->getOpcode()) {
20793 default: llvm_unreachable("illegal opcode!");
20794 case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break;
20795 case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
20796 case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break;
20797 case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break;
20798 case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break;
20799 case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break;
20800 case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break;
20801 case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
20804 DebugLoc dl = MI->getDebugLoc();
20805 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20807 unsigned NumArgs = MI->getNumOperands();
20808 for (unsigned i = 1; i < NumArgs; ++i) {
20809 MachineOperand &Op = MI->getOperand(i);
20810 if (!(Op.isReg() && Op.isImplicit()))
20811 MIB.addOperand(Op);
20813 if (MI->hasOneMemOperand())
20814 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20816 BuildMI(*BB, MI, dl,
20817 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20818 .addReg(X86::XMM0);
20820 MI->eraseFromParent();
20824 // FIXME: Custom handling because TableGen doesn't support multiple implicit
20825 // defs in an instruction pattern
20826 static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
20827 const TargetInstrInfo *TII) {
20829 switch (MI->getOpcode()) {
20830 default: llvm_unreachable("illegal opcode!");
20831 case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break;
20832 case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
20833 case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break;
20834 case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break;
20835 case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break;
20836 case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break;
20837 case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break;
20838 case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
20841 DebugLoc dl = MI->getDebugLoc();
20842 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20844 unsigned NumArgs = MI->getNumOperands(); // remove the results
20845 for (unsigned i = 1; i < NumArgs; ++i) {
20846 MachineOperand &Op = MI->getOperand(i);
20847 if (!(Op.isReg() && Op.isImplicit()))
20848 MIB.addOperand(Op);
20850 if (MI->hasOneMemOperand())
20851 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20853 BuildMI(*BB, MI, dl,
20854 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20857 MI->eraseFromParent();
20861 static MachineBasicBlock *EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
20862 const X86Subtarget *Subtarget) {
20863 DebugLoc dl = MI->getDebugLoc();
20864 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20865 // Address into RAX/EAX, other two args into ECX, EDX.
20866 unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
20867 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
20868 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
20869 for (int i = 0; i < X86::AddrNumOperands; ++i)
20870 MIB.addOperand(MI->getOperand(i));
20872 unsigned ValOps = X86::AddrNumOperands;
20873 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
20874 .addReg(MI->getOperand(ValOps).getReg());
20875 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
20876 .addReg(MI->getOperand(ValOps+1).getReg());
20878 // The instruction doesn't actually take any operands though.
20879 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));
20881 MI->eraseFromParent(); // The pseudo is gone now.
20885 MachineBasicBlock *
20886 X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
20887 MachineBasicBlock *MBB) const {
20888 // Emit va_arg instruction on X86-64.
20890 // Operands to this pseudo-instruction:
20891 // 0 ) Output : destination address (reg)
20892 // 1-5) Input : va_list address (addr, i64mem)
20893 // 6 ) ArgSize : Size (in bytes) of vararg type
20894 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
20895 // 8 ) Align : Alignment of type
20896 // 9 ) EFLAGS (implicit-def)
20898 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
20899 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands");
20901 unsigned DestReg = MI->getOperand(0).getReg();
20902 MachineOperand &Base = MI->getOperand(1);
20903 MachineOperand &Scale = MI->getOperand(2);
20904 MachineOperand &Index = MI->getOperand(3);
20905 MachineOperand &Disp = MI->getOperand(4);
20906 MachineOperand &Segment = MI->getOperand(5);
20907 unsigned ArgSize = MI->getOperand(6).getImm();
20908 unsigned ArgMode = MI->getOperand(7).getImm();
20909 unsigned Align = MI->getOperand(8).getImm();
20911 // Memory Reference
20912 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
20913 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
20914 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
20916 // Machine Information
20917 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20918 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
20919 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
20920 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
20921 DebugLoc DL = MI->getDebugLoc();
20923 // struct va_list {
// i32   gp_offset
// i32   fp_offset
20926 // i64   overflow_area (address)
20927 // i64   reg_save_area (address)
// }
20929 // sizeof(va_list) = 24
20930 // alignment(va_list) = 8
20932 unsigned TotalNumIntRegs = 6;
20933 unsigned TotalNumXMMRegs = 8;
20934 bool UseGPOffset = (ArgMode == 1);
20935 bool UseFPOffset = (ArgMode == 2);
20936 unsigned MaxOffset = TotalNumIntRegs * 8 +
20937 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
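// i.e. MaxOffset is 6*8 = 48 when pulling from the GP registers and
// 48 + 8*16 = 176 when pulling from the XMM registers.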
20939 // Align ArgSize to a multiple of 8.
20940 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
20941 bool NeedsAlign = (Align > 8);
20943 MachineBasicBlock *thisMBB = MBB;
20944 MachineBasicBlock *overflowMBB;
20945 MachineBasicBlock *offsetMBB;
20946 MachineBasicBlock *endMBB;
20948 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
20949 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
20950 unsigned OffsetReg = 0;
20952 if (!UseGPOffset && !UseFPOffset) {
20953 // If we only pull from the overflow region, we don't create a branch.
20954 // We don't need to alter control flow.
20955 OffsetDestReg = 0; // unused
20956 OverflowDestReg = DestReg;
20958 offsetMBB = nullptr;
20959 overflowMBB = thisMBB;
20962 // First emit code to check if gp_offset (or fp_offset) is below the bound.
20963 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
20964 // If not, pull from overflow_area. (branch to overflowMBB)
20969 //   offsetMBB          overflowMBB
//           \          /
//             endMBB
20974 // Registers for the PHI in endMBB
20975 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
20976 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
20978 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
20979 MachineFunction *MF = MBB->getParent();
20980 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20981 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20982 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20984 MachineFunction::iterator MBBIter = MBB;
20987 // Insert the new basic blocks
20988 MF->insert(MBBIter, offsetMBB);
20989 MF->insert(MBBIter, overflowMBB);
20990 MF->insert(MBBIter, endMBB);
20992 // Transfer the remainder of MBB and its successor edges to endMBB.
20993 endMBB->splice(endMBB->begin(), thisMBB,
20994 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
20995 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
20997 // Make offsetMBB and overflowMBB successors of thisMBB
20998 thisMBB->addSuccessor(offsetMBB);
20999 thisMBB->addSuccessor(overflowMBB);
21001 // endMBB is a successor of both offsetMBB and overflowMBB
21002 offsetMBB->addSuccessor(endMBB);
21003 overflowMBB->addSuccessor(endMBB);
21005 // Load the offset value into a register
21006 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
21007 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
21011 .addDisp(Disp, UseFPOffset ? 4 : 0)
21012 .addOperand(Segment)
21013 .setMemRefs(MMOBegin, MMOEnd);
21015 // Check if there is enough room left to pull this argument.
21016 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
21018 .addImm(MaxOffset + 8 - ArgSizeA8);
21020 // Branch to "overflowMBB" if offset >= max
21021 // Fall through to "offsetMBB" otherwise
21022 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
21023 .addMBB(overflowMBB);
21026 // In offsetMBB, emit code to use the reg_save_area.
21028 assert(OffsetReg != 0);
21030 // Read the reg_save_area address.
21031 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
21032 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
21037 .addOperand(Segment)
21038 .setMemRefs(MMOBegin, MMOEnd);
21040 // Zero-extend the offset
21041 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
21042 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
21045 .addImm(X86::sub_32bit);
21047 // Add the offset to the reg_save_area to get the final address.
21048 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
21049 .addReg(OffsetReg64)
21050 .addReg(RegSaveReg);
21052 // Compute the offset for the next argument
21053 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
21054 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
21056 .addImm(UseFPOffset ? 16 : 8);
21058 // Store it back into the va_list.
21059 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
21063 .addDisp(Disp, UseFPOffset ? 4 : 0)
21064 .addOperand(Segment)
21065 .addReg(NextOffsetReg)
21066 .setMemRefs(MMOBegin, MMOEnd);
21069 BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
21074 // Emit code to use overflow area
21077 // Load the overflow_area address into a register.
21078 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
21079 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
21084 .addOperand(Segment)
21085 .setMemRefs(MMOBegin, MMOEnd);
21087 // If we need to align it, do so. Otherwise, just copy the address
21088 // to OverflowDestReg.
21090 // Align the overflow address
21091 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
21092 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
21094 // aligned_addr = (addr + (align-1)) & ~(align-1)
21095 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
21096 .addReg(OverflowAddrReg)
21099 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
21101 .addImm(~(uint64_t)(Align-1));
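// For example (illustrative), with Align = 16 an overflow pointer of 0x1008
// becomes (0x1008 + 15) & ~15 = 0x1010 after the add/and pair above.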
21103 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
21104 .addReg(OverflowAddrReg);
21107 // Compute the next overflow address after this argument.
21108 // (the overflow address should be kept 8-byte aligned)
21109 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
21110 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
21111 .addReg(OverflowDestReg)
21112 .addImm(ArgSizeA8);
21114 // Store the new overflow address.
21115 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
21120 .addOperand(Segment)
21121 .addReg(NextAddrReg)
21122 .setMemRefs(MMOBegin, MMOEnd);
21124 // If we branched, emit the PHI to the front of endMBB.
21126 BuildMI(*endMBB, endMBB->begin(), DL,
21127 TII->get(X86::PHI), DestReg)
21128 .addReg(OffsetDestReg).addMBB(offsetMBB)
21129 .addReg(OverflowDestReg).addMBB(overflowMBB);
21132 // Erase the pseudo instruction
21133 MI->eraseFromParent();
21138 MachineBasicBlock *
21139 X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
21141 MachineBasicBlock *MBB) const {
21142 // Emit code to save XMM registers to the stack. The ABI says that the
21143 // number of registers to save is given in %al, so it's theoretically
21144 // possible to do an indirect jump trick to avoid saving all of them,
21145 // however this code takes a simpler approach and just executes all
21146 // of the stores if %al is non-zero. It's less code, and it's probably
21147 // easier on the hardware branch predictor, and stores aren't all that
21148 // expensive anyway.
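// Roughly, the emitted code looks like this (illustrative, non-Win64 case):
//
//   testb %al, %al
//   je    EndMBB
// XMMSaveMBB:
//   movaps %xmm0, <reg_save_area + 0>
//   movaps %xmm1, <reg_save_area + 16>
//   ...                               ; one store per XMM argument register
// EndMBB: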
21150 // Create the new basic blocks. One block contains all the XMM stores,
21151 // and one block is the final destination regardless of whether any
21152 // stores were performed.
21153 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
21154 MachineFunction *F = MBB->getParent();
21155 MachineFunction::iterator MBBIter = MBB;
21157 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
21158 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
21159 F->insert(MBBIter, XMMSaveMBB);
21160 F->insert(MBBIter, EndMBB);
21162 // Transfer the remainder of MBB and its successor edges to EndMBB.
21163 EndMBB->splice(EndMBB->begin(), MBB,
21164 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21165 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
21167 // The original block will now fall through to the XMM save block.
21168 MBB->addSuccessor(XMMSaveMBB);
21169 // The XMMSaveMBB will fall through to the end block.
21170 XMMSaveMBB->addSuccessor(EndMBB);
21172 // Now add the instructions.
21173 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21174 DebugLoc DL = MI->getDebugLoc();
21176 unsigned CountReg = MI->getOperand(0).getReg();
21177 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
21178 int64_t VarArgsFPOffset = MI->getOperand(2).getImm();
21180 if (!Subtarget->isTargetWin64()) {
21181 // If %al is 0, branch around the XMM save block.
21182 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
21183 BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
21184 MBB->addSuccessor(EndMBB);
21187 // Make sure the last operand is EFLAGS, which gets clobbered by the branch
21188 // that was just emitted, but clearly shouldn't be "saved".
21189 assert((MI->getNumOperands() <= 3 ||
21190 !MI->getOperand(MI->getNumOperands() - 1).isReg() ||
21191 MI->getOperand(MI->getNumOperands() - 1).getReg() == X86::EFLAGS)
21192 && "Expected last argument to be EFLAGS");
21193 unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
21194 // In the XMM save block, save all the XMM argument registers.
21195 for (int i = 3, e = MI->getNumOperands() - 1; i != e; ++i) {
21196 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
21197 MachineMemOperand *MMO =
21198 F->getMachineMemOperand(
21199 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset),
21200 MachineMemOperand::MOStore,
21201 /*Size=*/16, /*Align=*/16);
21202 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
21203 .addFrameIndex(RegSaveFrameIndex)
21204 .addImm(/*Scale=*/1)
21205 .addReg(/*IndexReg=*/0)
21206 .addImm(/*Disp=*/Offset)
21207 .addReg(/*Segment=*/0)
21208 .addReg(MI->getOperand(i).getReg())
21209 .addMemOperand(MMO);
21212 MI->eraseFromParent(); // The pseudo instruction is gone now.
21217 // The EFLAGS operand of SelectItr might be missing a kill marker
21218 // because there were multiple uses of EFLAGS, and ISel didn't know
21219 // which to mark. Figure out whether SelectItr should have had a
21220 // kill marker, and set it if it should. Returns the correct kill marker value.
21222 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
21223 MachineBasicBlock* BB,
21224 const TargetRegisterInfo* TRI) {
21225 // Scan forward through BB for a use/def of EFLAGS.
21226 MachineBasicBlock::iterator miI(std::next(SelectItr));
21227 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
21228 const MachineInstr& mi = *miI;
21229 if (mi.readsRegister(X86::EFLAGS))
21231 if (mi.definesRegister(X86::EFLAGS))
21232 break; // Should have kill-flag - update below.
21235 // If we hit the end of the block, check whether EFLAGS is live into a successor.
21237 if (miI == BB->end()) {
21238 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
21239 sEnd = BB->succ_end();
21240 sItr != sEnd; ++sItr) {
21241 MachineBasicBlock* succ = *sItr;
21242 if (succ->isLiveIn(X86::EFLAGS))
21247 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
21248 // out. SelectMI should have a kill flag on EFLAGS.
21249 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
21253 MachineBasicBlock *
21254 X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
21255 MachineBasicBlock *BB) const {
21256 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21257 DebugLoc DL = MI->getDebugLoc();
21259 // To "insert" a SELECT_CC instruction, we actually have to insert the
21260 // diamond control-flow pattern. The incoming instruction knows the
21261 // destination vreg to set, the condition code register to branch on, the
21262 // true/false values to select between, and a branch opcode to use.
21263 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21264 MachineFunction::iterator It = BB;
21270 //  cmpTY ccX, r1, r2
//  bCC sinkMBB
21272 //  fallthrough --> copy0MBB
21273 MachineBasicBlock *thisMBB = BB;
21274 MachineFunction *F = BB->getParent();
21275 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
21276 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
21277 F->insert(It, copy0MBB);
21278 F->insert(It, sinkMBB);
21280 // If the EFLAGS register isn't dead in the terminator, then claim that it's
21281 // live into the sink and copy blocks.
21282 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
21283 if (!MI->killsRegister(X86::EFLAGS) &&
21284 !checkAndUpdateEFLAGSKill(MI, BB, TRI)) {
21285 copy0MBB->addLiveIn(X86::EFLAGS);
21286 sinkMBB->addLiveIn(X86::EFLAGS);
21289 // Transfer the remainder of BB and its successor edges to sinkMBB.
21290 sinkMBB->splice(sinkMBB->begin(), BB,
21291 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21292 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
21294 // Add the true and fallthrough blocks as its successors.
21295 BB->addSuccessor(copy0MBB);
21296 BB->addSuccessor(sinkMBB);
21298 // Create the conditional branch instruction.
21300 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
21301 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
21304 // %FalseValue = ...
21305 // # fallthrough to sinkMBB
21306 copy0MBB->addSuccessor(sinkMBB);
21309 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
21311 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21312 TII->get(X86::PHI), MI->getOperand(0).getReg())
21313 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
21314 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
21316 MI->eraseFromParent(); // The pseudo instruction is gone now.
21320 MachineBasicBlock *
21321 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
21322 MachineBasicBlock *BB) const {
21323 MachineFunction *MF = BB->getParent();
21324 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21325 DebugLoc DL = MI->getDebugLoc();
21326 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21328 assert(MF->shouldSplitStack());
21330 const bool Is64Bit = Subtarget->is64Bit();
21331 const bool IsLP64 = Subtarget->isTarget64BitLP64();
21333 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
21334 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
21337 //  ... [Till the alloca]
21338 // If stacklet is not large enough, jump to mallocMBB
//
// bumpMBB:
21341 //  Allocate by subtracting from RSP
21342 //  Jump to continueMBB
//
// mallocMBB:
21345 //  Allocate by call to runtime
//
// continueMBB:
21349 //  [rest of original BB]
21352 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21353 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21354 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21356 MachineRegisterInfo &MRI = MF->getRegInfo();
21357 const TargetRegisterClass *AddrRegClass =
21358 getRegClassFor(getPointerTy());
21360 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21361 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21362 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
21363 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
21364 sizeVReg = MI->getOperand(1).getReg(),
21365 physSPReg = IsLP64 || Subtarget->isTargetNaCl64() ? X86::RSP : X86::ESP;
21367 MachineFunction::iterator MBBIter = BB;
21370 MF->insert(MBBIter, bumpMBB);
21371 MF->insert(MBBIter, mallocMBB);
21372 MF->insert(MBBIter, continueMBB);
21374 continueMBB->splice(continueMBB->begin(), BB,
21375 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21376 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
21378 // Add code to the main basic block to check if the stack limit has been hit,
21379 // and if so, jump to mallocMBB otherwise to bumpMBB.
21380 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
21381 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
21382 .addReg(tmpSPVReg).addReg(sizeVReg);
21383 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
21384 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
21385 .addReg(SPLimitVReg);
21386 BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);
21388 // bumpMBB simply decreases the stack pointer, since we know the current
21389 // stacklet has enough space.
21390 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
21391 .addReg(SPLimitVReg);
21392 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
21393 .addReg(SPLimitVReg);
21394 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21396 // Calls into a routine in libgcc to allocate more space from the heap.
21397 const uint32_t *RegMask =
21398 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
21400 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
21402 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21403 .addExternalSymbol("__morestack_allocate_stack_space")
21404 .addRegMask(RegMask)
21405 .addReg(X86::RDI, RegState::Implicit)
21406 .addReg(X86::RAX, RegState::ImplicitDefine);
21407 } else if (Is64Bit) {
21408 BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
21410 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21411 .addExternalSymbol("__morestack_allocate_stack_space")
21412 .addRegMask(RegMask)
21413 .addReg(X86::EDI, RegState::Implicit)
21414 .addReg(X86::EAX, RegState::ImplicitDefine);
21416 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
21418 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
21419 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
21420 .addExternalSymbol("__morestack_allocate_stack_space")
21421 .addRegMask(RegMask)
21422 .addReg(X86::EAX, RegState::ImplicitDefine);
21426 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
21429 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
21430 .addReg(IsLP64 ? X86::RAX : X86::EAX);
21431 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21433 // Set up the CFG correctly.
21434 BB->addSuccessor(bumpMBB);
21435 BB->addSuccessor(mallocMBB);
21436 mallocMBB->addSuccessor(continueMBB);
21437 bumpMBB->addSuccessor(continueMBB);
21439 // Take care of the PHI nodes.
21440 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
21441 MI->getOperand(0).getReg())
21442 .addReg(mallocPtrVReg).addMBB(mallocMBB)
21443 .addReg(bumpSPPtrVReg).addMBB(bumpMBB);
21445 // Delete the original pseudo instruction.
21446 MI->eraseFromParent();
21449 return continueMBB;
21452 MachineBasicBlock *
21453 X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
21454 MachineBasicBlock *BB) const {
21455 DebugLoc DL = MI->getDebugLoc();
21457 assert(!Subtarget->isTargetMachO());
21459 X86FrameLowering::emitStackProbeCall(*BB->getParent(), *BB, MI, DL);
21461 MI->eraseFromParent(); // The pseudo instruction is gone now.
21465 MachineBasicBlock *
21466 X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
21467 MachineBasicBlock *BB) const {
21468 // This is pretty easy. We're taking the value that we received from
21469 // our load from the relocation, sticking it in either RDI (x86-64)
21470 // or EAX and doing an indirect call. The return value will then
21471 // be in the normal return register.
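// On x86-64 this amounts to roughly the following (illustrative):
//
//   movq _var@TLVP(%rip), %rdi
//   callq *(%rdi)
//
// with the result left in the usual return register; the 32-bit cases use
// EAX and an indirect CALL32m instead.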
21472 MachineFunction *F = BB->getParent();
21473 const X86InstrInfo *TII = Subtarget->getInstrInfo();
21474 DebugLoc DL = MI->getDebugLoc();
21476 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
21477 assert(MI->getOperand(3).isGlobal() && "This should be a global");
21479 // Get a register mask for the lowered call.
21480 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
21481 // proper register mask.
21482 const uint32_t *RegMask =
21483 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
21484 if (Subtarget->is64Bit()) {
21485 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21486 TII->get(X86::MOV64rm), X86::RDI)
21488 .addImm(0).addReg(0)
21489 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21490 MI->getOperand(3).getTargetFlags())
21492 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
21493 addDirectMem(MIB, X86::RDI);
21494 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
21495 } else if (F->getTarget().getRelocationModel() != Reloc::PIC_) {
21496 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21497 TII->get(X86::MOV32rm), X86::EAX)
21499 .addImm(0).addReg(0)
21500 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21501 MI->getOperand(3).getTargetFlags())
21503 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21504 addDirectMem(MIB, X86::EAX);
21505 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21507 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21508 TII->get(X86::MOV32rm), X86::EAX)
21509 .addReg(TII->getGlobalBaseReg(F))
21510 .addImm(0).addReg(0)
21511 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21512 MI->getOperand(3).getTargetFlags())
21514 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21515 addDirectMem(MIB, X86::EAX);
21516 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21519 MI->eraseFromParent(); // The pseudo instruction is gone now.
21523 MachineBasicBlock *
21524 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
21525 MachineBasicBlock *MBB) const {
21526 DebugLoc DL = MI->getDebugLoc();
21527 MachineFunction *MF = MBB->getParent();
21528 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21529 MachineRegisterInfo &MRI = MF->getRegInfo();
21531 const BasicBlock *BB = MBB->getBasicBlock();
21532 MachineFunction::iterator I = MBB;
21535 // Memory Reference
21536 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21537 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21540 unsigned MemOpndSlot = 0;
21542 unsigned CurOp = 0;
21544 DstReg = MI->getOperand(CurOp++).getReg();
21545 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
21546 assert(RC->hasType(MVT::i32) && "Invalid destination!");
21547 unsigned mainDstReg = MRI.createVirtualRegister(RC);
21548 unsigned restoreDstReg = MRI.createVirtualRegister(RC);
21550 MemOpndSlot = CurOp;
21552 MVT PVT = getPointerTy();
21553 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21554 "Invalid Pointer Size!");
21556 // For v = setjmp(buf), we generate
//
// thisMBB:
21559 //  buf[LabelOffset] = restoreMBB  <-- takes address of restoreMBB
21560 //  SjLjSetup restoreMBB
//
// mainMBB:
//  v_main = 0
//
// sinkMBB:
21566 //  v = phi(main, restore)
//
// restoreMBB:
21569 //  if base pointer being used, load it from frame
//  v_restore = 1
21572 MachineBasicBlock *thisMBB = MBB;
21573 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
21574 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
21575 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
21576 MF->insert(I, mainMBB);
21577 MF->insert(I, sinkMBB);
21578 MF->push_back(restoreMBB);
21580 MachineInstrBuilder MIB;
21582 // Transfer the remainder of BB and its successor edges to sinkMBB.
21583 sinkMBB->splice(sinkMBB->begin(), MBB,
21584 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21585 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
21588 unsigned PtrStoreOpc = 0;
21589 unsigned LabelReg = 0;
21590 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21591 Reloc::Model RM = MF->getTarget().getRelocationModel();
21592 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
21593 (RM == Reloc::Static || RM == Reloc::DynamicNoPIC);
21595 // Prepare IP either in reg or imm.
21596 if (!UseImmLabel) {
21597 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
21598 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
21599 LabelReg = MRI.createVirtualRegister(PtrRC);
21600 if (Subtarget->is64Bit()) {
21601 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
21605 .addMBB(restoreMBB)
21608 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
21609 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
21610 .addReg(XII->getGlobalBaseReg(MF))
21613 .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference())
21617 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
21619 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
21620 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21621 if (i == X86::AddrDisp)
21622 MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset);
21624 MIB.addOperand(MI->getOperand(MemOpndSlot + i));
21627 MIB.addReg(LabelReg);
21629 MIB.addMBB(restoreMBB);
21630 MIB.setMemRefs(MMOBegin, MMOEnd);
21632 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
21633 .addMBB(restoreMBB);
21635 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
21636 MIB.addRegMask(RegInfo->getNoPreservedMask());
21637 thisMBB->addSuccessor(mainMBB);
21638 thisMBB->addSuccessor(restoreMBB);
21642 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
21643 mainMBB->addSuccessor(sinkMBB);
21646 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21647 TII->get(X86::PHI), DstReg)
21648 .addReg(mainDstReg).addMBB(mainMBB)
21649 .addReg(restoreDstReg).addMBB(restoreMBB);
21652 if (RegInfo->hasBasePointer(*MF)) {
21653 const bool Uses64BitFramePtr =
21654 Subtarget->isTarget64BitLP64() || Subtarget->isTargetNaCl64();
21655 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
21656 X86FI->setRestoreBasePointer(MF);
21657 unsigned FramePtr = RegInfo->getFrameRegister(*MF);
21658 unsigned BasePtr = RegInfo->getBaseRegister();
21659 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
21660 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
21661 FramePtr, true, X86FI->getRestoreBasePointerOffset())
21662 .setMIFlag(MachineInstr::FrameSetup);
21664 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
21665 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
21666 restoreMBB->addSuccessor(sinkMBB);
21668 MI->eraseFromParent();
21672 MachineBasicBlock *
21673 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
21674 MachineBasicBlock *MBB) const {
21675 DebugLoc DL = MI->getDebugLoc();
21676 MachineFunction *MF = MBB->getParent();
21677 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21678 MachineRegisterInfo &MRI = MF->getRegInfo();
21680 // Memory Reference
21681 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21682 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21684 MVT PVT = getPointerTy();
21685 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21686 "Invalid Pointer Size!");
21688 const TargetRegisterClass *RC =
21689 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
21690 unsigned Tmp = MRI.createVirtualRegister(RC);
21691 // Since FP is only updated here but NOT referenced, it's treated as GPR.
21692 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
21693 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
21694 unsigned SP = RegInfo->getStackRegister();
21696 MachineInstrBuilder MIB;
21698 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21699 const int64_t SPOffset = 2 * PVT.getStoreSize();
21701 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
21702 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
21705 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
21706 for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
21707 MIB.addOperand(MI->getOperand(i));
21708 MIB.setMemRefs(MMOBegin, MMOEnd);
21710 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
21711 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21712 if (i == X86::AddrDisp)
21713 MIB.addDisp(MI->getOperand(i), LabelOffset);
21715 MIB.addOperand(MI->getOperand(i));
21717 MIB.setMemRefs(MMOBegin, MMOEnd);
21719 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
21720 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21721 if (i == X86::AddrDisp)
21722 MIB.addDisp(MI->getOperand(i), SPOffset);
21724 MIB.addOperand(MI->getOperand(i));
21726 MIB.setMemRefs(MMOBegin, MMOEnd);
21728 BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
21730 MI->eraseFromParent();
21734 // Replace 213-type (isel default) FMA3 instructions with 231-type for
21735 // accumulator loops. Writing back to the accumulator allows the coalescer
21736 // to remove extra copies in the loop.
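// In the 213 form the destination register holds one of the multiplicands and
// is overwritten with the result, so the accumulator must be copied back into
// a multiplicand position on every iteration; in the 231 form the destination
// holds the addend (dst = src1 * src2 + dst), so the accumulator can simply
// stay in the destination register across iterations.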
21737 MachineBasicBlock *
21738 X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
21739 MachineBasicBlock *MBB) const {
21740 MachineOperand &AddendOp = MI->getOperand(3);
21742 // Bail out early if the addend isn't a register - we can't switch these.
21743 if (!AddendOp.isReg())
21746 MachineFunction &MF = *MBB->getParent();
21747 MachineRegisterInfo &MRI = MF.getRegInfo();
21749 // Check whether the addend is defined by a PHI:
21750 assert(MRI.hasOneDef(AddendOp.getReg()) && "Multiple defs in SSA?");
21751 MachineInstr &AddendDef = *MRI.def_instr_begin(AddendOp.getReg());
21752 if (!AddendDef.isPHI())
21755 // Look for the following pattern:
//   loop:
21757 //     %addend = phi [%entry, 0], [%loop, %result]
//     ...
21759 //     %result<tied1> = FMA213 %m2<tied0>, %m1, %addend
//
// Replace with:
//   loop:
21763 //     %addend = phi [%entry, 0], [%loop, %result]
//     ...
21765 //     %result<tied1> = FMA231 %addend<tied0>, %m1, %m2
21767 for (unsigned i = 1, e = AddendDef.getNumOperands(); i < e; i += 2) {
21768 assert(AddendDef.getOperand(i).isReg());
21769 MachineOperand PHISrcOp = AddendDef.getOperand(i);
21770 MachineInstr &PHISrcInst = *MRI.def_instr_begin(PHISrcOp.getReg());
21771 if (&PHISrcInst == MI) {
21772 // Found a matching instruction.
21773 unsigned NewFMAOpc = 0;
21774 switch (MI->getOpcode()) {
21775 case X86::VFMADDPDr213r: NewFMAOpc = X86::VFMADDPDr231r; break;
21776 case X86::VFMADDPSr213r: NewFMAOpc = X86::VFMADDPSr231r; break;
21777 case X86::VFMADDSDr213r: NewFMAOpc = X86::VFMADDSDr231r; break;
21778 case X86::VFMADDSSr213r: NewFMAOpc = X86::VFMADDSSr231r; break;
21779 case X86::VFMSUBPDr213r: NewFMAOpc = X86::VFMSUBPDr231r; break;
21780 case X86::VFMSUBPSr213r: NewFMAOpc = X86::VFMSUBPSr231r; break;
21781 case X86::VFMSUBSDr213r: NewFMAOpc = X86::VFMSUBSDr231r; break;
21782 case X86::VFMSUBSSr213r: NewFMAOpc = X86::VFMSUBSSr231r; break;
21783 case X86::VFNMADDPDr213r: NewFMAOpc = X86::VFNMADDPDr231r; break;
21784 case X86::VFNMADDPSr213r: NewFMAOpc = X86::VFNMADDPSr231r; break;
21785 case X86::VFNMADDSDr213r: NewFMAOpc = X86::VFNMADDSDr231r; break;
21786 case X86::VFNMADDSSr213r: NewFMAOpc = X86::VFNMADDSSr231r; break;
21787 case X86::VFNMSUBPDr213r: NewFMAOpc = X86::VFNMSUBPDr231r; break;
21788 case X86::VFNMSUBPSr213r: NewFMAOpc = X86::VFNMSUBPSr231r; break;
21789 case X86::VFNMSUBSDr213r: NewFMAOpc = X86::VFNMSUBSDr231r; break;
21790 case X86::VFNMSUBSSr213r: NewFMAOpc = X86::VFNMSUBSSr231r; break;
21791 case X86::VFMADDSUBPDr213r: NewFMAOpc = X86::VFMADDSUBPDr231r; break;
21792 case X86::VFMADDSUBPSr213r: NewFMAOpc = X86::VFMADDSUBPSr231r; break;
21793 case X86::VFMSUBADDPDr213r: NewFMAOpc = X86::VFMSUBADDPDr231r; break;
21794 case X86::VFMSUBADDPSr213r: NewFMAOpc = X86::VFMSUBADDPSr231r; break;
21796 case X86::VFMADDPDr213rY: NewFMAOpc = X86::VFMADDPDr231rY; break;
21797 case X86::VFMADDPSr213rY: NewFMAOpc = X86::VFMADDPSr231rY; break;
21798 case X86::VFMSUBPDr213rY: NewFMAOpc = X86::VFMSUBPDr231rY; break;
21799 case X86::VFMSUBPSr213rY: NewFMAOpc = X86::VFMSUBPSr231rY; break;
21800 case X86::VFNMADDPDr213rY: NewFMAOpc = X86::VFNMADDPDr231rY; break;
21801 case X86::VFNMADDPSr213rY: NewFMAOpc = X86::VFNMADDPSr231rY; break;
21802 case X86::VFNMSUBPDr213rY: NewFMAOpc = X86::VFNMSUBPDr231rY; break;
21803 case X86::VFNMSUBPSr213rY: NewFMAOpc = X86::VFNMSUBPSr231rY; break;
21804 case X86::VFMADDSUBPDr213rY: NewFMAOpc = X86::VFMADDSUBPDr231rY; break;
21805 case X86::VFMADDSUBPSr213rY: NewFMAOpc = X86::VFMADDSUBPSr231rY; break;
21806 case X86::VFMSUBADDPDr213rY: NewFMAOpc = X86::VFMSUBADDPDr231rY; break;
21807 case X86::VFMSUBADDPSr213rY: NewFMAOpc = X86::VFMSUBADDPSr231rY; break;
21808 default: llvm_unreachable("Unrecognized FMA variant.");
21811 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
21812 MachineInstrBuilder MIB =
21813 BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
21814 .addOperand(MI->getOperand(0))
21815 .addOperand(MI->getOperand(3))
21816 .addOperand(MI->getOperand(2))
21817 .addOperand(MI->getOperand(1));
21818 MBB->insert(MachineBasicBlock::iterator(MI), MIB);
21819 MI->eraseFromParent();
21826 MachineBasicBlock *
21827 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
21828 MachineBasicBlock *BB) const {
21829 switch (MI->getOpcode()) {
21830 default: llvm_unreachable("Unexpected instr type to insert");
21831 case X86::TAILJMPd64:
21832 case X86::TAILJMPr64:
21833 case X86::TAILJMPm64:
21834 case X86::TAILJMPd64_REX:
21835 case X86::TAILJMPr64_REX:
21836 case X86::TAILJMPm64_REX:
21837 llvm_unreachable("TAILJMP64 would not be touched here.");
21838 case X86::TCRETURNdi64:
21839 case X86::TCRETURNri64:
21840 case X86::TCRETURNmi64:
21842 case X86::WIN_ALLOCA:
21843 return EmitLoweredWinAlloca(MI, BB);
21844 case X86::SEG_ALLOCA_32:
21845 case X86::SEG_ALLOCA_64:
21846 return EmitLoweredSegAlloca(MI, BB);
21847 case X86::TLSCall_32:
21848 case X86::TLSCall_64:
21849 return EmitLoweredTLSCall(MI, BB);
21850 case X86::CMOV_GR8:
21851 case X86::CMOV_FR32:
21852 case X86::CMOV_FR64:
21853 case X86::CMOV_V4F32:
21854 case X86::CMOV_V2F64:
21855 case X86::CMOV_V2I64:
21856 case X86::CMOV_V8F32:
21857 case X86::CMOV_V4F64:
21858 case X86::CMOV_V4I64:
21859 case X86::CMOV_V16F32:
21860 case X86::CMOV_V8F64:
21861 case X86::CMOV_V8I64:
21862 case X86::CMOV_GR16:
21863 case X86::CMOV_GR32:
21864 case X86::CMOV_RFP32:
21865 case X86::CMOV_RFP64:
21866 case X86::CMOV_RFP80:
21867 return EmitLoweredSelect(MI, BB);
21869 case X86::FP32_TO_INT16_IN_MEM:
21870 case X86::FP32_TO_INT32_IN_MEM:
21871 case X86::FP32_TO_INT64_IN_MEM:
21872 case X86::FP64_TO_INT16_IN_MEM:
21873 case X86::FP64_TO_INT32_IN_MEM:
21874 case X86::FP64_TO_INT64_IN_MEM:
21875 case X86::FP80_TO_INT16_IN_MEM:
21876 case X86::FP80_TO_INT32_IN_MEM:
21877 case X86::FP80_TO_INT64_IN_MEM: {
21878 MachineFunction *F = BB->getParent();
21879 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21880 DebugLoc DL = MI->getDebugLoc();
21882 // Change the floating point control register to use "round towards zero"
21883 // mode when truncating to an integer value.
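// The x87 FPU defaults to round-to-nearest, so we save the control word to a stack
// slot, force round-toward-zero for the integer store below, and restore the original
// word afterwards.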
21884 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
21885 addFrameReference(BuildMI(*BB, MI, DL,
21886 TII->get(X86::FNSTCW16m)), CWFrameIdx);
21888 // Load the old value of the control word...
21890 unsigned OldCW = F->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
21891 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
21894 // Set the high part to be round to zero...
21895 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
21898 // Reload the modified control word now...
21899 addFrameReference(BuildMI(*BB, MI, DL,
21900 TII->get(X86::FLDCW16m)), CWFrameIdx);
21902 // Restore the in-memory image of the control word to its original value.
21903 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
.addReg(OldCW);
21906 // Get the X86 opcode to use.
unsigned Opc;
21908 switch (MI->getOpcode()) {
21909 default: llvm_unreachable("illegal opcode!");
21910 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
21911 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
21912 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
21913 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
21914 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
21915 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
21916 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
21917 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
21918 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
21922 MachineOperand &Op = MI->getOperand(0);
21924 AM.BaseType = X86AddressMode::RegBase;
21925 AM.Base.Reg = Op.getReg();
21927 AM.BaseType = X86AddressMode::FrameIndexBase;
21928 AM.Base.FrameIndex = Op.getIndex();
21930 Op = MI->getOperand(1);
21932 AM.Scale = Op.getImm();
21933 Op = MI->getOperand(2);
21935 AM.IndexReg = Op.getImm();
21936 Op = MI->getOperand(3);
21937 if (Op.isGlobal()) {
21938 AM.GV = Op.getGlobal();
21940 AM.Disp = Op.getImm();
21942 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
21943 .addReg(MI->getOperand(X86::AddrNumOperands).getReg());
21945 // Reload the original control word now.
21946 addFrameReference(BuildMI(*BB, MI, DL,
21947 TII->get(X86::FLDCW16m)), CWFrameIdx);
21949 MI->eraseFromParent(); // The pseudo instruction is gone now.
21952 // String/text processing lowering.
21953 case X86::PCMPISTRM128REG:
21954 case X86::VPCMPISTRM128REG:
21955 case X86::PCMPISTRM128MEM:
21956 case X86::VPCMPISTRM128MEM:
21957 case X86::PCMPESTRM128REG:
21958 case X86::VPCMPESTRM128REG:
21959 case X86::PCMPESTRM128MEM:
21960 case X86::VPCMPESTRM128MEM:
21961 assert(Subtarget->hasSSE42() &&
21962 "Target must have SSE4.2 or AVX features enabled");
21963 return EmitPCMPSTRM(MI, BB, Subtarget->getInstrInfo());
21965 // String/text processing lowering.
21966 case X86::PCMPISTRIREG:
21967 case X86::VPCMPISTRIREG:
21968 case X86::PCMPISTRIMEM:
21969 case X86::VPCMPISTRIMEM:
21970 case X86::PCMPESTRIREG:
21971 case X86::VPCMPESTRIREG:
21972 case X86::PCMPESTRIMEM:
21973 case X86::VPCMPESTRIMEM:
21974 assert(Subtarget->hasSSE42() &&
21975 "Target must have SSE4.2 or AVX features enabled");
21976 return EmitPCMPSTRI(MI, BB, Subtarget->getInstrInfo());
21978 // Thread synchronization.
21980 return EmitMonitor(MI, BB, Subtarget);
21984 return EmitXBegin(MI, BB, Subtarget->getInstrInfo());
21986 case X86::VASTART_SAVE_XMM_REGS:
21987 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
21989 case X86::VAARG_64:
21990 return EmitVAARG64WithCustomInserter(MI, BB);
21992 case X86::EH_SjLj_SetJmp32:
21993 case X86::EH_SjLj_SetJmp64:
21994 return emitEHSjLjSetJmp(MI, BB);
21996 case X86::EH_SjLj_LongJmp32:
21997 case X86::EH_SjLj_LongJmp64:
21998 return emitEHSjLjLongJmp(MI, BB);
22000 case TargetOpcode::STATEPOINT:
22001 // As an implementation detail, STATEPOINT shares the STACKMAP format at
22002 // this point in the process. We diverge later.
22003 return emitPatchPoint(MI, BB);
22005 case TargetOpcode::STACKMAP:
22006 case TargetOpcode::PATCHPOINT:
22007 return emitPatchPoint(MI, BB);
22009 case X86::VFMADDPDr213r:
22010 case X86::VFMADDPSr213r:
22011 case X86::VFMADDSDr213r:
22012 case X86::VFMADDSSr213r:
22013 case X86::VFMSUBPDr213r:
22014 case X86::VFMSUBPSr213r:
22015 case X86::VFMSUBSDr213r:
22016 case X86::VFMSUBSSr213r:
22017 case X86::VFNMADDPDr213r:
22018 case X86::VFNMADDPSr213r:
22019 case X86::VFNMADDSDr213r:
22020 case X86::VFNMADDSSr213r:
22021 case X86::VFNMSUBPDr213r:
22022 case X86::VFNMSUBPSr213r:
22023 case X86::VFNMSUBSDr213r:
22024 case X86::VFNMSUBSSr213r:
22025 case X86::VFMADDSUBPDr213r:
22026 case X86::VFMADDSUBPSr213r:
22027 case X86::VFMSUBADDPDr213r:
22028 case X86::VFMSUBADDPSr213r:
22029 case X86::VFMADDPDr213rY:
22030 case X86::VFMADDPSr213rY:
22031 case X86::VFMSUBPDr213rY:
22032 case X86::VFMSUBPSr213rY:
22033 case X86::VFNMADDPDr213rY:
22034 case X86::VFNMADDPSr213rY:
22035 case X86::VFNMSUBPDr213rY:
22036 case X86::VFNMSUBPSr213rY:
22037 case X86::VFMADDSUBPDr213rY:
22038 case X86::VFMADDSUBPSr213rY:
22039 case X86::VFMSUBADDPDr213rY:
22040 case X86::VFMSUBADDPSr213rY:
22041 return emitFMA3Instr(MI, BB);
22045 //===----------------------------------------------------------------------===//
22046 // X86 Optimization Hooks
22047 //===----------------------------------------------------------------------===//
22049 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
APInt &KnownZero,
APInt &KnownOne,
22052 const SelectionDAG &DAG,
22053 unsigned Depth) const {
22054 unsigned BitWidth = KnownZero.getBitWidth();
22055 unsigned Opc = Op.getOpcode();
22056 assert((Opc >= ISD::BUILTIN_OP_END ||
22057 Opc == ISD::INTRINSIC_WO_CHAIN ||
22058 Opc == ISD::INTRINSIC_W_CHAIN ||
22059 Opc == ISD::INTRINSIC_VOID) &&
22060 "Should use MaskedValueIsZero if you don't know whether Op"
22061 " is a target node!");
22063 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
22077 // These nodes' second result is a boolean.
22078 if (Op.getResNo() == 0)
22081 case X86ISD::SETCC:
22082 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
22084 case ISD::INTRINSIC_WO_CHAIN: {
22085 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
22086 unsigned NumLoBits = 0;
22089 case Intrinsic::x86_sse_movmsk_ps:
22090 case Intrinsic::x86_avx_movmsk_ps_256:
22091 case Intrinsic::x86_sse2_movmsk_pd:
22092 case Intrinsic::x86_avx_movmsk_pd_256:
22093 case Intrinsic::x86_mmx_pmovmskb:
22094 case Intrinsic::x86_sse2_pmovmskb_128:
22095 case Intrinsic::x86_avx2_pmovmskb: {
22096 // High bits of movmskp{s|d}, pmovmskb are known zero.
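// For example, MOVMSKPS produces only a 4-bit mask, so every result bit above bit 3 is zero.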
22098 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
22099 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break;
22100 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break;
22101 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break;
22102 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break;
22103 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break;
22104 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
22105 case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break;
22107 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
22116 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
SDValue Op,
22118 const SelectionDAG &,
22119 unsigned Depth) const {
22120 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
22121 if (Op.getOpcode() == X86ISD::SETCC_CARRY)
22122 return Op.getValueType().getScalarType().getSizeInBits();
22128 /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
22129 /// node is a GlobalAddress + offset.
22130 bool X86TargetLowering::isGAPlusOffset(SDNode *N,
22131 const GlobalValue* &GA,
22132 int64_t &Offset) const {
22133 if (N->getOpcode() == X86ISD::Wrapper) {
22134 if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
22135 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
22136 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
22140 return TargetLowering::isGAPlusOffset(N, GA, Offset);
22143 /// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the
22144 /// same as extracting the high 128-bit part of a 256-bit vector and then
22145 /// inserting the result into the low part of a new 256-bit vector.
22146 static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) {
22147 EVT VT = SVOp->getValueType(0);
22148 unsigned NumElems = VT.getVectorNumElements();
22150 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
22151 for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j)
22152 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
22153 SVOp->getMaskElt(j) >= 0)
22159 /// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the
22160 /// same as extracting the low 128-bit part of a 256-bit vector and then
22161 /// inserting the result into the high part of a new 256-bit vector.
22162 static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) {
22163 EVT VT = SVOp->getValueType(0);
22164 unsigned NumElems = VT.getVectorNumElements();
22166 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
22167 for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j)
22168 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
22169 SVOp->getMaskElt(j) >= 0)
22175 /// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
22176 static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
22177 TargetLowering::DAGCombinerInfo &DCI,
22178 const X86Subtarget* Subtarget) {
22180 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
22181 SDValue V1 = SVOp->getOperand(0);
22182 SDValue V2 = SVOp->getOperand(1);
22183 EVT VT = SVOp->getValueType(0);
22184 unsigned NumElems = VT.getVectorNumElements();
22186 if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
22187 V2.getOpcode() == ISD::CONCAT_VECTORS) {
22191 // Recognize a shuffle of (concat_vectors V, undef) with
22193 // (concat_vectors zero_build_vector, undef) whose mask takes the first
22196 // half from V and splats zero into the rest. RESULT: V, zero extended.
22198 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
22199 V2.getOperand(1).getOpcode() != ISD::UNDEF ||
22200 V1.getOperand(1).getOpcode() != ISD::UNDEF)
22203 if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
22206 // To match the shuffle mask, the first half of the mask should
22207 // be exactly the first vector, and all the rest a splat with the
22208 // first element of the second one.
22209 for (unsigned i = 0; i != NumElems/2; ++i)
22210 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) ||
22211 !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
22214 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
22215 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) {
22216 if (Ld->hasNUsesOfValue(1, 0)) {
22217 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
22218 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
22220 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
22222 Ld->getPointerInfo(),
22223 Ld->getAlignment(),
22224 false/*isVolatile*/, true/*ReadMem*/,
22225 false/*WriteMem*/);
22227 // Make sure the newly-created LOAD is in the same position as Ld in
22228 // terms of dependency. We create a TokenFactor for Ld and ResNode,
22229 // and update uses of Ld's output chain to use the TokenFactor.
22230 if (Ld->hasAnyUseOfValue(1)) {
22231 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
22232 SDValue(Ld, 1), SDValue(ResNode.getNode(), 1));
22233 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
22234 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1),
22235 SDValue(ResNode.getNode(), 1));
22238 return DAG.getNode(ISD::BITCAST, dl, VT, ResNode);
22242 // Emit a zeroed vector and insert the desired subvector into its low half.
22244 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
22245 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
22246 return DCI.CombineTo(N, InsV);
22249 //===--------------------------------------------------------------------===//
22250 // Combine some shuffles into subvector extracts and inserts:
22253 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
22254 if (isShuffleHigh128VectorInsertLow(SVOp)) {
22255 SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl);
22256 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl);
22257 return DCI.CombineTo(N, InsV);
22260 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
22261 if (isShuffleLow128VectorInsertHigh(SVOp)) {
22262 SDValue V = Extract128BitVector(V1, 0, DAG, dl);
22263 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl);
22264 return DCI.CombineTo(N, InsV);
22270 /// \brief Combine an arbitrary chain of shuffles into a single instruction if possible.
22273 /// This is the leaf of the recursive combine below. When we have found some
22274 /// chain of single-use x86 shuffle instructions and accumulated the combined
22275 /// shuffle mask represented by them, this will try to pattern match that mask
22276 /// into either a single instruction if there is a special purpose instruction
22277 /// for this operation, or into a PSHUFB instruction which is a fully general
22278 /// instruction but should only be used to replace chains over a certain depth.
22279 static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
22280 int Depth, bool HasPSHUFB, SelectionDAG &DAG,
22281 TargetLowering::DAGCombinerInfo &DCI,
22282 const X86Subtarget *Subtarget) {
22283 assert(!Mask.empty() && "Cannot combine an empty shuffle mask!");
22285 // Find the operand that enters the chain. Note that multiple uses are OK
22286 // here, we're not going to remove the operand we find.
22287 SDValue Input = Op.getOperand(0);
22288 while (Input.getOpcode() == ISD::BITCAST)
22289 Input = Input.getOperand(0);
22291 MVT VT = Input.getSimpleValueType();
22292 MVT RootVT = Root.getSimpleValueType();
22295 // Just remove no-op shuffle masks.
22296 if (Mask.size() == 1) {
22297 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Input),
22302 // Use the float domain if the operand type is a floating point type.
22303 bool FloatDomain = VT.isFloatingPoint();
22305 // For floating point shuffles, we don't have free copies in the shuffle
22306 // instructions or the ability to load as part of the instruction, so
22307 // canonicalize their shuffles to UNPCK or MOV variants.
22309 // Note that even with AVX we prefer the PSHUFD form of shuffle for integer
22310 // vectors because it can have a load folded into it that UNPCK cannot. This
22311 // doesn't preclude something switching to the shorter encoding post-RA.
22313 if (Mask.equals(0, 0) || Mask.equals(1, 1)) {
22314 bool Lo = Mask.equals(0, 0);
unsigned Shuffle;
MVT ShuffleVT;
22317 // Check if we have SSE3 which will let us use MOVDDUP. That instruction
22318 // is no slower than UNPCKLPD but has the option to fold the input operand
22319 // into even an unaligned memory load.
22320 if (Lo && Subtarget->hasSSE3()) {
22321 Shuffle = X86ISD::MOVDDUP;
22322 ShuffleVT = MVT::v2f64;
22324 // We have MOVLHPS and MOVHLPS throughout SSE and they encode smaller
22325 // than the UNPCK variants.
22326 Shuffle = Lo ? X86ISD::MOVLHPS : X86ISD::MOVHLPS;
22327 ShuffleVT = MVT::v4f32;
22329 if (Depth == 1 && Root->getOpcode() == Shuffle)
22330 return false; // Nothing to do!
22331 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22332 DCI.AddToWorklist(Op.getNode());
22333 if (Shuffle == X86ISD::MOVDDUP)
22334 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22336 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22337 DCI.AddToWorklist(Op.getNode());
22338 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22342 if (Subtarget->hasSSE3() &&
22343 (Mask.equals(0, 0, 2, 2) || Mask.equals(1, 1, 3, 3))) {
22344 bool Lo = Mask.equals(0, 0, 2, 2);
22345 unsigned Shuffle = Lo ? X86ISD::MOVSLDUP : X86ISD::MOVSHDUP;
22346 MVT ShuffleVT = MVT::v4f32;
22347 if (Depth == 1 && Root->getOpcode() == Shuffle)
22348 return false; // Nothing to do!
22349 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22350 DCI.AddToWorklist(Op.getNode());
22351 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22352 DCI.AddToWorklist(Op.getNode());
22353 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22357 if (Mask.equals(0, 0, 1, 1) || Mask.equals(2, 2, 3, 3)) {
22358 bool Lo = Mask.equals(0, 0, 1, 1);
22359 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22360 MVT ShuffleVT = MVT::v4f32;
22361 if (Depth == 1 && Root->getOpcode() == Shuffle)
22362 return false; // Nothing to do!
22363 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22364 DCI.AddToWorklist(Op.getNode());
22365 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22366 DCI.AddToWorklist(Op.getNode());
22367 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22373 // We always canonicalize the 8 x i16 and 16 x i8 shuffles into their UNPCK
22374 // variants as none of these have single-instruction variants that are
22375 // superior to the UNPCK formulation.
22376 if (!FloatDomain &&
22377 (Mask.equals(0, 0, 1, 1, 2, 2, 3, 3) ||
22378 Mask.equals(4, 4, 5, 5, 6, 6, 7, 7) ||
22379 Mask.equals(0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7) ||
22380 Mask.equals(8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
22382 bool Lo = Mask[0] == 0;
22383 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22384 if (Depth == 1 && Root->getOpcode() == Shuffle)
22385 return false; // Nothing to do!
MVT ShuffleVT;
22387 switch (Mask.size()) {
case 8:
22389 ShuffleVT = MVT::v8i16;
break;
case 16:
22392 ShuffleVT = MVT::v16i8;
break;
default:
22395 llvm_unreachable("Impossible mask size!");
}
22397 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22398 DCI.AddToWorklist(Op.getNode());
22399 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22400 DCI.AddToWorklist(Op.getNode());
22401 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22406 // Don't try to re-form single instruction chains under any circumstances now
22407 // that we've done encoding canonicalization for them.
22411 // If we have 3 or more shuffle instructions or a chain involving PSHUFB, we
22412 // can replace them with a single PSHUFB instruction profitably. Intel's
22413 // manuals suggest only using PSHUFB if doing so replaces 5 or more instructions, but
22414 // in practice PSHUFB tends to be *very* fast so we're more aggressive.
22415 if ((Depth >= 3 || HasPSHUFB) && Subtarget->hasSSSE3()) {
22416 SmallVector<SDValue, 16> PSHUFBMask;
22417 assert(Mask.size() <= 16 && "Can't shuffle elements smaller than bytes!");
22418 int Ratio = 16 / Mask.size();
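// The accumulated mask may use elements wider than a byte; Ratio is the number of
// consecutive PSHUFB byte lanes each mask element expands to.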
22419 for (unsigned i = 0; i < 16; ++i) {
22420 if (Mask[i / Ratio] == SM_SentinelUndef) {
22421 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
22424 int M = Mask[i / Ratio] != SM_SentinelZero
22425 ? Ratio * Mask[i / Ratio] + i % Ratio
22427 PSHUFBMask.push_back(DAG.getConstant(M, MVT::i8));
22429 Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Input);
22430 DCI.AddToWorklist(Op.getNode());
22431 SDValue PSHUFBMaskOp =
22432 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, PSHUFBMask);
22433 DCI.AddToWorklist(PSHUFBMaskOp.getNode());
22434 Op = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, Op, PSHUFBMaskOp);
22435 DCI.AddToWorklist(Op.getNode());
22436 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22441 // Failed to find any combines.
22445 /// \brief Fully generic combining of x86 shuffle instructions.
22447 /// This should be the last combine run over the x86 shuffle instructions. Once
22448 /// they have been fully optimized, this will recursively consider all chains
22449 /// of single-use shuffle instructions, build a generic model of the cumulative
22450 /// shuffle operation, and check for simpler instructions which implement this
22451 /// operation. We use this primarily for two purposes:
22453 /// 1) Collapse generic shuffles to specialized single instructions when
22454 /// equivalent. In most cases, this is just an encoding size win, but
22455 /// sometimes we will collapse multiple generic shuffles into a single
22456 /// special-purpose shuffle.
22457 /// 2) Look for sequences of shuffle instructions with 3 or more total
22458 /// instructions, and replace them with the slightly more expensive SSSE3
22459 /// PSHUFB instruction if available. We do this as the last combining step
22460 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
22461 /// a suitable short sequence of other instructions. The PSHUFB will either
22462 /// use a register or have to read from memory and so is slightly (but only
22463 /// slightly) more expensive than the other shuffle instructions.
22465 /// Because this is inherently a quadratic operation (for each shuffle in
22466 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
22467 /// This should never be an issue in practice as the shuffle lowering doesn't
22468 /// produce sequences of more than 8 instructions.
22470 /// FIXME: We will currently miss some cases where the redundant shuffling
22471 /// would simplify under the threshold for PSHUFB formation because of
22472 /// combine-ordering. To fix this, we should do the redundant instruction
22473 /// combining in this recursive walk.
22474 static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
22475 ArrayRef<int> RootMask,
22476 int Depth, bool HasPSHUFB,
22478 TargetLowering::DAGCombinerInfo &DCI,
22479 const X86Subtarget *Subtarget) {
22480 // Bound the depth of our recursive combine because this is ultimately
22481 // quadratic in nature.
22485 // Directly rip through bitcasts to find the underlying operand.
22486 while (Op.getOpcode() == ISD::BITCAST && Op.getOperand(0).hasOneUse())
22487 Op = Op.getOperand(0);
22489 MVT VT = Op.getSimpleValueType();
22490 if (!VT.isVector())
22491 return false; // Bail if we hit a non-vector.
22492 // FIXME: This routine should be taught about 256-bit shuffles, or a 256-bit
22493 // version should be added.
22494 if (VT.getSizeInBits() != 128)
22497 assert(Root.getSimpleValueType().isVector() &&
22498 "Shuffles operate on vector types!");
22499 assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
22500 "Can only combine shuffles of the same vector register size.");
22502 if (!isTargetShuffle(Op.getOpcode()))
22504 SmallVector<int, 16> OpMask;
22506 bool HaveMask = getTargetShuffleMask(Op.getNode(), VT, OpMask, IsUnary);
22507 // We can only combine unary shuffles whose mask we can decode.
22508 if (!HaveMask || !IsUnary)
22511 assert(VT.getVectorNumElements() == OpMask.size() &&
22512 "Different mask size from vector size!");
22513 assert(((RootMask.size() > OpMask.size() &&
22514 RootMask.size() % OpMask.size() == 0) ||
22515 (OpMask.size() > RootMask.size() &&
22516 OpMask.size() % RootMask.size() == 0) ||
22517 OpMask.size() == RootMask.size()) &&
22518 "The smaller number of elements must divide the larger.");
22519 int RootRatio = std::max<int>(1, OpMask.size() / RootMask.size());
22520 int OpRatio = std::max<int>(1, RootMask.size() / OpMask.size());
22521 assert(((RootRatio == 1 && OpRatio == 1) ||
22522 (RootRatio == 1) != (OpRatio == 1)) &&
22523 "Must not have a ratio for both incoming and op masks!");
22525 SmallVector<int, 16> Mask;
22526 Mask.reserve(std::max(OpMask.size(), RootMask.size()));
22528 // Merge this shuffle operation's mask into our accumulated mask. Note that
22529 // this shuffle's mask will be the first applied to the input, followed by the
22530 // root mask to get us all the way to the root value arrangement. The reason
22531 // for this order is that we are recursing up the operation chain.
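// With equal-sized masks (both ratios are 1) this composition reduces to
// Mask[i] = OpMask[RootMask[i]]; for example an op mask of <2, 3, 0, 1> under a
// root mask of <1, 0, 3, 2> accumulates to <3, 2, 1, 0>.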
22532 for (int i = 0, e = std::max(OpMask.size(), RootMask.size()); i < e; ++i) {
22533 int RootIdx = i / RootRatio;
22534 if (RootMask[RootIdx] < 0) {
22535 // This is a zero or undef lane, we're done.
22536 Mask.push_back(RootMask[RootIdx]);
22540 int RootMaskedIdx = RootMask[RootIdx] * RootRatio + i % RootRatio;
22541 int OpIdx = RootMaskedIdx / OpRatio;
22542 if (OpMask[OpIdx] < 0) {
22543 // The incoming lanes are zero or undef; it doesn't matter which ones we propagate.
22545 Mask.push_back(OpMask[OpIdx]);
22549 // Ok, we have non-zero lanes, map them through.
22550 Mask.push_back(OpMask[OpIdx] * OpRatio +
22551 RootMaskedIdx % OpRatio);
22554 // See if we can recurse into the operand to combine more things.
22555 switch (Op.getOpcode()) {
22556 case X86ISD::PSHUFB:
22558 case X86ISD::PSHUFD:
22559 case X86ISD::PSHUFHW:
22560 case X86ISD::PSHUFLW:
22561 if (Op.getOperand(0).hasOneUse() &&
22562 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22563 HasPSHUFB, DAG, DCI, Subtarget))
22567 case X86ISD::UNPCKL:
22568 case X86ISD::UNPCKH:
22569 assert(Op.getOperand(0) == Op.getOperand(1) && "We only combine unary shuffles!");
22570 // We can't check for single use, we have to check that this shuffle is the only user.
22571 if (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
22572 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22573 HasPSHUFB, DAG, DCI, Subtarget))
22578 // Minor canonicalization of the accumulated shuffle mask to make it easier
22579 // to match below. All this does is detect masks with sequential pairs of
22580 // elements, and shrink them to the half-width mask. It does this in a loop
22581 // so it will reduce the size of the mask to the minimal width mask which
22582 // performs an equivalent shuffle.
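// For example, the 4-element mask <2, 3, 0, 1> widens to the equivalent 2-element mask <1, 0>.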
22583 SmallVector<int, 16> WidenedMask;
22584 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
22585 Mask = std::move(WidenedMask);
22586 WidenedMask.clear();
22589 return combineX86ShuffleChain(Op, Root, Mask, Depth, HasPSHUFB, DAG, DCI,
22593 /// \brief Get the PSHUF-style mask from a PSHUF node.
22595 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
22596 /// PSHUF-style masks that can be reused with such instructions.
22597 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
22598 SmallVector<int, 4> Mask;
22600 bool HaveMask = getTargetShuffleMask(N.getNode(), N.getSimpleValueType(), Mask, IsUnary);
22604 switch (N.getOpcode()) {
22605 case X86ISD::PSHUFD:
22607 case X86ISD::PSHUFLW:
22610 case X86ISD::PSHUFHW:
22611 Mask.erase(Mask.begin(), Mask.begin() + 4);
22612 for (int &M : Mask)
22616 llvm_unreachable("No valid shuffle instruction found!");
22620 /// \brief Search for a combinable shuffle across a chain ending in pshufd.
22622 /// We walk up the chain and look for a combinable shuffle, skipping over
22623 /// shuffles that we could hoist this shuffle's transformation past without
22624 /// altering anything.
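/// For example, a dword shuffle that is the identity on the low dwords and keeps the
/// high dwords within the high half commutes with a PSHUFLW, so we can look through
/// that PSHUFLW for an earlier PSHUFD to merge with.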
static SDValue
22626 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
22628 TargetLowering::DAGCombinerInfo &DCI) {
22629 assert(N.getOpcode() == X86ISD::PSHUFD &&
22630 "Called with something other than an x86 128-bit half shuffle!");
22633 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
22634 // of the shuffles in the chain so that we can form a fresh chain to replace
22636 SmallVector<SDValue, 8> Chain;
22637 SDValue V = N.getOperand(0);
22638 for (; V.hasOneUse(); V = V.getOperand(0)) {
22639 switch (V.getOpcode()) {
22641 return SDValue(); // Nothing combined!
22644 // Skip bitcasts, as we always know the type for the target-specific instructions.
22648 case X86ISD::PSHUFD:
22649 // Found another dword shuffle.
22652 case X86ISD::PSHUFLW:
22653 // Check that the low words (being shuffled) are the identity in the
22654 // dword shuffle, and the high words are self-contained.
22655 if (Mask[0] != 0 || Mask[1] != 1 ||
22656 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
22659 Chain.push_back(V);
22662 case X86ISD::PSHUFHW:
22663 // Check that the high words (being shuffled) are the identity in the
22664 // dword shuffle, and the low words are self-contained.
22665 if (Mask[2] != 2 || Mask[3] != 3 ||
22666 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
22669 Chain.push_back(V);
22672 case X86ISD::UNPCKL:
22673 case X86ISD::UNPCKH:
22674 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
22675 // shuffle into a preceding word shuffle.
22676 if (V.getValueType() != MVT::v16i8 && V.getValueType() != MVT::v8i16)
22679 // Search for a half-shuffle which we can combine with.
22680 unsigned CombineOp =
22681 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
22682 if (V.getOperand(0) != V.getOperand(1) ||
22683 !V->isOnlyUserOf(V.getOperand(0).getNode()))
22685 Chain.push_back(V);
22686 V = V.getOperand(0);
22688 switch (V.getOpcode()) {
22690 return SDValue(); // Nothing to combine.
22692 case X86ISD::PSHUFLW:
22693 case X86ISD::PSHUFHW:
22694 if (V.getOpcode() == CombineOp)
22697 Chain.push_back(V);
22701 V = V.getOperand(0);
22705 } while (V.hasOneUse());
22708 // Break out of the loop if we break out of the switch.
22712 if (!V.hasOneUse())
22713 // We fell out of the loop without finding a viable combining instruction.
22716 // Merge this node's mask and our incoming mask.
22717 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22718 for (int &M : Mask)
22720 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
22721 getV4X86ShuffleImm8ForMask(Mask, DAG));
22723 // Rebuild the chain around this new shuffle.
22724 while (!Chain.empty()) {
22725 SDValue W = Chain.pop_back_val();
22727 if (V.getValueType() != W.getOperand(0).getValueType())
22728 V = DAG.getNode(ISD::BITCAST, DL, W.getOperand(0).getValueType(), V);
22730 switch (W.getOpcode()) {
22732 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
22734 case X86ISD::UNPCKL:
22735 case X86ISD::UNPCKH:
22736 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
22739 case X86ISD::PSHUFD:
22740 case X86ISD::PSHUFLW:
22741 case X86ISD::PSHUFHW:
22742 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
22746 if (V.getValueType() != N.getValueType())
22747 V = DAG.getNode(ISD::BITCAST, DL, N.getValueType(), V);
22749 // Return the new chain to replace N.
22753 /// \brief Search for a combinable shuffle across a chain ending in pshuflw or pshufhw.
22755 /// We walk up the chain, skipping shuffles of the other half and looking
22756 /// through shuffles which switch halves trying to find a shuffle of the same
22757 /// pair of dwords.
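/// For example, two PSHUFLWs separated only by PSHUFHWs can be merged into one,
/// since the intervening PSHUFHWs never touch the low half.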
22758 static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
22760 TargetLowering::DAGCombinerInfo &DCI) {
22762 (N.getOpcode() == X86ISD::PSHUFLW || N.getOpcode() == X86ISD::PSHUFHW) &&
22763 "Called with something other than an x86 128-bit half shuffle!");
22765 unsigned CombineOpcode = N.getOpcode();
22767 // Walk up a single-use chain looking for a combinable shuffle.
22768 SDValue V = N.getOperand(0);
22769 for (; V.hasOneUse(); V = V.getOperand(0)) {
22770 switch (V.getOpcode()) {
22772 return false; // Nothing combined!
22775 // Skip bitcasts, as we always know the type for the target-specific instructions.
22779 case X86ISD::PSHUFLW:
22780 case X86ISD::PSHUFHW:
22781 if (V.getOpcode() == CombineOpcode)
22784 // Other-half shuffles are no-ops.
22787 // Break out of the loop if we break out of the switch.
22791 if (!V.hasOneUse())
22792 // We fell out of the loop without finding a viable combining instruction.
22795 // Combine away the bottom node as its shuffle will be accumulated into
22796 // a preceding shuffle.
22797 DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22799 // Record the old value.
SDValue Old = V;
22802 // Merge this node's mask and our incoming mask (adjusted to account for all
22803 // the pshufd instructions encountered).
22804 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22805 for (int &M : Mask)
22807 V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
22808 getV4X86ShuffleImm8ForMask(Mask, DAG));
22810 // Check that the shuffles didn't cancel each other out. If not, we need to
22811 // combine to the new one.
if (Old != V)
22813 // Replace the combinable shuffle with the combined one, updating all users
22814 // so that we re-evaluate the chain here.
22815 DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);
22820 /// \brief Try to combine x86 target specific shuffles.
22821 static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
22822 TargetLowering::DAGCombinerInfo &DCI,
22823 const X86Subtarget *Subtarget) {
22825 MVT VT = N.getSimpleValueType();
22826 SmallVector<int, 4> Mask;
22828 switch (N.getOpcode()) {
22829 case X86ISD::PSHUFD:
22830 case X86ISD::PSHUFLW:
22831 case X86ISD::PSHUFHW:
22832 Mask = getPSHUFShuffleMask(N);
22833 assert(Mask.size() == 4);
22839 // Nuke no-op shuffles that show up after combining.
22840 if (isNoopShuffleMask(Mask))
22841 return DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22843 // Look for simplifications involving one or two shuffle instructions.
22844 SDValue V = N.getOperand(0);
22845 switch (N.getOpcode()) {
22848 case X86ISD::PSHUFLW:
22849 case X86ISD::PSHUFHW:
22850 assert(VT == MVT::v8i16);
22853 if (combineRedundantHalfShuffle(N, Mask, DAG, DCI))
22854 return SDValue(); // We combined away this shuffle, so we're done.
22856 // See if this reduces to a PSHUFD which is no more expensive and can
22857 // combine with more operations. Note that it has to at least flip the
22858 // dwords as otherwise it would have been removed as a no-op.
22859 if (Mask[0] == 2 && Mask[1] == 3 && Mask[2] == 0 && Mask[3] == 1) {
22860 int DMask[] = {0, 1, 2, 3};
22861 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
22862 DMask[DOffset + 0] = DOffset + 1;
22863 DMask[DOffset + 1] = DOffset + 0;
22864 V = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V);
22865 DCI.AddToWorklist(V.getNode());
22866 V = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V,
22867 getV4X86ShuffleImm8ForMask(DMask, DAG));
22868 DCI.AddToWorklist(V.getNode());
22869 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
22872 // Look for shuffle patterns which can be implemented as a single unpack.
22873 // FIXME: This doesn't handle the location of the PSHUFD generically, and
22874 // only works when we have a PSHUFD followed by two half-shuffles.
22875 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
22876 (V.getOpcode() == X86ISD::PSHUFLW ||
22877 V.getOpcode() == X86ISD::PSHUFHW) &&
22878 V.getOpcode() != N.getOpcode() &&
22880 SDValue D = V.getOperand(0);
22881 while (D.getOpcode() == ISD::BITCAST && D.hasOneUse())
22882 D = D.getOperand(0);
22883 if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
22884 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22885 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
22886 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
22887 int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
22889 for (int i = 0; i < 4; ++i) {
22890 WordMask[i + NOffset] = Mask[i] + NOffset;
22891 WordMask[i + VOffset] = VMask[i] + VOffset;
22893 // Map the word mask through the DWord mask.
22895 for (int i = 0; i < 8; ++i)
22896 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
22897 const int UnpackLoMask[] = {0, 0, 1, 1, 2, 2, 3, 3};
22898 const int UnpackHiMask[] = {4, 4, 5, 5, 6, 6, 7, 7};
22899 if (std::equal(std::begin(MappedMask), std::end(MappedMask),
22900 std::begin(UnpackLoMask)) ||
22901 std::equal(std::begin(MappedMask), std::end(MappedMask),
22902 std::begin(UnpackHiMask))) {
22903 // We can replace all three shuffles with an unpack.
22904 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, D.getOperand(0));
22905 DCI.AddToWorklist(V.getNode());
22906 return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
22908 DL, MVT::v8i16, V, V);
22915 case X86ISD::PSHUFD:
22916 if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG, DCI))
22925 /// \brief Try to combine a shuffle into a target-specific add-sub node.
22927 /// We combine this directly on the abstract vector shuffle nodes so it is
22928 /// easier to generically match. We also insert dummy vector shuffle nodes for
22929 /// the operands which explicitly discard the lanes which are unused by this
22930 /// operation to try to flow through the rest of the combiner the fact that
22931 /// they're unused.
22932 static SDValue combineShuffleToAddSub(SDNode *N, SelectionDAG &DAG) {
22934 EVT VT = N->getValueType(0);
22936 // We only handle target-independent shuffles.
22937 // FIXME: It would be easy and harmless to use the target shuffle mask
22938 // extraction tool to support more.
22939 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
22942 auto *SVN = cast<ShuffleVectorSDNode>(N);
22943 ArrayRef<int> Mask = SVN->getMask();
22944 SDValue V1 = N->getOperand(0);
22945 SDValue V2 = N->getOperand(1);
22947 // We require the first shuffle operand to be the SUB node, and the second to
22948 // be the ADD node.
22949 // FIXME: We should support the commuted patterns.
22950 if (V1->getOpcode() != ISD::FSUB || V2->getOpcode() != ISD::FADD)
22953 // If there are other uses of these operations we can't fold them.
22954 if (!V1->hasOneUse() || !V2->hasOneUse())
22957 // Ensure that both operations have the same operands. Note that we can
22958 // commute the FADD operands.
22959 SDValue LHS = V1->getOperand(0), RHS = V1->getOperand(1);
22960 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
22961 (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
22964 // We're looking for blends between FADD and FSUB nodes. We insist on these
22965 // nodes being lined up in a specific expected pattern.
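// Even result lanes must come from the FSUB node and odd lanes from the FADD node,
// e.g. the v4f32 blend mask <0, 5, 2, 7>.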
22966 if (!(isShuffleEquivalent(V1, V2, Mask, 0, 3) ||
22967 isShuffleEquivalent(V1, V2, Mask, 0, 5, 2, 7) ||
22968 isShuffleEquivalent(V1, V2, Mask, 0, 9, 2, 11, 4, 13, 6, 15)))
22971 // Only specific types are legal at this point, assert so we notice if and
22972 // when these change.
22973 assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v8f32 ||
22974 VT == MVT::v4f64) &&
22975 "Unknown vector type encountered!");
22977 return DAG.getNode(X86ISD::ADDSUB, DL, VT, LHS, RHS);
22980 /// PerformShuffleCombine - Performs several different shuffle combines.
22981 static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
22982 TargetLowering::DAGCombinerInfo &DCI,
22983 const X86Subtarget *Subtarget) {
22985 SDValue N0 = N->getOperand(0);
22986 SDValue N1 = N->getOperand(1);
22987 EVT VT = N->getValueType(0);
22989 // Don't create instructions with illegal types after legalize types has run.
22990 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22991 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
22994 // If we have legalized the vector types, look for blends of FADD and FSUB
22995 // nodes that we can fuse into an ADDSUB node.
22996 if (TLI.isTypeLegal(VT) && Subtarget->hasSSE3())
22997 if (SDValue AddSub = combineShuffleToAddSub(N, DAG))
23000 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode
23001 if (Subtarget->hasFp256() && VT.is256BitVector() &&
23002 N->getOpcode() == ISD::VECTOR_SHUFFLE)
23003 return PerformShuffleCombine256(N, DAG, DCI, Subtarget);
23005 // During Type Legalization, when promoting illegal vector types,
23006 // the backend might introduce new shuffle dag nodes and bitcasts.
23008 // This code performs the following transformation:
23009 // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
23010 // (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
23012 // We do this only if both the bitcast and the BINOP dag nodes have
23013 // one use. Also, perform this transformation only if the new binary
23014 // operation is legal. This is to avoid introducing dag nodes that
23015 // potentially need to be further expanded (or custom lowered) into a
23016 // less optimal sequence of dag nodes.
23017 if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
23018 N1.getOpcode() == ISD::UNDEF && N0.hasOneUse() &&
23019 N0.getOpcode() == ISD::BITCAST) {
23020 SDValue BC0 = N0.getOperand(0);
23021 EVT SVT = BC0.getValueType();
23022 unsigned Opcode = BC0.getOpcode();
23023 unsigned NumElts = VT.getVectorNumElements();
23025 if (BC0.hasOneUse() && SVT.isVector() &&
23026 SVT.getVectorNumElements() * 2 == NumElts &&
23027 TLI.isOperationLegal(Opcode, VT)) {
23028 bool CanFold = false;
23040 unsigned SVTNumElts = SVT.getVectorNumElements();
23041 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
23042 for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
23043 CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
23044 for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
23045 CanFold = SVOp->getMaskElt(i) < 0;
23048 SDValue BC00 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(0));
23049 SDValue BC01 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(1));
23050 SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
23051 return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, &SVOp->getMask()[0]);
23056 // Only handle 128-bit wide vectors from here on.
23057 if (!VT.is128BitVector())
23060 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
23061 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
23062 // consecutive, non-overlapping, and in the right order.
23063 SmallVector<SDValue, 16> Elts;
23064 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
23065 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));
23067 SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true);
23071 if (isTargetShuffle(N->getOpcode())) {
23073 PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget);
23074 if (Shuffle.getNode())
23077 // Try recursively combining arbitrary sequences of x86 shuffle
23078 // instructions into higher-order shuffles. We do this after combining
23079 // specific PSHUF instruction sequences into their minimal form so that we
23080 // can evaluate how many specialized shuffle instructions are involved in
23081 // a particular chain.
23082 SmallVector<int, 1> NonceMask; // Just a placeholder.
23083 NonceMask.push_back(0);
23084 if (combineX86ShufflesRecursively(SDValue(N, 0), SDValue(N, 0), NonceMask,
23085 /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
23087 return SDValue(); // This routine will use CombineTo to replace N.
23093 /// PerformTruncateCombine - Converts a truncate operation into
23094 /// a sequence of vector shuffle operations.
23095 /// This is possible when we truncate a 256-bit vector to a 128-bit vector.
23096 static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
23097 TargetLowering::DAGCombinerInfo &DCI,
23098 const X86Subtarget *Subtarget) {
23102 /// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
23103 /// specific shuffle of a load can be folded into a single element load.
23104 /// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
23105 /// shuffles have been custom lowered so we need to handle those here.
23106 static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
23107 TargetLowering::DAGCombinerInfo &DCI) {
23108 if (DCI.isBeforeLegalizeOps())
23111 SDValue InVec = N->getOperand(0);
23112 SDValue EltNo = N->getOperand(1);
23114 if (!isa<ConstantSDNode>(EltNo))
23117 EVT OriginalVT = InVec.getValueType();
23119 if (InVec.getOpcode() == ISD::BITCAST) {
23120 // Don't duplicate a load with other uses.
23121 if (!InVec.hasOneUse())
23123 EVT BCVT = InVec.getOperand(0).getValueType();
23124 if (BCVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
23126 InVec = InVec.getOperand(0);
23129 EVT CurrentVT = InVec.getValueType();
23131 if (!isTargetShuffle(InVec.getOpcode()))
23134 // Don't duplicate a load with other uses.
23135 if (!InVec.hasOneUse())
23138 SmallVector<int, 16> ShuffleMask;
23140 if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(),
23141 ShuffleMask, UnaryShuffle))
23144 // Select the input vector, guarding against an out-of-range extract index.
23145 unsigned NumElems = CurrentVT.getVectorNumElements();
23146 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
23147 int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt];
23148 SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
23149 : InVec.getOperand(1);
23151 // If inputs to shuffle are the same for both ops, then allow 2 uses
23152 unsigned AllowedUses = InVec.getNumOperands() > 1 &&
23153 InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;
23155 if (LdNode.getOpcode() == ISD::BITCAST) {
23156 // Don't duplicate a load with other uses.
23157 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
23160 AllowedUses = 1; // only allow 1 load use if we have a bitcast
23161 LdNode = LdNode.getOperand(0);
23164 if (!ISD::isNormalLoad(LdNode.getNode()))
23167 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
23169 if (!LN0 ||!LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
23172 EVT EltVT = N->getValueType(0);
23173 // If there's a bitcast before the shuffle, check if the load type and
23174 // alignment are valid.
23175 unsigned Align = LN0->getAlignment();
23176 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23177 unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
23178 EltVT.getTypeForEVT(*DAG.getContext()));
23180 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
23183 // All checks match so transform back to vector_shuffle so that DAG combiner
23184 // can finish the job
23187 // Create a shuffle node, taking into account the case that it's a unary shuffle.
23188 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT)
23189 : InVec.getOperand(1);
23190 Shuffle = DAG.getVectorShuffle(CurrentVT, dl,
23191 InVec.getOperand(0), Shuffle,
23193 Shuffle = DAG.getNode(ISD::BITCAST, dl, OriginalVT, Shuffle);
23194 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
23198 /// \brief Detect bitcasts from i32 to the x86mmx low word. Since MMX types are
23199 /// special and don't usually play with other vector types, it's better to
23200 /// handle them early to be sure we emit efficient code by avoiding
23201 /// store-load conversions.
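/// i.e. (x86mmx (bitcast (v2i32 build_vector X, 0))) with X of type i32 is emitted
/// as (MMX_MOVW2D X).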
23202 static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG) {
23203 if (N->getValueType(0) != MVT::x86mmx ||
23204 N->getOperand(0)->getOpcode() != ISD::BUILD_VECTOR ||
23205 N->getOperand(0)->getValueType(0) != MVT::v2i32)
23208 SDValue V = N->getOperand(0);
23209 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V.getOperand(1));
23210 if (C && C->getZExtValue() == 0 && V.getOperand(0).getValueType() == MVT::i32)
23211 return DAG.getNode(X86ISD::MMX_MOVW2D, SDLoc(V.getOperand(0)),
23212 N->getValueType(0), V.getOperand(0));
23217 /// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
23218 /// generation and convert it from being a bunch of shuffles and extracts
23219 /// into a somewhat faster sequence. For i686, the best sequence is apparently
23220 /// storing the value and loading scalars back, while for x64 we should
23221 /// use 64-bit extracts and shifts.
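/// For example, on x86-64 a v4i32 source is bitcast to v2i64, both halves are
/// extracted once, and each i32 is then recovered with a truncate or a shift-by-32
/// followed by a truncate.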
23222 static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
23223 TargetLowering::DAGCombinerInfo &DCI) {
23224 SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI);
23225 if (NewOp.getNode())
23228 SDValue InputVector = N->getOperand(0);
23230 // Detect mmx to i32 conversion through a v2i32 elt extract.
23231 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
23232 N->getValueType(0) == MVT::i32 &&
23233 InputVector.getValueType() == MVT::v2i32) {
23235 // The bitcast source is a direct mmx result.
23236 SDValue MMXSrc = InputVector.getNode()->getOperand(0);
23237 if (MMXSrc.getValueType() == MVT::x86mmx)
23238 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23239 N->getValueType(0),
23240 InputVector.getNode()->getOperand(0));
23242 // The mmx is indirect: (i64 extract_elt (v1i64 bitcast (x86mmx ...))).
23243 SDValue MMXSrcOp = MMXSrc.getOperand(0);
23244 if (MMXSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT && MMXSrc.hasOneUse() &&
23245 MMXSrc.getValueType() == MVT::i64 && MMXSrcOp.hasOneUse() &&
23246 MMXSrcOp.getOpcode() == ISD::BITCAST &&
23247 MMXSrcOp.getValueType() == MVT::v1i64 &&
23248 MMXSrcOp.getOperand(0).getValueType() == MVT::x86mmx)
23249 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23250 N->getValueType(0),
23251 MMXSrcOp.getOperand(0));
23254 // Only operate on vectors of 4 elements, where the alternative shuffling
23255 // gets to be more expensive.
23256 if (InputVector.getValueType() != MVT::v4i32)
23259 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
23260 // single use which is a sign-extend or zero-extend, and all elements are used.
23262 SmallVector<SDNode *, 4> Uses;
23263 unsigned ExtractedElements = 0;
23264 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
23265 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
23266 if (UI.getUse().getResNo() != InputVector.getResNo())
23269 SDNode *Extract = *UI;
23270 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
23273 if (Extract->getValueType(0) != MVT::i32)
23275 if (!Extract->hasOneUse())
23277 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
23278 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
23280 if (!isa<ConstantSDNode>(Extract->getOperand(1)))
23283 // Record which element was extracted.
23284 ExtractedElements |=
23285 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();
23287 Uses.push_back(Extract);
23290 // If not all the elements were used, this may not be worthwhile.
23291 if (ExtractedElements != 15)
23294 // Ok, we've now decided to do the transformation.
23295 // If 64-bit shifts are legal, use the extract-shift sequence,
23296 // otherwise bounce the vector off the cache.
23297 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23299 SDLoc dl(InputVector);
23301 if (TLI.isOperationLegal(ISD::SRA, MVT::i64)) {
23302 SDValue Cst = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, InputVector);
23303 EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy();
23304 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23305 DAG.getConstant(0, VecIdxTy));
23306 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23307 DAG.getConstant(1, VecIdxTy));
23309 SDValue ShAmt = DAG.getConstant(32,
23310 DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64));
23311 Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
23312 Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
23313 DAG.getNode(ISD::SRA, dl, MVT::i64, BottomHalf, ShAmt));
23314 Vals[2] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, TopHalf);
23315 Vals[3] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
23316 DAG.getNode(ISD::SRA, dl, MVT::i64, TopHalf, ShAmt));
23318 // Store the value to a temporary stack slot.
23319 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
23320 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
23321 MachinePointerInfo(), false, false, 0);
23323 EVT ElementType = InputVector.getValueType().getVectorElementType();
23324 unsigned EltSize = ElementType.getSizeInBits() / 8;
23326 // Replace each use (extract) with a load of the appropriate element.
23327 for (unsigned i = 0; i < 4; ++i) {
23328 uint64_t Offset = EltSize * i;
23329 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
23331 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
23332 StackPtr, OffsetVal);
23334 // Load the scalar.
23335 Vals[i] = DAG.getLoad(ElementType, dl, Ch,
23336 ScalarAddr, MachinePointerInfo(),
23337 false, false, false, 0);
23342 // Replace the extracts
23343 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
23344 UE = Uses.end(); UI != UE; ++UI) {
23345 SDNode *Extract = *UI;
23347 SDValue Idx = Extract->getOperand(1);
23348 uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
23349 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), Vals[IdxVal]);
23352 // The replacement was made in place; don't return anything.
23356 /// \brief Matches a VSELECT onto min/max, or returns 0 if the node doesn't match.
23357 static std::pair<unsigned, bool>
23358 matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, SDValue RHS,
23359 SelectionDAG &DAG, const X86Subtarget *Subtarget) {
23360 if (!VT.isVector())
23361 return std::make_pair(0, false);
23363 bool NeedSplit = false;
23364 switch (VT.getSimpleVT().SimpleTy) {
23365 default: return std::make_pair(0, false);
23368 if (!Subtarget->hasVLX())
23369 return std::make_pair(0, false);
23373 if (!Subtarget->hasBWI())
23374 return std::make_pair(0, false);
23378 if (!Subtarget->hasAVX512())
23379 return std::make_pair(0, false);
23384 if (!Subtarget->hasAVX2())
23386 if (!Subtarget->hasAVX())
23387 return std::make_pair(0, false);
23392 if (!Subtarget->hasSSE2())
23393 return std::make_pair(0, false);
23396 // SSE2 has only a small subset of the operations.
23397 bool hasUnsigned = Subtarget->hasSSE41() ||
23398 (Subtarget->hasSSE2() && VT == MVT::v16i8);
23399 bool hasSigned = Subtarget->hasSSE41() ||
23400 (Subtarget->hasSSE2() && VT == MVT::v8i16);
23402 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23405 // Check for x CC y ? x : y.
23406 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23407 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23412 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23415 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23418 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23421 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23423 // Check for x CC y ? y : x -- a min/max with reversed arms.
23424 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23425 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23430 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23433 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23436 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23439 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23443 return std::make_pair(Opc, NeedSplit);
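// Example of the matching above: with SSE4.1, a v8i16
//   (vselect (setult X, Y), X, Y)
// is matched to X86ISD::UMIN (pminuw). On a target with AVX but not AVX2,
// the same pattern on v8i32 is matched with NeedSplit set, telling the
// caller to split it into two 128-bit min operations.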
23447 transformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
23448 const X86Subtarget *Subtarget) {
23450 SDValue Cond = N->getOperand(0);
23451 SDValue LHS = N->getOperand(1);
23452 SDValue RHS = N->getOperand(2);
23454 if (Cond.getOpcode() == ISD::SIGN_EXTEND) {
23455 SDValue CondSrc = Cond->getOperand(0);
23456 if (CondSrc->getOpcode() == ISD::SIGN_EXTEND_INREG)
23457 Cond = CondSrc->getOperand(0);
23460 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
23463 // A vselect where all conditions and data are constants can be optimized into
23464 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
23465 if (ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
23466 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
23469 unsigned MaskValue = 0;
23470 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
23473 MVT VT = N->getSimpleValueType(0);
23474 unsigned NumElems = VT.getVectorNumElements();
23475 SmallVector<int, 8> ShuffleMask(NumElems, -1);
23476 for (unsigned i = 0; i < NumElems; ++i) {
23477 // Be sure we emit undef where we can.
23478 if (Cond.getOperand(i)->getOpcode() == ISD::UNDEF)
23479 ShuffleMask[i] = -1;
23481 ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
23484 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23485 if (!TLI.isShuffleMaskLegal(ShuffleMask, VT))
23487 return DAG.getVectorShuffle(VT, dl, LHS, RHS, &ShuffleMask[0]);
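// Illustrative example of the transform above: for a v4i32 vselect whose
// condition is a build_vector of constants, each lane i of the shuffle mask
// becomes either i (take from LHS) or i + 4 (take from RHS), so the node can
// be lowered as a single blend-style shuffle once the mask is known legal.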
23490 /// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT nodes.
23492 static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
23493 TargetLowering::DAGCombinerInfo &DCI,
23494 const X86Subtarget *Subtarget) {
23496 SDValue Cond = N->getOperand(0);
23497 // Get the LHS/RHS of the select.
23498 SDValue LHS = N->getOperand(1);
23499 SDValue RHS = N->getOperand(2);
23500 EVT VT = LHS.getValueType();
23501 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23503 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
23504 // instructions match the semantics of the common C idiom x<y?x:y but not
23505 // x<=y?x:y, because of how they handle negative zero (which can be
23506 // ignored in unsafe-math mode).
23507 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
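// Note: MINSS/MINPS (and MAXSS/MAXPS) return the second source operand when
// the operands are unordered (a NaN is present) or when both inputs are zero
// of opposite sign, which is why the operand order and the known-never-NaN /
// known-never-zero checks below matter for each condition code.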
23508 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
23509 VT != MVT::f80 && (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
23510 (Subtarget->hasSSE2() ||
23511 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
23512 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23514 unsigned Opcode = 0;
23515 // Check for x CC y ? x : y.
23516 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23517 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23521 // Converting this to a min would handle NaNs incorrectly, and swapping
23522 // the operands would cause it to handle comparisons between positive
23523 // and negative zero incorrectly.
23524 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23525 if (!DAG.getTarget().Options.UnsafeFPMath &&
23526 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23528 std::swap(LHS, RHS);
23530 Opcode = X86ISD::FMIN;
23533 // Converting this to a min would handle comparisons between positive
23534 // and negative zero incorrectly.
23535 if (!DAG.getTarget().Options.UnsafeFPMath &&
23536 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23538 Opcode = X86ISD::FMIN;
23541 // Converting this to a min would handle both negative zeros and NaNs
23542 // incorrectly, but we can swap the operands to fix both.
23543 std::swap(LHS, RHS);
23547 Opcode = X86ISD::FMIN;
23551 // Converting this to a max would handle comparisons between positive
23552 // and negative zero incorrectly.
23553 if (!DAG.getTarget().Options.UnsafeFPMath &&
23554 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23556 Opcode = X86ISD::FMAX;
23559 // Converting this to a max would handle NaNs incorrectly, and swapping
23560 // the operands would cause it to handle comparisons between positive
23561 // and negative zero incorrectly.
23562 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23563 if (!DAG.getTarget().Options.UnsafeFPMath &&
23564 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23566 std::swap(LHS, RHS);
23568 Opcode = X86ISD::FMAX;
23571 // Converting this to a max would handle both negative zeros and NaNs
23572 // incorrectly, but we can swap the operands to fix both.
23573 std::swap(LHS, RHS);
23577 Opcode = X86ISD::FMAX;
23580 // Check for x CC y ? y : x -- a min/max with reversed arms.
23581 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23582 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23586 // Converting this to a min would handle comparisons between positive
23587 // and negative zero incorrectly, and swapping the operands would
23588 // cause it to handle NaNs incorrectly.
23589 if (!DAG.getTarget().Options.UnsafeFPMath &&
23590 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
23591 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23593 std::swap(LHS, RHS);
23595 Opcode = X86ISD::FMIN;
23598 // Converting this to a min would handle NaNs incorrectly.
23599 if (!DAG.getTarget().Options.UnsafeFPMath &&
23600 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
23602 Opcode = X86ISD::FMIN;
23605 // Converting this to a min would handle both negative zeros and NaNs
23606 // incorrectly, but we can swap the operands to fix both.
23607 std::swap(LHS, RHS);
23611 Opcode = X86ISD::FMIN;
23615 // Converting this to a max would handle NaNs incorrectly.
23616 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23618 Opcode = X86ISD::FMAX;
23621 // Converting this to a max would handle comparisons between positive
23622 // and negative zero incorrectly, and swapping the operands would
23623 // cause it to handle NaNs incorrectly.
23624 if (!DAG.getTarget().Options.UnsafeFPMath &&
23625 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
23626 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23628 std::swap(LHS, RHS);
23630 Opcode = X86ISD::FMAX;
23633 // Converting this to a max would handle both negative zeros and NaNs
23634 // incorrectly, but we can swap the operands to fix both.
23635 std::swap(LHS, RHS);
23639 Opcode = X86ISD::FMAX;
23645 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
23648 EVT CondVT = Cond.getValueType();
23649 if (Subtarget->hasAVX512() && VT.isVector() && CondVT.isVector() &&
23650 CondVT.getVectorElementType() == MVT::i1) {
23651 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
23652 // lowering on KNL. In this case we convert it to
23653 // v16i8 (select v16i8, v16i8, v16i8) and use an AVX instruction.
23654 // The same applies to all 128- and 256-bit vectors of i8 and i16.
23655 // Starting with SKX these selects have a proper lowering.
23656 EVT OpVT = LHS.getValueType();
23657 if ((OpVT.is128BitVector() || OpVT.is256BitVector()) &&
23658 (OpVT.getVectorElementType() == MVT::i8 ||
23659 OpVT.getVectorElementType() == MVT::i16) &&
23660 !(Subtarget->hasBWI() && Subtarget->hasVLX())) {
23661 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond);
23662 DCI.AddToWorklist(Cond.getNode());
23663 return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS);
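// For example, on KNL a (v32i8 (vselect v32i1 Cond, A, B)) has no direct
// lowering, so Cond is sign-extended to v32i8 and the node is re-emitted as
// a v32i8-condition vselect, which can then be selected as vpblendvb.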
23666 // If this is a select between two integer constants, try to do some optimizations.
23668 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
23669 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
23670 // Don't do this for crazy integer types.
23671 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
23672 // If this is efficiently invertible, canonicalize the LHSC/RHSC values
23673 // so that TrueC (the true value) is larger than FalseC.
23674 bool NeedsCondInvert = false;
23676 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
23677 // Efficiently invertible.
23678 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible.
23679 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible.
23680 isa<ConstantSDNode>(Cond.getOperand(1))))) {
23681 NeedsCondInvert = true;
23682 std::swap(TrueC, FalseC);
23685 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0.
23686 if (FalseC->getAPIntValue() == 0 &&
23687 TrueC->getAPIntValue().isPowerOf2()) {
23688 if (NeedsCondInvert) // Invert the condition if needed.
23689 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23690 DAG.getConstant(1, Cond.getValueType()));
23692 // Zero extend the condition if needed.
23693 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
23695 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
23696 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
23697 DAG.getConstant(ShAmt, MVT::i8));
23700 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst.
23701 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
23702 if (NeedsCondInvert) // Invert the condition if needed.
23703 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23704 DAG.getConstant(1, Cond.getValueType()));
23706 // Zero extend the condition if needed.
23707 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
23708 FalseC->getValueType(0), Cond);
23709 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23710 SDValue(FalseC, 0));
23713 // Optimize cases that will turn into an LEA instruction. This requires
23714 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
23715 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
23716 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
23717 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
23719 bool isFastMultiplier = false;
23721 switch ((unsigned char)Diff) {
23723 case 1: // result = add base, cond
23724 case 2: // result = lea base( , cond*2)
23725 case 3: // result = lea base(cond, cond*2)
23726 case 4: // result = lea base( , cond*4)
23727 case 5: // result = lea base(cond, cond*4)
23728 case 8: // result = lea base( , cond*8)
23729 case 9: // result = lea base(cond, cond*8)
23730 isFastMultiplier = true;
23735 if (isFastMultiplier) {
23736 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
23737 if (NeedsCondInvert) // Invert the condition if needed.
23738 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23739 DAG.getConstant(1, Cond.getValueType()));
23741 // Zero extend the condition if needed.
23742 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
23744 // Scale the condition by the difference.
23746 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
23747 DAG.getConstant(Diff, Cond.getValueType()));
23749 // Add the base if non-zero.
23750 if (FalseC->getAPIntValue() != 0)
23751 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23752 SDValue(FalseC, 0));
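// For example, (Cond ? 9 : 4) has Diff == 5, so it becomes
// 4 + 5 * zext(Cond), which matches a single LEA such as
// leal 4(%rax,%rax,4), %eax.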
23759 // Canonicalize max and min:
23760 // (x > y) ? x : y -> (x >= y) ? x : y
23761 // (x < y) ? x : y -> (x <= y) ? x : y
23762 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
23763 // the need for an extra compare
23764 // against zero. For example,
23765 // (x - y) > 0 ? (x - y) : 0  -->  (x - y) >= 0 ? (x - y) : 0
23767 // turns "subl %esi, %edi; testl %edi, %edi; movl $0, %eax; cmovgl %edi, %eax"
23769 // into the shorter
23773 // "xorl %eax, %eax; subl %esi, %edi; cmovsl %eax, %edi".
23774 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
23775 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23776 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23777 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23782 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
23783 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
23784 Cond.getOperand(0), Cond.getOperand(1), NewCC);
23785 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
23790 // Early exit check
23791 if (!TLI.isTypeLegal(VT))
23794 // Match VSELECTs into subs with unsigned saturation.
23795 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
23796 // psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
23797 ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
23798 (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
23799 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23801 // Check if one of the arms of the VSELECT is a zero vector. If it's on the
23802 // left side invert the predicate to simplify logic below.
23804 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
23806 CC = ISD::getSetCCInverse(CC, true);
23807 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
23811 if (Other.getNode() && Other->getNumOperands() == 2 &&
23812 DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
23813 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
23814 SDValue CondRHS = Cond->getOperand(1);
23816 // Look for a general sub with unsigned saturation first.
23817 // x >= y ? x-y : 0 --> subus x, y
23818 // x > y ? x-y : 0 --> subus x, y
23819 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
23820 Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
23821 return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);
23823 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS))
23824 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
23825 if (auto *CondRHSBV = dyn_cast<BuildVectorSDNode>(CondRHS))
23826 if (auto *CondRHSConst = CondRHSBV->getConstantSplatNode())
23827 // If the RHS is a constant we have to reverse the const
23828 // canonicalization.
23829 // x > C-1 ? x+-C : 0 --> subus x, C
23830 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
23831 CondRHSConst->getAPIntValue() ==
23832 (-OpRHSConst->getAPIntValue() - 1))
23833 return DAG.getNode(
23834 X86ISD::SUBUS, DL, VT, OpLHS,
23835 DAG.getConstant(-OpRHSConst->getAPIntValue(), VT));
23837 // Another special case: If C was a sign bit, the sub has been
23838 // canonicalized into a xor.
23839 // FIXME: Would it be better to use computeKnownBits to determine
23840 // whether it's safe to decanonicalize the xor?
23841 // x s< 0 ? x^C : 0 --> subus x, C
23842 if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
23843 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
23844 OpRHSConst->getAPIntValue().isSignBit())
23845 // Note that we have to rebuild the RHS constant here to ensure we
23846 // don't rely on particular values of undef lanes.
23847 return DAG.getNode(
23848 X86ISD::SUBUS, DL, VT, OpLHS,
23849 DAG.getConstant(OpRHSConst->getAPIntValue(), VT));
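// For example, with v8i16 operands the IR idiom "x > 9 ? x - 10 : 0" reaches
// here as (vselect (setugt X, splat(9)), (add X, splat(-10)), zero) and is
// matched to (X86ISD::SUBUS X, splat(10)), i.e. a single psubusw.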
23854 // Try to match a min/max vector operation.
23855 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC) {
23856 std::pair<unsigned, bool> ret = matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget);
23857 unsigned Opc = ret.first;
23858 bool NeedSplit = ret.second;
23860 if (Opc && NeedSplit) {
23861 unsigned NumElems = VT.getVectorNumElements();
23862 // Extract the LHS vectors
23863 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, DL);
23864 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, DL);
23866 // Extract the RHS vectors
23867 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, DL);
23868 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, DL);
23870 // Create min/max for each subvector
23871 LHS = DAG.getNode(Opc, DL, LHS1.getValueType(), LHS1, RHS1);
23872 RHS = DAG.getNode(Opc, DL, LHS2.getValueType(), LHS2, RHS2);
23874 // Merge the result
23875 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS, RHS);
23877 return DAG.getNode(Opc, DL, VT, LHS, RHS);
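// For example, a v8i32 signed-min vselect on a target with AVX but without
// AVX2 is split here: each 128-bit half is lowered to pminsd and the two
// halves are concatenated back into a 256-bit result.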
23880 // Simplify vector selection if the condition value type matches the vselect operand type.
23882 if (N->getOpcode() == ISD::VSELECT && CondVT == VT) {
23883 assert(Cond.getValueType().isVector() &&
23884 "vector select expects a vector selector!");
23886 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
23887 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
23889 // Try to invert the condition if the true value is not all 1s and the false value is not all 0s.
23891 if (!TValIsAllOnes && !FValIsAllZeros &&
23892 // Check if the selector will be produced by CMPP*/PCMP*
23893 Cond.getOpcode() == ISD::SETCC &&
23894 // Check if SETCC has already been promoted
23895 TLI.getSetCCResultType(*DAG.getContext(), VT) == CondVT) {
23896 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
23897 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
23899 if (TValIsAllZeros || FValIsAllOnes) {
23900 SDValue CC = Cond.getOperand(2);
23901 ISD::CondCode NewCC =
23902 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
23903 Cond.getOperand(0).getValueType().isInteger());
23904 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1), NewCC);
23905 std::swap(LHS, RHS);
23906 TValIsAllOnes = FValIsAllOnes;
23907 FValIsAllZeros = TValIsAllZeros;
23911 if (TValIsAllOnes || FValIsAllZeros) {
23914 if (TValIsAllOnes && FValIsAllZeros)
23916 else if (TValIsAllOnes)
23917 Ret = DAG.getNode(ISD::OR, DL, CondVT, Cond,
23918 DAG.getNode(ISD::BITCAST, DL, CondVT, RHS));
23919 else if (FValIsAllZeros)
23920 Ret = DAG.getNode(ISD::AND, DL, CondVT, Cond,
23921 DAG.getNode(ISD::BITCAST, DL, CondVT, LHS));
23923 return DAG.getNode(ISD::BITCAST, DL, VT, Ret);
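// For example, (vselect M, AllOnes, X) becomes (or M, X) and
// (vselect M, X, AllZeros) becomes (and M, X); when the true value is all
// ones and the false value is all zeros the result is simply M bitcast to
// the result type. This relies on M being a CMPP*/PCMP*-style mask whose
// lanes are all-ones or all-zeros.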
23927 // If we know that this node is legal then we know that it is going to be
23928 // matched by one of the SSE/AVX BLEND instructions. These instructions only
23929 // depend on the highest bit in each word. Try to use SimplifyDemandedBits
23930 // to simplify previous instructions.
23931 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
23932 !DCI.isBeforeLegalize() &&
23933 // We explicitly check against v8i16 and v16i16 because, although
23934 // they're marked as Custom, they might only be legal when Cond is a
23935 // build_vector of constants. This will be taken care of in a later phase.
23937 (TLI.isOperationLegalOrCustom(ISD::VSELECT, VT) && VT != MVT::v16i16 &&
23938 VT != MVT::v8i16) &&
23939 // Don't optimize vector of constants. Those are handled by
23940 // the generic code and all the bits must be properly set for
23941 // the generic optimizer.
23942 !ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
23943 unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();
23945 // Don't optimize vector selects that map to mask-registers.
23949 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
23950 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);
23952 APInt KnownZero, KnownOne;
23953 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
23954 DCI.isBeforeLegalizeOps());
23955 if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) ||
23956 TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne,
23958 // If we changed the computation somewhere in the DAG, this change
23959 // will affect all users of Cond.
23960 // Make sure it is fine and update all the nodes so that we do not
23961 // use the generic VSELECT anymore. Otherwise, we may perform
23962 // wrong optimizations as we messed up with the actual expectation
23963 // for the vector boolean values.
23964 if (Cond != TLO.Old) {
23965 // Check all uses of that condition operand to check whether it will be
23966 // consumed by non-BLEND instructions, which may depend on all bits being set properly.
23968 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
23970 if (I->getOpcode() != ISD::VSELECT)
23971 // TODO: Add other opcodes eventually lowered into BLEND.
23974 // Update all the users of the condition, before committing the change,
23975 // so that the VSELECT optimizations that expect the correct vector
23976 // boolean value will not be triggered.
23977 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
23979 DAG.ReplaceAllUsesOfValueWith(
23981 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(*I), I->getValueType(0),
23982 Cond, I->getOperand(1), I->getOperand(2)));
23983 DCI.CommitTargetLoweringOpt(TLO);
23986 // At this point, only Cond is changed. Change the condition
23987 // just for N to keep the opportunity to optimize all other
23988 // users their own way.
23989 DAG.ReplaceAllUsesOfValueWith(
23991 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(N), N->getValueType(0),
23992 TLO.New, N->getOperand(1), N->getOperand(2)));
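// Sketch of the idea above: BLENDVPS/PBLENDVB only read the sign bit of
// each condition lane, so DemandedMask is just the high bit per element.
// If SimplifyDemandedBits can simplify the condition under that mask, the
// VSELECT users are rewritten to X86ISD::SHRUNKBLEND, which documents that
// only the sign bits of the condition are observed.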
23997 // We should generate an X86ISD::BLENDI from a vselect if its argument
23998 // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
23999 // constants. This specific pattern gets generated when we split a
24000 // selector for a 512 bit vector in a machine without AVX512 (but with
24001 // 256-bit vectors), during legalization:
24003 // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
24005 // Iff we find this pattern and the build_vectors are built from
24006 // constants, we translate the vselect into a shuffle_vector that we
24007 // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
24008 if ((N->getOpcode() == ISD::VSELECT ||
24009 N->getOpcode() == X86ISD::SHRUNKBLEND) &&
24010 !DCI.isBeforeLegalize()) {
24011 SDValue Shuffle = transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
24012 if (Shuffle.getNode())
24019 // Check whether a boolean test is testing a boolean value generated by
24020 // X86ISD::SETCC. If so, return the operand of that SETCC and the proper condition flag.
24023 // Simplify the following patterns:
24024 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
24025 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
24026 // to (Op EFLAGS Cond)
24028 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
24029 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
24030 // to (Op EFLAGS !Cond)
24032 // where Op could be BRCOND or CMOV.
24034 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
24035 // Quit unless Cmp is a CMP node, or a SUB node whose value result is unused.
24036 if (Cmp.getOpcode() != X86ISD::CMP &&
24037 (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
24040 // Quit if not used as a boolean value.
24041 if (CC != X86::COND_E && CC != X86::COND_NE)
24044 // Check CMP operands. One of them should be 0 or 1 and the other should be
24045 // a SetCC or extended from it.
24046 SDValue Op1 = Cmp.getOperand(0);
24047 SDValue Op2 = Cmp.getOperand(1);
24050 const ConstantSDNode* C = nullptr;
24051 bool needOppositeCond = (CC == X86::COND_E);
24052 bool checkAgainstTrue = false; // Is it a comparison against 1?
24054 if ((C = dyn_cast<ConstantSDNode>(Op1)))
24056 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
24058 else // Quit if all operands are not constants.
24061 if (C->getZExtValue() == 1) {
24062 needOppositeCond = !needOppositeCond;
24063 checkAgainstTrue = true;
24064 } else if (C->getZExtValue() != 0)
24065 // Quit if the constant is neither 0 nor 1.
24068 bool truncatedToBoolWithAnd = false;
24069 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
24070 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
24071 SetCC.getOpcode() == ISD::TRUNCATE ||
24072 SetCC.getOpcode() == ISD::AND) {
24073 if (SetCC.getOpcode() == ISD::AND) {
24075 ConstantSDNode *CS;
24076 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(0))) &&
24077 CS->getZExtValue() == 1)
24079 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(1))) &&
24080 CS->getZExtValue() == 1)
24084 SetCC = SetCC.getOperand(OpIdx);
24085 truncatedToBoolWithAnd = true;
24087 SetCC = SetCC.getOperand(0);
24090 switch (SetCC.getOpcode()) {
24091 case X86ISD::SETCC_CARRY:
24092 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
24093 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
24094 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
24095 // truncated to i1 using 'and'.
24096 if (checkAgainstTrue && !truncatedToBoolWithAnd)
24098 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
24099 "Invalid use of SETCC_CARRY!");
24101 case X86ISD::SETCC:
24102 // Set the condition code or opposite one if necessary.
24103 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
24104 if (needOppositeCond)
24105 CC = X86::GetOppositeBranchCondition(CC);
24106 return SetCC.getOperand(1);
24107 case X86ISD::CMOV: {
24108 // Check whether false/true value has canonical one, i.e. 0 or 1.
24109 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
24110 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
24111 // Quit if true value is not a constant.
24114 // Quit if false value is not a constant.
24116 SDValue Op = SetCC.getOperand(0);
24117 // Skip 'zext' or 'trunc' node.
24118 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
24119 Op.getOpcode() == ISD::TRUNCATE)
24120 Op = Op.getOperand(0);
24121 // A special case for rdrand/rdseed, where 0 is set if the false cond is found.
24123 if ((Op.getOpcode() != X86ISD::RDRAND &&
24124 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
24127 // Quit if false value is not the constant 0 or 1.
24128 bool FValIsFalse = true;
24129 if (FVal && FVal->getZExtValue() != 0) {
24130 if (FVal->getZExtValue() != 1)
24132 // If FVal is 1, opposite cond is needed.
24133 needOppositeCond = !needOppositeCond;
24134 FValIsFalse = false;
24136 // Quit if TVal is not the constant opposite of FVal.
24137 if (FValIsFalse && TVal->getZExtValue() != 1)
24139 if (!FValIsFalse && TVal->getZExtValue() != 0)
24141 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
24142 if (needOppositeCond)
24143 CC = X86::GetOppositeBranchCondition(CC);
24144 return SetCC.getOperand(3);
24151 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
24152 static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
24153 TargetLowering::DAGCombinerInfo &DCI,
24154 const X86Subtarget *Subtarget) {
24157 // If the flag operand isn't dead, don't touch this CMOV.
24158 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
24161 SDValue FalseOp = N->getOperand(0);
24162 SDValue TrueOp = N->getOperand(1);
24163 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
24164 SDValue Cond = N->getOperand(3);
24166 if (CC == X86::COND_E || CC == X86::COND_NE) {
24167 switch (Cond.getOpcode()) {
24171 // If operand of BSR / BSF are proven never zero, then ZF cannot be set.
24172 if (DAG.isKnownNeverZero(Cond.getOperand(0)))
24173 return (CC == X86::COND_E) ? FalseOp : TrueOp;
24179 Flags = checkBoolTestSetCCCombine(Cond, CC);
24180 if (Flags.getNode() &&
24181 // Extra check as FCMOV only supports a subset of X86 cond.
24182 (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
24183 SDValue Ops[] = { FalseOp, TrueOp,
24184 DAG.getConstant(CC, MVT::i8), Flags };
24185 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
24188 // If this is a select between two integer constants, try to do some
24189 // optimizations. Note that the operands are ordered the opposite of SELECT operands.
24191 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
24192 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
24193 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
24194 // larger than FalseC (the false value).
24195 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
24196 CC = X86::GetOppositeBranchCondition(CC);
24197 std::swap(TrueC, FalseC);
24198 std::swap(TrueOp, FalseOp);
24201 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
24202 // This is efficient for any integer data type (including i8/i16) and any shift amount.
24204 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
24205 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24206 DAG.getConstant(CC, MVT::i8), Cond);
24208 // Zero extend the condition if needed.
24209 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
24211 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
24212 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
24213 DAG.getConstant(ShAmt, MVT::i8));
24214 if (N->getNumValues() == 2) // Dead flag value?
24215 return DCI.CombineTo(N, Cond, SDValue());
24219 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst. This is efficient
24220 // for any integer data type, including i8/i16.
24221 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
24222 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24223 DAG.getConstant(CC, MVT::i8), Cond);
24225 // Zero extend the condition if needed.
24226 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
24227 FalseC->getValueType(0), Cond);
24228 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24229 SDValue(FalseC, 0));
24231 if (N->getNumValues() == 2) // Dead flag value?
24232 return DCI.CombineTo(N, Cond, SDValue());
24236 // Optimize cases that will turn into an LEA instruction. This requires
24237 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
24238 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
24239 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
24240 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
24242 bool isFastMultiplier = false;
24244 switch ((unsigned char)Diff) {
24246 case 1: // result = add base, cond
24247 case 2: // result = lea base( , cond*2)
24248 case 3: // result = lea base(cond, cond*2)
24249 case 4: // result = lea base( , cond*4)
24250 case 5: // result = lea base(cond, cond*4)
24251 case 8: // result = lea base( , cond*8)
24252 case 9: // result = lea base(cond, cond*8)
24253 isFastMultiplier = true;
24258 if (isFastMultiplier) {
24259 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
24260 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24261 DAG.getConstant(CC, MVT::i8), Cond);
24262 // Zero extend the condition if needed.
24263 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
24265 // Scale the condition by the difference.
24267 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
24268 DAG.getConstant(Diff, Cond.getValueType()));
24270 // Add the base if non-zero.
24271 if (FalseC->getAPIntValue() != 0)
24272 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24273 SDValue(FalseC, 0));
24274 if (N->getNumValues() == 2) // Dead flag value?
24275 return DCI.CombineTo(N, Cond, SDValue());
24282 // Handle these cases:
24283 // (select (x != c), e, c) -> (select (x != c), e, x),
24284 // (select (x == c), c, e) -> (select (x == c), x, e)
24285 // where c is an integer constant, and the "select" is the combination
24286 // of CMOV and CMP.
24288 // The rationale for this change is that a conditional-move from a constant
24289 // needs two instructions; however, a conditional-move from a register needs
24290 // only one instruction.
24292 // CAVEAT: By replacing a constant with a symbolic value, it may obscure
24293 // some instruction-combining opportunities. This opt needs to be
24294 // postponed as late as possible.
24296 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
24297 // the DCI.xxxx conditions are provided to postpone the optimization as
24298 // late as possible.
24300 ConstantSDNode *CmpAgainst = nullptr;
24301 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
24302 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
24303 !isa<ConstantSDNode>(Cond.getOperand(0))) {
24305 if (CC == X86::COND_NE &&
24306 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
24307 CC = X86::GetOppositeBranchCondition(CC);
24308 std::swap(TrueOp, FalseOp);
24311 if (CC == X86::COND_E &&
24312 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
24313 SDValue Ops[] = { FalseOp, Cond.getOperand(0),
24314 DAG.getConstant(CC, MVT::i8), Cond };
24315 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList (), Ops);
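// For example, (select (x == 7), 7, e) is rewritten to (select (x == 7), x, e):
// the constant 7 would need a separate mov to materialize, while x is already
// available in a register for the cmov.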
24323 static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
24324 const X86Subtarget *Subtarget) {
24325 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
24327 default: return SDValue();
24328 // SSE/AVX/AVX2 blend intrinsics.
24329 case Intrinsic::x86_avx2_pblendvb:
24330 case Intrinsic::x86_avx2_pblendw:
24331 case Intrinsic::x86_avx2_pblendd_128:
24332 case Intrinsic::x86_avx2_pblendd_256:
24333 // Don't try to simplify this intrinsic if we don't have AVX2.
24334 if (!Subtarget->hasAVX2())
24337 case Intrinsic::x86_avx_blend_pd_256:
24338 case Intrinsic::x86_avx_blend_ps_256:
24339 case Intrinsic::x86_avx_blendv_pd_256:
24340 case Intrinsic::x86_avx_blendv_ps_256:
24341 // Don't try to simplify this intrinsic if we don't have AVX.
24342 if (!Subtarget->hasAVX())
24345 case Intrinsic::x86_sse41_pblendw:
24346 case Intrinsic::x86_sse41_blendpd:
24347 case Intrinsic::x86_sse41_blendps:
24348 case Intrinsic::x86_sse41_blendvps:
24349 case Intrinsic::x86_sse41_blendvpd:
24350 case Intrinsic::x86_sse41_pblendvb: {
24351 SDValue Op0 = N->getOperand(1);
24352 SDValue Op1 = N->getOperand(2);
24353 SDValue Mask = N->getOperand(3);
24355 // Don't try to simplify this intrinsic if we don't have SSE4.1.
24356 if (!Subtarget->hasSSE41())
24359 // fold (blend A, A, Mask) -> A
24362 // fold (blend A, B, allZeros) -> A
24363 if (ISD::isBuildVectorAllZeros(Mask.getNode()))
24365 // fold (blend A, B, allOnes) -> B
24366 if (ISD::isBuildVectorAllOnes(Mask.getNode()))
24369 // Simplify the case where the mask is a constant i32 value.
24370 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mask)) {
24371 if (C->isNullValue())
24373 if (C->isAllOnesValue())
24380 // Packed SSE2/AVX2 arithmetic shift immediate intrinsics.
24381 case Intrinsic::x86_sse2_psrai_w:
24382 case Intrinsic::x86_sse2_psrai_d:
24383 case Intrinsic::x86_avx2_psrai_w:
24384 case Intrinsic::x86_avx2_psrai_d:
24385 case Intrinsic::x86_sse2_psra_w:
24386 case Intrinsic::x86_sse2_psra_d:
24387 case Intrinsic::x86_avx2_psra_w:
24388 case Intrinsic::x86_avx2_psra_d: {
24389 SDValue Op0 = N->getOperand(1);
24390 SDValue Op1 = N->getOperand(2);
24391 EVT VT = Op0.getValueType();
24392 assert(VT.isVector() && "Expected a vector type!");
24394 if (isa<BuildVectorSDNode>(Op1))
24395 Op1 = Op1.getOperand(0);
24397 if (!isa<ConstantSDNode>(Op1))
24400 EVT SVT = VT.getVectorElementType();
24401 unsigned SVTBits = SVT.getSizeInBits();
24403 ConstantSDNode *CND = cast<ConstantSDNode>(Op1);
24404 const APInt &C = APInt(SVTBits, CND->getAPIntValue().getZExtValue());
24405 uint64_t ShAmt = C.getZExtValue();
24407 // Don't try to convert this shift into an ISD::SRA if the shift
24408 // count is bigger than or equal to the element size.
24409 if (ShAmt >= SVTBits)
24412 // Trivial case: if the shift count is zero, then fold this
24413 // into the first operand.
24417 // Replace this packed shift intrinsic with a target independent shift dag node.
24419 SDValue Splat = DAG.getConstant(C, VT);
24420 return DAG.getNode(ISD::SRA, SDLoc(N), VT, Op0, Splat);
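// For example, a psrai.d intrinsic with a constant count of 5 is rewritten
// as (sra X, splat(5)); zero counts are folded to the first operand above,
// and counts >= the element width are left alone, since ISD::SRA would not
// model the clamping behaviour of the hardware instruction.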
24425 /// PerformMulCombine - Optimize a single multiply with a constant into two multiplies
24426 /// in order to implement it with two cheaper instructions, e.g.
24427 /// LEA + SHL, LEA + LEA.
24428 static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
24429 TargetLowering::DAGCombinerInfo &DCI) {
24430 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
24433 EVT VT = N->getValueType(0);
24434 if (VT != MVT::i64 && VT != MVT::i32)
24437 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
24440 uint64_t MulAmt = C->getZExtValue();
24441 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
24444 uint64_t MulAmt1 = 0;
24445 uint64_t MulAmt2 = 0;
24446 if ((MulAmt % 9) == 0) {
24448 MulAmt2 = MulAmt / 9;
24449 } else if ((MulAmt % 5) == 0) {
24451 MulAmt2 = MulAmt / 5;
24452 } else if ((MulAmt % 3) == 0) {
24454 MulAmt2 = MulAmt / 3;
24457 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
24460 if (isPowerOf2_64(MulAmt2) &&
24461 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
24462 // If the second multiplier is pow2, issue it first. We want the multiply
24463 // by 3, 5, or 9 to be folded into the addressing mode unless the lone use is an add.
24465 std::swap(MulAmt1, MulAmt2);
24468 if (isPowerOf2_64(MulAmt1))
24469 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
24470 DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
24472 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
24473 DAG.getConstant(MulAmt1, VT));
24475 if (isPowerOf2_64(MulAmt2))
24476 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
24477 DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
24479 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
24480 DAG.getConstant(MulAmt2, VT));
24482 // Do not add new nodes to DAG combiner worklist.
24483 DCI.CombineTo(N, NewMul, false);
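// For example, X*45 is rewritten as (X*9)*5, which selects to two LEAs
// (e.g. leal (%rdi,%rdi,8), %eax; leal (%rax,%rax,4), %eax), and X*40 is
// rewritten as a multiply by 5 plus a shift by 3.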
24488 static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
24489 SDValue N0 = N->getOperand(0);
24490 SDValue N1 = N->getOperand(1);
24491 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
24492 EVT VT = N0.getValueType();
24494 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
24495 // since the result of setcc_c is all zeros or all ones.
24496 if (VT.isInteger() && !VT.isVector() &&
24497 N1C && N0.getOpcode() == ISD::AND &&
24498 N0.getOperand(1).getOpcode() == ISD::Constant) {
24499 SDValue N00 = N0.getOperand(0);
24500 if (N00.getOpcode() == X86ISD::SETCC_CARRY ||
24501 ((N00.getOpcode() == ISD::ANY_EXTEND ||
24502 N00.getOpcode() == ISD::ZERO_EXTEND) &&
24503 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) {
24504 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
24505 APInt ShAmt = N1C->getAPIntValue();
24506 Mask = Mask.shl(ShAmt);
24508 return DAG.getNode(ISD::AND, SDLoc(N), VT,
24509 N00, DAG.getConstant(Mask, VT));
24513 // Hardware support for vector shifts is sparse which makes us scalarize the
24514 // vector operations in many cases. Also, on Sandy Bridge ADD is faster than SHL:
24516 // (shl V, 1) -> add V,V
24517 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
24518 if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
24519 assert(N0.getValueType().isVector() && "Invalid vector shift type");
24520 // We shift all of the values by one. In many cases we do not have
24521 // hardware support for this operation. This is better expressed as an ADD of two values.
24523 if (N1SplatC->getZExtValue() == 1)
24524 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
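// For example, (shl <4 x i32> X, <1,1,1,1>) becomes (add X, X), i.e. a
// single paddd, which is typically cheaper than a vector shift by one.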
24530 /// \brief Returns a vector of 0s if the input node is a vector logical
24531 /// shift by a constant amount which is known to be bigger than or equal
24532 /// to the vector element size in bits.
24533 static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
24534 const X86Subtarget *Subtarget) {
24535 EVT VT = N->getValueType(0);
24537 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
24538 (!Subtarget->hasInt256() ||
24539 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
24542 SDValue Amt = N->getOperand(1);
24544 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
24545 if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
24546 APInt ShiftAmt = AmtSplat->getAPIntValue();
24547 unsigned MaxAmount = VT.getVectorElementType().getSizeInBits();
24549 // SSE2/AVX2 logical shifts always return a vector of 0s
24550 // if the shift amount is bigger than or equal to
24551 // the element size. The constant shift amount will be
24552 // encoded as an 8-bit immediate.
24553 if (ShiftAmt.trunc(8).uge(MaxAmount))
24554 return getZeroVector(VT, Subtarget, DAG, DL);
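// For example, (srl <8 x i16> X, splat(16)) folds to the zero vector here,
// since a logical shift by the full element width (or more) always produces
// zeroes on SSE2/AVX2.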
24560 /// PerformShiftCombine - Combine shifts.
24561 static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
24562 TargetLowering::DAGCombinerInfo &DCI,
24563 const X86Subtarget *Subtarget) {
24564 if (N->getOpcode() == ISD::SHL) {
24565 SDValue V = PerformSHLCombine(N, DAG);
24566 if (V.getNode()) return V;
24569 if (N->getOpcode() != ISD::SRA) {
24570 // Try to fold this logical shift into a zero vector.
24571 SDValue V = performShiftToAllZeros(N, DAG, Subtarget);
24572 if (V.getNode()) return V;
24578 // CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..))
24579 // where both setccs reference the same FP CMP, and rewrite for CMPEQSS
24580 // and friends. Likewise for OR -> CMPNEQSS.
24581 static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
24582 TargetLowering::DAGCombinerInfo &DCI,
24583 const X86Subtarget *Subtarget) {
24586 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
24587 // we're requiring SSE2 for both.
24588 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
24589 SDValue N0 = N->getOperand(0);
24590 SDValue N1 = N->getOperand(1);
24591 SDValue CMP0 = N0->getOperand(1);
24592 SDValue CMP1 = N1->getOperand(1);
24595 // The SETCCs should both refer to the same CMP.
24596 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
24599 SDValue CMP00 = CMP0->getOperand(0);
24600 SDValue CMP01 = CMP0->getOperand(1);
24601 EVT VT = CMP00.getValueType();
24603 if (VT == MVT::f32 || VT == MVT::f64) {
24604 bool ExpectingFlags = false;
24605 // Check for any users that want flags:
24606 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
24607 !ExpectingFlags && UI != UE; ++UI)
24608 switch (UI->getOpcode()) {
24613 ExpectingFlags = true;
24615 case ISD::CopyToReg:
24616 case ISD::SIGN_EXTEND:
24617 case ISD::ZERO_EXTEND:
24618 case ISD::ANY_EXTEND:
24622 if (!ExpectingFlags) {
24623 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
24624 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
24626 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
24627 X86::CondCode tmp = cc0;
24632 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
24633 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
24634 // FIXME: need symbolic constants for these magic numbers.
24635 // See X86ATTInstPrinter.cpp:printSSECC().
24636 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
24637 if (Subtarget->hasAVX512()) {
24638 SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
24639 CMP01, DAG.getConstant(x86cc, MVT::i8));
24640 if (N->getValueType(0) != MVT::i1)
24641 return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
24645 SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
24646 CMP00.getValueType(), CMP00, CMP01,
24647 DAG.getConstant(x86cc, MVT::i8));
24649 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
24650 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
24652 if (is64BitFP && !Subtarget->is64Bit()) {
24653 // On a 32-bit target, we cannot bitcast the 64-bit float to a
24654 // 64-bit integer, since that's not a legal type. Since
24655 // OnesOrZeroesF is all ones or all zeroes, we don't need all the
24656 // bits, but can do this little dance to extract the lowest 32 bits
24657 // and work with those going forward.
24658 SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
24660 SDValue Vector32 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32,
24662 OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
24663 Vector32, DAG.getIntPtrConstant(0));
24667 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, IntVT, OnesOrZeroesF);
24668 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
24669 DAG.getConstant(1, IntVT));
24670 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed);
24671 return OneBitOfTruth;
24679 /// CanFoldXORWithAllOnes - Test whether the XOR operand is an AllOnes vector
24680 /// so it can be folded inside ANDNP.
24681 static bool CanFoldXORWithAllOnes(const SDNode *N) {
24682 EVT VT = N->getValueType(0);
24684 // Match direct AllOnes for 128 and 256-bit vectors
24685 if (ISD::isBuildVectorAllOnes(N))
24688 // Look through a bit convert.
24689 if (N->getOpcode() == ISD::BITCAST)
24690 N = N->getOperand(0).getNode();
24692 // Sometimes the operand may come from an insert_subvector building a 256-bit all-ones vector.
24694 if (VT.is256BitVector() &&
24695 N->getOpcode() == ISD::INSERT_SUBVECTOR) {
24696 SDValue V1 = N->getOperand(0);
24697 SDValue V2 = N->getOperand(1);
24699 if (V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
24700 V1.getOperand(0).getOpcode() == ISD::UNDEF &&
24701 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
24702 ISD::isBuildVectorAllOnes(V2.getNode()))
24709 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
24710 // register. In most cases we actually compare or select YMM-sized registers
24711 // and mixing the two types creates horrible code. This method optimizes
24712 // some of the transition sequences.
24713 static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
24714 TargetLowering::DAGCombinerInfo &DCI,
24715 const X86Subtarget *Subtarget) {
24716 EVT VT = N->getValueType(0);
24717 if (!VT.is256BitVector())
24720 assert((N->getOpcode() == ISD::ANY_EXTEND ||
24721 N->getOpcode() == ISD::ZERO_EXTEND ||
24722 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
24724 SDValue Narrow = N->getOperand(0);
24725 EVT NarrowVT = Narrow->getValueType(0);
24726 if (!NarrowVT.is128BitVector())
24729 if (Narrow->getOpcode() != ISD::XOR &&
24730 Narrow->getOpcode() != ISD::AND &&
24731 Narrow->getOpcode() != ISD::OR)
24734 SDValue N0 = Narrow->getOperand(0);
24735 SDValue N1 = Narrow->getOperand(1);
24738 // The Left side has to be a trunc.
24739 if (N0.getOpcode() != ISD::TRUNCATE)
24742 // The type of the truncated inputs.
24743 EVT WideVT = N0->getOperand(0)->getValueType(0);
24747 // The right side has to be a 'trunc' or a constant vector.
24748 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE;
24749 ConstantSDNode *RHSConstSplat = nullptr;
24750 if (auto *RHSBV = dyn_cast<BuildVectorSDNode>(N1))
24751 RHSConstSplat = RHSBV->getConstantSplatNode();
24752 if (!RHSTrunc && !RHSConstSplat)
24755 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24757 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT))
24760 // Set N0 and N1 to hold the inputs to the new wide operation.
24761 N0 = N0->getOperand(0);
24762 if (RHSConstSplat) {
24763 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getScalarType(),
24764 SDValue(RHSConstSplat, 0));
24765 SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1);
24766 N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C);
24767 } else if (RHSTrunc) {
24768 N1 = N1->getOperand(0);
24771 // Generate the wide operation.
24772 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1);
24773 unsigned Opcode = N->getOpcode();
24775 case ISD::ANY_EXTEND:
24777 case ISD::ZERO_EXTEND: {
24778 unsigned InBits = NarrowVT.getScalarType().getSizeInBits();
24779 APInt Mask = APInt::getAllOnesValue(InBits);
24780 Mask = Mask.zext(VT.getScalarType().getSizeInBits());
24781 return DAG.getNode(ISD::AND, DL, VT,
24782 Op, DAG.getConstant(Mask, VT));
24784 case ISD::SIGN_EXTEND:
24785 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
24786 Op, DAG.getValueType(NarrowVT));
24788 llvm_unreachable("Unexpected opcode");
24792 static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
24793 TargetLowering::DAGCombinerInfo &DCI,
24794 const X86Subtarget *Subtarget) {
24795 EVT VT = N->getValueType(0);
24796 if (DCI.isBeforeLegalizeOps())
24799 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
24803 // Create BEXTR instructions
24804 // BEXTR is ((X >> imm) & (2**size-1))
24805 if (VT == MVT::i32 || VT == MVT::i64) {
24806 SDValue N0 = N->getOperand(0);
24807 SDValue N1 = N->getOperand(1);
24810 // Check for BEXTR.
24811 if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
24812 (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
24813 ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
24814 ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
24815 if (MaskNode && ShiftNode) {
24816 uint64_t Mask = MaskNode->getZExtValue();
24817 uint64_t Shift = ShiftNode->getZExtValue();
24818 if (isMask_64(Mask)) {
24819 uint64_t MaskSize = countPopulation(Mask);
24820 if (Shift + MaskSize <= VT.getSizeInBits())
24821 return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
24822 DAG.getConstant(Shift | (MaskSize << 8), VT));
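// For example, with BMI, ((X >> 4) & 0xFFF) becomes (X86ISD::BEXTR X, 0xC04):
// the low control byte is the start bit (4) and the next byte is the field
// length in bits (12).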
24830 // Want to form ANDNP nodes:
24831 // 1) In the hopes of then easily combining them with OR and AND nodes
24832 // to form PBLEND/PSIGN.
24833 // 2) To match ANDN packed intrinsics
24834 if (VT != MVT::v2i64 && VT != MVT::v4i64)
24837 SDValue N0 = N->getOperand(0);
24838 SDValue N1 = N->getOperand(1);
24841 // Check LHS for vnot
24842 if (N0.getOpcode() == ISD::XOR &&
24843 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
24844 CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
24845 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);
24847 // Check RHS for vnot
24848 if (N1.getOpcode() == ISD::XOR &&
24849 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
24850 CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
24851 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);
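// For example, (and (xor A, all-ones), B) becomes (X86ISD::ANDNP A, B),
// i.e. a single pandn; the symmetric case with the vnot on the RHS is
// handled the same way with the operands swapped.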
24856 static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
24857 TargetLowering::DAGCombinerInfo &DCI,
24858 const X86Subtarget *Subtarget) {
24859 if (DCI.isBeforeLegalizeOps())
24862 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
24866 SDValue N0 = N->getOperand(0);
24867 SDValue N1 = N->getOperand(1);
24868 EVT VT = N->getValueType(0);
24870 // look for psign/blend
24871 if (VT == MVT::v2i64 || VT == MVT::v4i64) {
24872 if (!Subtarget->hasSSSE3() ||
24873 (VT == MVT::v4i64 && !Subtarget->hasInt256()))
24876 // Canonicalize pandn to RHS
24877 if (N0.getOpcode() == X86ISD::ANDNP)
24879 // or (and (m, y), (pandn m, x))
24880 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
24881 SDValue Mask = N1.getOperand(0);
24882 SDValue X = N1.getOperand(1);
24884 if (N0.getOperand(0) == Mask)
24885 Y = N0.getOperand(1);
24886 if (N0.getOperand(1) == Mask)
24887 Y = N0.getOperand(0);
24889 // Check to see if the mask appeared in both the AND and the ANDNP.
24893 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
24894 // Look through mask bitcast.
24895 if (Mask.getOpcode() == ISD::BITCAST)
24896 Mask = Mask.getOperand(0);
24897 if (X.getOpcode() == ISD::BITCAST)
24898 X = X.getOperand(0);
24899 if (Y.getOpcode() == ISD::BITCAST)
24900 Y = Y.getOperand(0);
24902 EVT MaskVT = Mask.getValueType();
24904 // Validate that the Mask operand is a vector sra node.
24905 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
24906 // there is no psrai.b
24907 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
24908 unsigned SraAmt = ~0;
24909 if (Mask.getOpcode() == ISD::SRA) {
24910 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
24911 if (auto *AmtConst = AmtBV->getConstantSplatNode())
24912 SraAmt = AmtConst->getZExtValue();
24913 } else if (Mask.getOpcode() == X86ISD::VSRAI) {
24914 SDValue SraC = Mask.getOperand(1);
24915 SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
24917 if ((SraAmt + 1) != EltBits)
24922 // Now we know we at least have a pblendvb with the mask val. See if
24923 // we can form a psignb/w/d.
24924 // psign = x.type == y.type == mask.type && y = sub(0, x);
24925 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
24926 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
24927 X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
24928 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
24929 "Unsupported VT for PSIGN");
24930 Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0));
24931 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
24933 // PBLENDVB is only available on SSE 4.1.
24934 if (!Subtarget->hasSSE41())
24937 EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;
24939 X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X);
24940 Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y);
24941 Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask);
24942 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
24943 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
24947 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
24950 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
24951 MachineFunction &MF = DAG.getMachineFunction();
24953 MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
24955 // SHLD/SHRD instructions have lower register pressure, but on some
24956 // platforms they have higher latency than the equivalent
24957 // series of shifts/or that would otherwise be generated.
24958 // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
24959 // have higher latencies and we are not optimizing for size.
24960 if (!OptForSize && Subtarget->isSHLDSlow())
24963 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
24965 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
24967 if (!N0.hasOneUse() || !N1.hasOneUse())
24970 SDValue ShAmt0 = N0.getOperand(1);
24971 if (ShAmt0.getValueType() != MVT::i8)
24973 SDValue ShAmt1 = N1.getOperand(1);
24974 if (ShAmt1.getValueType() != MVT::i8)
24976 if (ShAmt0.getOpcode() == ISD::TRUNCATE)
24977 ShAmt0 = ShAmt0.getOperand(0);
24978 if (ShAmt1.getOpcode() == ISD::TRUNCATE)
24979 ShAmt1 = ShAmt1.getOperand(0);
24982 unsigned Opc = X86ISD::SHLD;
24983 SDValue Op0 = N0.getOperand(0);
24984 SDValue Op1 = N1.getOperand(0);
24985 if (ShAmt0.getOpcode() == ISD::SUB) {
24986 Opc = X86ISD::SHRD;
24987 std::swap(Op0, Op1);
24988 std::swap(ShAmt0, ShAmt1);
24991 unsigned Bits = VT.getSizeInBits();
24992 if (ShAmt1.getOpcode() == ISD::SUB) {
24993 SDValue Sum = ShAmt1.getOperand(0);
24994 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
24995 SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
24996 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
24997 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
24998 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
24999 return DAG.getNode(Opc, DL, VT,
25001 DAG.getNode(ISD::TRUNCATE, DL,
25004 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
25005 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
25007 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
25008 return DAG.getNode(Opc, DL, VT,
25009 N0.getOperand(0), N1.getOperand(0),
25010 DAG.getNode(ISD::TRUNCATE, DL,
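// For example, for i64: (or (shl X, C), (srl Y, (sub 64, C))) is matched to
// (X86ISD::SHLD X, Y, C), and the mirrored pattern to SHRD, provided both
// shifts have a single use and the subtarget does not have slow SHLD/SHRD.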
25017 // Generate NEG and CMOV for integer abs.
25018 static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
25019 EVT VT = N->getValueType(0);
25021 // Since X86 does not have CMOV for 8-bit integer, we don't convert
25022 // 8-bit integer abs to NEG and CMOV.
25023 if (VT.isInteger() && VT.getSizeInBits() == 8)
25026 SDValue N0 = N->getOperand(0);
25027 SDValue N1 = N->getOperand(1);
25030 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
25031 // and change it to SUB and CMOV.
25032 if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
25033 N0.getOpcode() == ISD::ADD &&
25034 N0.getOperand(1) == N1 &&
25035 N1.getOpcode() == ISD::SRA &&
25036 N1.getOperand(0) == N0.getOperand(0))
25037 if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
25038 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) {
25039 // Generate SUB & CMOV.
25040 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
25041 DAG.getConstant(0, VT), N0.getOperand(0));
25043 SDValue Ops[] = { N0.getOperand(0), Neg,
25044 DAG.getConstant(X86::COND_GE, MVT::i8),
25045 SDValue(Neg.getNode(), 1) };
25046 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
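// For example, the i32 abs idiom (xor (add X, (sra X, 31)), (sra X, 31))
// becomes Neg = (sub 0, X) followed by a CMOV that picks Neg when 0 - X >= 0
// and X otherwise, i.e. a neg plus a cmov instead of a shift/add/xor sequence.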
25051 // PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes
25052 static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
25053 TargetLowering::DAGCombinerInfo &DCI,
25054 const X86Subtarget *Subtarget) {
25055 if (DCI.isBeforeLegalizeOps())
25058 if (Subtarget->hasCMov()) {
25059 SDValue RV = performIntegerAbsCombine(N, DAG);
25067 /// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
25068 static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
25069 TargetLowering::DAGCombinerInfo &DCI,
25070 const X86Subtarget *Subtarget) {
25071 LoadSDNode *Ld = cast<LoadSDNode>(N);
25072 EVT RegVT = Ld->getValueType(0);
25073 EVT MemVT = Ld->getMemoryVT();
25075 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25077 // For chips with slow 32-byte unaligned loads, break the 32-byte operation
25078 // into two 16-byte operations.
25079 ISD::LoadExtType Ext = Ld->getExtensionType();
25080 unsigned Alignment = Ld->getAlignment();
25081 bool IsAligned = Alignment == 0 || Alignment >= MemVT.getSizeInBits()/8;
25082 if (RegVT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
25083 !DCI.isBeforeLegalizeOps() && !IsAligned && Ext == ISD::NON_EXTLOAD) {
25084 unsigned NumElems = RegVT.getVectorNumElements();
25088 SDValue Ptr = Ld->getBasePtr();
25089 SDValue Increment = DAG.getConstant(16, TLI.getPointerTy());
25091 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
25093 SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
25094 Ld->getPointerInfo(), Ld->isVolatile(),
25095 Ld->isNonTemporal(), Ld->isInvariant(),
25097 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
25098 SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
25099 Ld->getPointerInfo(), Ld->isVolatile(),
25100 Ld->isNonTemporal(), Ld->isInvariant(),
25101 std::min(16U, Alignment));
25102 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
25103 Load1.getValue(1),
25104 Load2.getValue(1));
25106 SDValue NewVec = DAG.getUNDEF(RegVT);
25107 NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl);
25108 NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl);
25109 return DCI.CombineTo(N, NewVec, TF, true);
25115 /// PerformMLOADCombine - Resolve extending loads
25116 static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
25117 TargetLowering::DAGCombinerInfo &DCI,
25118 const X86Subtarget *Subtarget) {
25119 MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
25120 if (Mld->getExtensionType() != ISD::SEXTLOAD)
25121 return SDValue();
25123 EVT VT = Mld->getValueType(0);
25124 unsigned NumElems = VT.getVectorNumElements();
25125 EVT LdVT = Mld->getMemoryVT();
25126 SDLoc dl(Mld);
25128 assert(LdVT != VT && "Cannot extend to the same type");
25129 unsigned ToSz = VT.getVectorElementType().getSizeInBits();
25130 unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
25131 // From, To sizes and ElemCount must be pow of two
25132 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
25133 "Unexpected size for extending masked load");
25135 unsigned SizeRatio = ToSz / FromSz;
25136 assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
25138 // Create a type on which we perform the shuffle
25139 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25140 LdVT.getScalarType(), NumElems*SizeRatio);
25141 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25143 // Convert Src0 value
25144 SDValue WideSrc0 = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mld->getSrc0());
25145 if (Mld->getSrc0().getOpcode() != ISD::UNDEF) {
25146 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25147 for (unsigned i = 0; i != NumElems; ++i)
25148 ShuffleVec[i] = i * SizeRatio;
25150 // Can't shuffle using an illegal type.
25151 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
25152 && "WideVecVT should be legal");
25153 WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
25154 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
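// Lane i of the shuffle picks element i*SizeRatio of the bitcast Src0, i.e. the
// low-order narrow piece of original element i; the remaining lanes are left undef.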
25156 // Prepare the new mask
25157 SDValue NewMask;
25158 SDValue Mask = Mld->getMask();
25159 if (Mask.getValueType() == VT) {
25160 // Mask and original value have the same type
25161 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
25162 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25163 for (unsigned i = 0; i != NumElems; ++i)
25164 ShuffleVec[i] = i * SizeRatio;
25165 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
25166 ShuffleVec[i] = NumElems*SizeRatio;
25167 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
25168 DAG.getConstant(0, WideVecVT),
25169 &ShuffleVec[0]);
25170 }
25171 else {
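// The mask is a vector of i1; widen it to the wide vector's element count by
// concatenating all-zero (all-false) masks after the original mask.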
25172 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
25173 unsigned WidenNumElts = NumElems*SizeRatio;
25174 unsigned MaskNumElts = VT.getVectorNumElements();
25175 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
25176 WidenNumElts);
25178 unsigned NumConcat = WidenNumElts / MaskNumElts;
25179 SmallVector<SDValue, 16> Ops(NumConcat);
25180 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
25181 Ops[0] = Mask;
25182 for (unsigned i = 1; i != NumConcat; ++i)
25183 Ops[i] = ZeroVal;
25185 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
25188 SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
25189 Mld->getBasePtr(), NewMask, WideSrc0,
25190 Mld->getMemoryVT(), Mld->getMemOperand(),
25192 SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd);
25193 return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
25196 /// PerformMSTORECombine - Resolve truncating stores
25197 static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
25198 const X86Subtarget *Subtarget) {
25199 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
25200 if (!Mst->isTruncatingStore())
25201 return SDValue();
25203 EVT VT = Mst->getValue().getValueType();
25204 unsigned NumElems = VT.getVectorNumElements();
25205 EVT StVT = Mst->getMemoryVT();
25208 assert(StVT != VT && "Cannot truncate to the same type");
25209 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25210 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25212 // From, To sizes and ElemCount must be pow of two
25213 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
25214 "Unexpected size for truncating masked store");
25215 // We are going to use the original vector elt for storing.
25216 // Accumulated smaller vector elements must be a multiple of the store size.
25217 assert (((NumElems * FromSz) % ToSz) == 0 &&
25218 "Unexpected ratio for truncating masked store");
25220 unsigned SizeRatio = FromSz / ToSz;
25221 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
25223 // Create a type on which we perform the shuffle
25224 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25225 StVT.getScalarType(), NumElems*SizeRatio);
25227 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25229 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mst->getValue());
25230 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25231 for (unsigned i = 0; i != NumElems; ++i)
25232 ShuffleVec[i] = i * SizeRatio;
25234 // Can't shuffle using an illegal type.
25235 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
25236 && "WideVecVT should be legal");
25238 SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25239 DAG.getUNDEF(WideVecVT),
25240 &ShuffleVec[0]);
25242 SDValue NewMask;
25243 SDValue Mask = Mst->getMask();
25244 if (Mask.getValueType() == VT) {
25245 // Mask and original value have the same type
25246 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
25247 for (unsigned i = 0; i != NumElems; ++i)
25248 ShuffleVec[i] = i * SizeRatio;
25249 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
25250 ShuffleVec[i] = NumElems*SizeRatio;
25251 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
25252 DAG.getConstant(0, WideVecVT),
25253 &ShuffleVec[0]);
25254 }
25255 else {
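// As in PerformMLOADCombine above: the i1 mask is widened by concatenating
// all-zero (all-false) masks after it.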
25256 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
25257 unsigned WidenNumElts = NumElems*SizeRatio;
25258 unsigned MaskNumElts = VT.getVectorNumElements();
25259 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
25262 unsigned NumConcat = WidenNumElts / MaskNumElts;
25263 SmallVector<SDValue, 16> Ops(NumConcat);
25264 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
25265 Ops[0] = Mask;
25266 for (unsigned i = 1; i != NumConcat; ++i)
25267 Ops[i] = ZeroVal;
25269 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
25272 return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal, Mst->getBasePtr(),
25273 NewMask, StVT, Mst->getMemOperand(), false);
25275 /// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
25276 static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
25277 const X86Subtarget *Subtarget) {
25278 StoreSDNode *St = cast<StoreSDNode>(N);
25279 EVT VT = St->getValue().getValueType();
25280 EVT StVT = St->getMemoryVT();
25282 SDValue StoredVal = St->getOperand(1);
25283 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25285 // If we are saving a concatenation of two XMM registers and 32-byte stores
25286 // are slow, such as on Sandy Bridge, perform two 16-byte stores.
25287 unsigned Alignment = St->getAlignment();
25288 bool IsAligned = Alignment == 0 || Alignment >= VT.getSizeInBits()/8;
25289 if (VT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
25290 StVT == VT && !IsAligned) {
25291 unsigned NumElems = VT.getVectorNumElements();
25295 SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl);
25296 SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl);
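// Store the low 128-bit half at the original address and the high half 16
// bytes beyond it, then merge the two store chains with a TokenFactor.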
25298 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy());
25299 SDValue Ptr0 = St->getBasePtr();
25300 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride);
25302 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0,
25303 St->getPointerInfo(), St->isVolatile(),
25304 St->isNonTemporal(), Alignment);
25305 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1,
25306 St->getPointerInfo(), St->isVolatile(),
25307 St->isNonTemporal(),
25308 std::min(16U, Alignment));
25309 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
25312 // Optimize trunc store (of multiple scalars) to shuffle and store.
25313 // First, pack all of the elements in one place. Next, store to memory
25314 // in fewer chunks.
25315 if (St->isTruncatingStore() && VT.isVector()) {
25316 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25317 unsigned NumElems = VT.getVectorNumElements();
25318 assert(StVT != VT && "Cannot truncate to the same type");
25319 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25320 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25322 // From, To sizes and ElemCount must be pow of two
25323 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
25324 // We are going to use the original vector elt for storing.
25325 // Accumulated smaller vector elements must be a multiple of the store size.
25326 if (0 != (NumElems * FromSz) % ToSz) return SDValue();
25328 unsigned SizeRatio = FromSz / ToSz;
25330 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
25332 // Create a type on which we perform the shuffle
25333 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25334 StVT.getScalarType(), NumElems*SizeRatio);
25336 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25338 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue());
25339 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
25340 for (unsigned i = 0; i != NumElems; ++i)
25341 ShuffleVec[i] = i * SizeRatio;
25343 // Can't shuffle using an illegal type.
25344 if (!TLI.isTypeLegal(WideVecVT))
25345 return SDValue();
25347 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25348 DAG.getUNDEF(WideVecVT),
25349 &ShuffleVec[0]);
25350 // At this point all of the data is stored at the bottom of the
25351 // register. We now need to save it to mem.
25353 // Find the largest store unit
25354 MVT StoreType = MVT::i8;
25355 for (MVT Tp : MVT::integer_valuetypes()) {
25356 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
25357 StoreType = Tp;
25358 }
25360 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64.
25361 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
25362 (64 <= NumElems * ToSz))
25363 StoreType = MVT::f64;
25365 // Bitcast the original vector into a vector of store-size units
25366 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
25367 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
25368 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
25369 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff);
25370 SmallVector<SDValue, 8> Chains;
25371 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
25372 TLI.getPointerTy());
25373 SDValue Ptr = St->getBasePtr();
25375 // Perform one or more big stores into memory.
25376 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
25377 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
25378 StoreType, ShuffWide,
25379 DAG.getIntPtrConstant(i));
25380 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr,
25381 St->getPointerInfo(), St->isVolatile(),
25382 St->isNonTemporal(), St->getAlignment());
25383 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
25384 Chains.push_back(Ch);
25387 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
25390 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
25391 // the FP state in cases where an emms may be missing.
25392 // A preferable solution to the general problem is to figure out the right
25393 // places to insert EMMS. This qualifies as a quick hack.
25395 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
25396 if (VT.getSizeInBits() != 64)
25397 return SDValue();
25399 const Function *F = DAG.getMachineFunction().getFunction();
25400 bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
25401 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
25402 && Subtarget->hasSSE2();
25403 if ((VT.isVector() ||
25404 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
25405 isa<LoadSDNode>(St->getValue()) &&
25406 !cast<LoadSDNode>(St->getValue())->isVolatile() &&
25407 St->getChain().hasOneUse() && !St->isVolatile()) {
25408 SDNode* LdVal = St->getValue().getNode();
25409 LoadSDNode *Ld = nullptr;
25410 int TokenFactorIndex = -1;
25411 SmallVector<SDValue, 8> Ops;
25412 SDNode* ChainVal = St->getChain().getNode();
25413 // Must be a store of a load. We currently handle two cases: the load
25414 // is a direct child, or it's under an intervening TokenFactor. It is
25415 // possible to dig deeper under nested TokenFactors.
25416 if (ChainVal == LdVal)
25417 Ld = cast<LoadSDNode>(St->getChain());
25418 else if (St->getValue().hasOneUse() &&
25419 ChainVal->getOpcode() == ISD::TokenFactor) {
25420 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
25421 if (ChainVal->getOperand(i).getNode() == LdVal) {
25422 TokenFactorIndex = i;
25423 Ld = cast<LoadSDNode>(St->getValue());
25424 } else
25425 Ops.push_back(ChainVal->getOperand(i));
25429 if (!Ld || !ISD::isNormalLoad(Ld))
25430 return SDValue();
25432 // If this is not the MMX case, i.e. we are just turning i64 load/store
25433 // into f64 load/store, avoid the transformation if there are multiple
25434 // uses of the loaded value.
25435 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
25436 return SDValue();
25438 SDLoc LdDL(Ld);
25439 SDLoc StDL(N);
25440 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
25441 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
25443 if (Subtarget->is64Bit() || F64IsLegal) {
25444 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
25445 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
25446 Ld->getPointerInfo(), Ld->isVolatile(),
25447 Ld->isNonTemporal(), Ld->isInvariant(),
25448 Ld->getAlignment());
25449 SDValue NewChain = NewLd.getValue(1);
25450 if (TokenFactorIndex != -1) {
25451 Ops.push_back(NewChain);
25452 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25454 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
25455 St->getPointerInfo(),
25456 St->isVolatile(), St->isNonTemporal(),
25457 St->getAlignment());
25460 // Otherwise, lower to two pairs of 32-bit loads / stores.
25461 SDValue LoAddr = Ld->getBasePtr();
25462 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
25463 DAG.getConstant(4, MVT::i32));
25465 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
25466 Ld->getPointerInfo(),
25467 Ld->isVolatile(), Ld->isNonTemporal(),
25468 Ld->isInvariant(), Ld->getAlignment());
25469 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
25470 Ld->getPointerInfo().getWithOffset(4),
25471 Ld->isVolatile(), Ld->isNonTemporal(),
25473 MinAlign(Ld->getAlignment(), 4));
25475 SDValue NewChain = LoLd.getValue(1);
25476 if (TokenFactorIndex != -1) {
25477 Ops.push_back(LoLd);
25478 Ops.push_back(HiLd);
25479 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25482 LoAddr = St->getBasePtr();
25483 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
25484 DAG.getConstant(4, MVT::i32));
25486 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
25487 St->getPointerInfo(),
25488 St->isVolatile(), St->isNonTemporal(),
25489 St->getAlignment());
25490 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
25491 St->getPointerInfo().getWithOffset(4),
25493 St->isNonTemporal(),
25494 MinAlign(St->getAlignment(), 4));
25495 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
25500 /// Return 'true' if this vector operation is "horizontal"
25501 /// and return the operands for the horizontal operation in LHS and RHS. A
25502 /// horizontal operation performs the binary operation on successive elements
25503 /// of its first operand, then on successive elements of its second operand,
25504 /// returning the resulting values in a vector. For example, if
25505 /// A = < float a0, float a1, float a2, float a3 >
25507 /// B = < float b0, float b1, float b2, float b3 >
25508 /// then the result of doing a horizontal operation on A and B is
25509 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
25510 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
25511 /// A horizontal-op B, for some already available A and B, and if so then LHS is
25512 /// set to A, RHS to B, and the routine returns 'true'.
25513 /// Note that the binary operation should have the property that if one of the
25514 /// operands is UNDEF then the result is UNDEF.
25515 static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
25516 // Look for the following pattern: if
25517 // A = < float a0, float a1, float a2, float a3 >
25518 // B = < float b0, float b1, float b2, float b3 >
25520 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
25521 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
25522 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
25523 // which is A horizontal-op B.
25525 // At least one of the operands should be a vector shuffle.
25526 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
25527 RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
25530 MVT VT = LHS.getSimpleValueType();
25532 assert((VT.is128BitVector() || VT.is256BitVector()) &&
25533 "Unsupported vector type for horizontal add/sub");
25535 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
25536 // operate independently on 128-bit lanes.
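// For example, for v8f32 a horizontal op requires LMask = <0,2,8,10,4,6,12,14>
// and RMask = <1,3,9,11,5,7,13,15> (up to the per-pair swap allowed for
// commutative ops): the pairing pattern repeats within each 128-bit lane.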
25537 unsigned NumElts = VT.getVectorNumElements();
25538 unsigned NumLanes = VT.getSizeInBits()/128;
25539 unsigned NumLaneElts = NumElts / NumLanes;
25540 assert((NumLaneElts % 2 == 0) &&
25541 "Vector type should have an even number of elements in each lane");
25542 unsigned HalfLaneElts = NumLaneElts/2;
25544 // View LHS in the form
25545 // LHS = VECTOR_SHUFFLE A, B, LMask
25546 // If LHS is not a shuffle then pretend it is the shuffle
25547 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
25548 // NOTE: in what follows a default initialized SDValue represents an UNDEF of
25551 SmallVector<int, 16> LMask(NumElts);
25552 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25553 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF)
25554 A = LHS.getOperand(0);
25555 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF)
25556 B = LHS.getOperand(1);
25557 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
25558 std::copy(Mask.begin(), Mask.end(), LMask.begin());
25560 if (LHS.getOpcode() != ISD::UNDEF)
25562 for (unsigned i = 0; i != NumElts; ++i)
25566 // Likewise, view RHS in the form
25567 // RHS = VECTOR_SHUFFLE C, D, RMask
25569 SmallVector<int, 16> RMask(NumElts);
25570 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25571 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF)
25572 C = RHS.getOperand(0);
25573 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF)
25574 D = RHS.getOperand(1);
25575 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
25576 std::copy(Mask.begin(), Mask.end(), RMask.begin());
25578 if (RHS.getOpcode() != ISD::UNDEF)
25580 for (unsigned i = 0; i != NumElts; ++i)
25584 // Check that the shuffles are both shuffling the same vectors.
25585 if (!(A == C && B == D) && !(A == D && B == C))
25588 // If everything is UNDEF then bail out: it would be better to fold to UNDEF.
25589 if (!A.getNode() && !B.getNode())
25592 // If A and B occur in reverse order in RHS, then "swap" them (which means
25593 // rewriting the mask).
25595 CommuteVectorShuffleMask(RMask, NumElts);
25597 // At this point LHS and RHS are equivalent to
25598 // LHS = VECTOR_SHUFFLE A, B, LMask
25599 // RHS = VECTOR_SHUFFLE A, B, RMask
25600 // Check that the masks correspond to performing a horizontal operation.
25601 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
25602 for (unsigned i = 0; i != NumLaneElts; ++i) {
25603 int LIdx = LMask[i+l], RIdx = RMask[i+l];
25605 // Ignore any UNDEF components.
25606 if (LIdx < 0 || RIdx < 0 ||
25607 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
25608 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
25611 // Check that successive elements are being operated on. If not, this is
25612 // not a horizontal operation.
25613 unsigned Src = (i/HalfLaneElts); // each lane is split between srcs
25614 int Index = 2*(i%HalfLaneElts) + NumElts*Src + l;
25615 if (!(LIdx == Index && RIdx == Index + 1) &&
25616 !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
25621 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
25622 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
25626 /// Do target-specific dag combines on floating point adds.
25627 static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
25628 const X86Subtarget *Subtarget) {
25629 EVT VT = N->getValueType(0);
25630 SDValue LHS = N->getOperand(0);
25631 SDValue RHS = N->getOperand(1);
25633 // Try to synthesize horizontal adds from adds of shuffles.
25634 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25635 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25636 isHorizontalBinOp(LHS, RHS, true))
25637 return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS);
25641 /// Do target-specific dag combines on floating point subs.
25642 static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
25643 const X86Subtarget *Subtarget) {
25644 EVT VT = N->getValueType(0);
25645 SDValue LHS = N->getOperand(0);
25646 SDValue RHS = N->getOperand(1);
25648 // Try to synthesize horizontal subs from subs of shuffles.
25649 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25650 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25651 isHorizontalBinOp(LHS, RHS, false))
25652 return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS);
25656 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
25657 static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
25658 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
25660 // F[X]OR(0.0, x) -> x
25661 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25662 if (C->getValueAPF().isPosZero())
25663 return N->getOperand(1);
25665 // F[X]OR(x, 0.0) -> x
25666 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25667 if (C->getValueAPF().isPosZero())
25668 return N->getOperand(0);
25672 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
25673 static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
25674 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
25676 // Only perform optimizations if UnsafeMath is used.
25677 if (!DAG.getTarget().Options.UnsafeFPMath)
25680 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
25681 // into FMINC and FMAXC, which are Commutative operations.
25682 unsigned NewOp = 0;
25683 switch (N->getOpcode()) {
25684 default: llvm_unreachable("unknown opcode");
25685 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
25686 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
25689 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
25690 N->getOperand(0), N->getOperand(1));
25693 /// Do target-specific dag combines on X86ISD::FAND nodes.
25694 static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
25695 // FAND(0.0, x) -> 0.0
25696 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25697 if (C->getValueAPF().isPosZero())
25698 return N->getOperand(0);
25700 // FAND(x, 0.0) -> 0.0
25701 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25702 if (C->getValueAPF().isPosZero())
25703 return N->getOperand(1);
25708 /// Do target-specific dag combines on X86ISD::FANDN nodes
25709 static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG) {
25710 // FANDN(0.0, x) -> x
25711 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25712 if (C->getValueAPF().isPosZero())
25713 return N->getOperand(1);
25715 // FANDN(x, 0.0) -> 0.0
25716 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25717 if (C->getValueAPF().isPosZero())
25718 return N->getOperand(1);
25723 static SDValue PerformBTCombine(SDNode *N,
25725 TargetLowering::DAGCombinerInfo &DCI) {
25726 // BT ignores high bits in the bit index operand.
25727 SDValue Op1 = N->getOperand(1);
25728 if (Op1.hasOneUse()) {
25729 unsigned BitWidth = Op1.getValueSizeInBits();
25730 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
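// For example, with a 32-bit bit-index operand only the low five bits
// (Log2_32(32) == 5) are demanded.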
25731 APInt KnownZero, KnownOne;
25732 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
25733 !DCI.isBeforeLegalizeOps());
25734 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25735 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
25736 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
25737 DCI.CommitTargetLoweringOpt(TLO);
25742 static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
25743 SDValue Op = N->getOperand(0);
25744 if (Op.getOpcode() == ISD::BITCAST)
25745 Op = Op.getOperand(0);
25746 EVT VT = N->getValueType(0), OpVT = Op.getValueType();
25747 if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
25748 VT.getVectorElementType().getSizeInBits() ==
25749 OpVT.getVectorElementType().getSizeInBits()) {
25750 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
25755 static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
25756 const X86Subtarget *Subtarget) {
25757 EVT VT = N->getValueType(0);
25758 if (!VT.isVector())
25759 return SDValue();
25761 SDValue N0 = N->getOperand(0);
25762 SDValue N1 = N->getOperand(1);
25763 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
25766 // SIGN_EXTEND_INREG to v4i64 is an expensive operation on both
25767 // SSE and AVX2, since there is no sign-extended shift right
25768 // operation on a vector with 64-bit elements.
25769 //(sext_in_reg (v4i64 anyext (v4i32 x )), ExtraVT) ->
25770 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT)))
25771 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
25772 N0.getOpcode() == ISD::SIGN_EXTEND)) {
25773 SDValue N00 = N0.getOperand(0);
25775 // EXTLOAD has a better solution on AVX2,
25776 // it may be replaced with X86ISD::VSEXT node.
25777 if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256())
25778 if (!ISD::isNormalLoad(N00.getNode()))
25779 return SDValue();
25781 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
25782 SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
25783 N00, N1);
25784 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
25790 static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
25791 TargetLowering::DAGCombinerInfo &DCI,
25792 const X86Subtarget *Subtarget) {
25793 SDValue N0 = N->getOperand(0);
25794 EVT VT = N->getValueType(0);
25796 // (i8,i32 sext (sdivrem (i8 x, i8 y))) ->
25797 // (i8,i32 (sdivrem_sext_hreg (i8 x, i8 y)))
25798 // This exposes the sext to the sdivrem lowering, so that it directly extends
25799 // from AH (which we otherwise need to do contortions to access).
25800 if (N0.getOpcode() == ISD::SDIVREM && N0.getResNo() == 1 &&
25801 N0.getValueType() == MVT::i8 && VT == MVT::i32) {
25803 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25804 SDValue R = DAG.getNode(X86ISD::SDIVREM8_SEXT_HREG, dl, NodeTys,
25805 N0.getOperand(0), N0.getOperand(1));
25806 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25807 return R.getValue(1);
25810 if (!DCI.isBeforeLegalizeOps())
25811 return SDValue();
25813 if (!Subtarget->hasFp256())
25814 return SDValue();
25816 if (VT.isVector() && VT.getSizeInBits() == 256) {
25817 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25818 if (R.getNode())
25819 return R;
25820 }
25822 return SDValue();
25823 }
25825 static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
25826 const X86Subtarget* Subtarget) {
25828 EVT VT = N->getValueType(0);
25830 // Let legalize expand this if it isn't a legal type yet.
25831 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
25832 return SDValue();
25834 EVT ScalarVT = VT.getScalarType();
25835 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
25836 (!Subtarget->hasFMA() && !Subtarget->hasFMA4()))
25837 return SDValue();
25839 SDValue A = N->getOperand(0);
25840 SDValue B = N->getOperand(1);
25841 SDValue C = N->getOperand(2);
25843 bool NegA = (A.getOpcode() == ISD::FNEG);
25844 bool NegB = (B.getOpcode() == ISD::FNEG);
25845 bool NegC = (C.getOpcode() == ISD::FNEG);
25847 // Negative multiplication when NegA xor NegB
25848 bool NegMul = (NegA != NegB);
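// Strip the FNEGs from the operands; the negations are folded into the fused
// opcode chosen below.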
25849 if (NegA)
25850 A = A.getOperand(0);
25851 if (NegB)
25852 B = B.getOperand(0);
25853 if (NegC)
25854 C = C.getOperand(0);
25856 unsigned Opcode;
25857 if (!NegMul)
25858 Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB;
25859 else
25860 Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB;
25862 return DAG.getNode(Opcode, dl, VT, A, B, C);
25865 static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
25866 TargetLowering::DAGCombinerInfo &DCI,
25867 const X86Subtarget *Subtarget) {
25868 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
25869 // (and (i32 x86isd::setcc_carry), 1)
25870 // This eliminates the zext. This transformation is necessary because
25871 // ISD::SETCC is always legalized to i8.
25873 SDValue N0 = N->getOperand(0);
25874 EVT VT = N->getValueType(0);
25876 if (N0.getOpcode() == ISD::AND &&
25878 N0.getOperand(0).hasOneUse()) {
25879 SDValue N00 = N0.getOperand(0);
25880 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25881 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
25882 if (!C || C->getZExtValue() != 1)
25883 return SDValue();
25884 return DAG.getNode(ISD::AND, dl, VT,
25885 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25886 N00.getOperand(0), N00.getOperand(1)),
25887 DAG.getConstant(1, VT));
25891 if (N0.getOpcode() == ISD::TRUNCATE &&
25893 N0.getOperand(0).hasOneUse()) {
25894 SDValue N00 = N0.getOperand(0);
25895 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25896 return DAG.getNode(ISD::AND, dl, VT,
25897 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25898 N00.getOperand(0), N00.getOperand(1)),
25899 DAG.getConstant(1, VT));
25902 if (VT.is256BitVector()) {
25903 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25908 // (i8,i32 zext (udivrem (i8 x, i8 y))) ->
25909 // (i8,i32 (udivrem_zext_hreg (i8 x, i8 y)))
25910 // This exposes the zext to the udivrem lowering, so that it directly extends
25911 // from AH (which we otherwise need to do contortions to access).
25912 if (N0.getOpcode() == ISD::UDIVREM &&
25913 N0.getResNo() == 1 && N0.getValueType() == MVT::i8 &&
25914 (VT == MVT::i32 || VT == MVT::i64)) {
25915 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25916 SDValue R = DAG.getNode(X86ISD::UDIVREM8_ZEXT_HREG, dl, NodeTys,
25917 N0.getOperand(0), N0.getOperand(1));
25918 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25919 return R.getValue(1);
25925 // Optimize x == -y --> x+y == 0
25926 // x != -y --> x+y != 0
25927 static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
25928 const X86Subtarget* Subtarget) {
25929 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
25930 SDValue LHS = N->getOperand(0);
25931 SDValue RHS = N->getOperand(1);
25932 EVT VT = N->getValueType(0);
25935 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
25936 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
25937 if (C->getAPIntValue() == 0 && LHS.hasOneUse()) {
25938 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
25939 LHS.getValueType(), RHS, LHS.getOperand(1));
25940 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
25941 addV, DAG.getConstant(0, addV.getValueType()), CC);
25943 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
25944 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0)))
25945 if (C->getAPIntValue() == 0 && RHS.hasOneUse()) {
25946 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
25947 RHS.getValueType(), LHS, RHS.getOperand(1));
25948 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
25949 addV, DAG.getConstant(0, addV.getValueType()), CC);
25952 if (VT.getScalarType() == MVT::i1) {
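// For i1 results, fold a setcc of a sign-extended i1 vector against an
// all-zeros vector: (sext x) == 0 -> not x, and (sext x) != 0 -> x.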
25953 bool IsSEXT0 = (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
25954 (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
25955 bool IsVZero0 = ISD::isBuildVectorAllZeros(LHS.getNode());
25956 if (!IsSEXT0 && !IsVZero0)
25957 return SDValue();
25958 bool IsSEXT1 = (RHS.getOpcode() == ISD::SIGN_EXTEND) &&
25959 (RHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
25960 bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
25962 if (!IsSEXT1 && !IsVZero1)
25963 return SDValue();
25965 if (IsSEXT0 && IsVZero1) {
25966 assert(VT == LHS.getOperand(0).getValueType() && "Unexpected operand type");
25967 if (CC == ISD::SETEQ)
25968 return DAG.getNOT(DL, LHS.getOperand(0), VT);
25969 return LHS.getOperand(0);
25971 if (IsSEXT1 && IsVZero0) {
25972 assert(VT == RHS.getOperand(0).getValueType() && "Unexpected operand type");
25973 if (CC == ISD::SETEQ)
25974 return DAG.getNOT(DL, RHS.getOperand(0), VT);
25975 return RHS.getOperand(0);
25982 static SDValue PerformINSERTPSCombine(SDNode *N, SelectionDAG &DAG,
25983 const X86Subtarget *Subtarget) {
25985 MVT VT = N->getOperand(1)->getSimpleValueType(0);
25986 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
25987 "X86insertps is only defined for v4x32");
25989 SDValue Ld = N->getOperand(1);
25990 if (MayFoldLoad(Ld)) {
25991 // Extract the countS bits from the immediate so we can get the proper
25992 // address when narrowing the vector load to a specific element.
25993 // When the second source op is a memory address, insertps doesn't use
25994 // countS and just gets an f32 from that address.
25995 unsigned DestIndex =
25996 cast<ConstantSDNode>(N->getOperand(2))->getZExtValue() >> 6;
25997 Ld = NarrowVectorLoadToElement(cast<LoadSDNode>(Ld), DestIndex, DAG);
26001 // Create this as a scalar to vector to match the instruction pattern.
26002 SDValue LoadScalarToVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Ld);
26003 // countS bits are ignored when loading from memory on insertps, which
26004 // means we don't need to explicitly set them to 0.
26005 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N->getOperand(0),
26006 LoadScalarToVector, N->getOperand(2));
26009 // Helper function of PerformSETCCCombine. It materializes "setb reg"
26010 // as "sbb reg,reg", since that can be extended without a zext and produces
26011 // an all-ones bit which is more useful than 0/1 in some cases.
26012 static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
26013 MVT VT) {
26014 if (VT == MVT::i8)
26015 return DAG.getNode(ISD::AND, DL, VT,
26016 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
26017 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS),
26018 DAG.getConstant(1, VT));
26019 assert (VT == MVT::i1 && "Unexpected type for SETCC node");
26020 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1,
26021 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
26022 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS));
26025 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
26026 static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
26027 TargetLowering::DAGCombinerInfo &DCI,
26028 const X86Subtarget *Subtarget) {
26030 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
26031 SDValue EFLAGS = N->getOperand(1);
26033 if (CC == X86::COND_A) {
26034 // Try to convert COND_A into COND_B in an attempt to facilitate
26035 // materializing "setb reg".
26037 // Do not flip "e > c", where "c" is a constant, because Cmp instruction
26038 // cannot take an immediate as its first operand.
26040 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
26041 EFLAGS.getValueType().isInteger() &&
26042 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
26043 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
26044 EFLAGS.getNode()->getVTList(),
26045 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
26046 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
26047 return MaterializeSETB(DL, NewEFLAGS, DAG, N->getSimpleValueType(0));
26051 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
26052 // a zext and produces an all-ones bit which is more useful than 0/1 in some
26054 if (CC == X86::COND_B)
26055 return MaterializeSETB(DL, EFLAGS, DAG, N->getSimpleValueType(0));
26059 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
26060 if (Flags.getNode()) {
26061 SDValue Cond = DAG.getConstant(CC, MVT::i8);
26062 return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
26068 // Optimize branch condition evaluation.
26070 static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
26071 TargetLowering::DAGCombinerInfo &DCI,
26072 const X86Subtarget *Subtarget) {
26074 SDValue Chain = N->getOperand(0);
26075 SDValue Dest = N->getOperand(1);
26076 SDValue EFLAGS = N->getOperand(3);
26077 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
26081 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
26082 if (Flags.getNode()) {
26083 SDValue Cond = DAG.getConstant(CC, MVT::i8);
26084 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
26091 static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
26092 SelectionDAG &DAG) {
26093 // Take advantage of vector comparisons producing 0 or -1 in each lane to
26094 // optimize away operation when it's from a constant.
26096 // The general transformation is:
26097 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
26098 // AND(VECTOR_CMP(x,y), constant2)
26099 // constant2 = UNARYOP(constant)
26101 // Early exit if this isn't a vector operation, the operand of the
26102 // unary operation isn't a bitwise AND, or if the sizes of the operations
26103 // aren't the same.
26104 EVT VT = N->getValueType(0);
26105 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
26106 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
26107 VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
26110 // Now check that the other operand of the AND is a constant. We could
26111 // make the transformation for non-constant splats as well, but it's unclear
26112 // that would be a benefit as it would not eliminate any operations, just
26113 // perform one more step in scalar code before moving to the vector unit.
26114 if (BuildVectorSDNode *BV =
26115 dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
26116 // Bail out if the vector isn't a constant.
26117 if (!BV->isConstant())
26120 // Everything checks out. Build up the new and improved node.
26122 EVT IntVT = BV->getValueType(0);
26123 // Create a new constant of the appropriate type for the transformed
26125 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
26126 // The AND node needs bitcasts to/from an integer vector type around it.
26127 SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
26128 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
26129 N->getOperand(0)->getOperand(0), MaskConst);
26130 SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
26137 static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
26138 const X86Subtarget *Subtarget) {
26139 // First try to optimize away the conversion entirely when it's
26140 // conditionally from a constant. Vectors only.
26141 SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
26142 if (Res != SDValue())
26143 return Res;
26145 // Now move on to more general possibilities.
26146 SDValue Op0 = N->getOperand(0);
26147 EVT InVT = Op0->getValueType(0);
26149 // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32))
26150 if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
26152 MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
26153 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
26154 return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
26157 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
26158 // a 32-bit target where SSE doesn't support i64->FP operations.
26159 if (Op0.getOpcode() == ISD::LOAD) {
26160 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
26161 EVT VT = Ld->getValueType(0);
26162 if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
26163 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
26164 !Subtarget->is64Bit() && VT == MVT::i64) {
26165 SDValue FILDChain = Subtarget->getTargetLowering()->BuildFILD(
26166 SDValue(N, 0), Ld->getValueType(0), Ld->getChain(), Op0, DAG);
26167 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
26174 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
26175 static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
26176 X86TargetLowering::DAGCombinerInfo &DCI) {
26177 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
26178 // the result is either zero or one (depending on the input carry bit).
26179 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
26180 if (X86::isZeroNode(N->getOperand(0)) &&
26181 X86::isZeroNode(N->getOperand(1)) &&
26182 // We don't have a good way to replace an EFLAGS use, so only do this when
26184 SDValue(N, 1).use_empty()) {
26186 EVT VT = N->getValueType(0);
26187 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
26188 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
26189 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
26190 DAG.getConstant(X86::COND_B,MVT::i8),
26192 DAG.getConstant(1, VT));
26193 return DCI.CombineTo(N, Res1, CarryOut);
26199 // fold (add Y, (sete X, 0)) -> adc 0, Y
26200 // (add Y, (setne X, 0)) -> sbb -1, Y
26201 // (sub (sete X, 0), Y) -> sbb 0, Y
26202 // (sub (setne X, 0), Y) -> adc -1, Y
26203 static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
26206 // Look through ZExts.
26207 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
26208 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
26209 return SDValue();
26211 SDValue SetCC = Ext.getOperand(0);
26212 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
26213 return SDValue();
26215 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
26216 if (CC != X86::COND_E && CC != X86::COND_NE)
26217 return SDValue();
26219 SDValue Cmp = SetCC.getOperand(1);
26220 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
26221 !X86::isZeroNode(Cmp.getOperand(1)) ||
26222 !Cmp.getOperand(0).getValueType().isInteger())
26223 return SDValue();
26225 SDValue CmpOp0 = Cmp.getOperand(0);
26226 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
26227 DAG.getConstant(1, CmpOp0.getValueType()));
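// CMP X, 1 sets the carry flag exactly when X == 0; the ADC/SBB below consumes
// that carry to implement the folds listed above.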
26229 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
26230 if (CC == X86::COND_NE)
26231 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
26232 DL, OtherVal.getValueType(), OtherVal,
26233 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
26234 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
26235 DL, OtherVal.getValueType(), OtherVal,
26236 DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
26239 /// PerformADDCombine - Do target-specific dag combines on integer adds.
26240 static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
26241 const X86Subtarget *Subtarget) {
26242 EVT VT = N->getValueType(0);
26243 SDValue Op0 = N->getOperand(0);
26244 SDValue Op1 = N->getOperand(1);
26246 // Try to synthesize horizontal adds from adds of shuffles.
26247 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26248 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26249 isHorizontalBinOp(Op0, Op1, true))
26250 return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
26252 return OptimizeConditionalInDecrement(N, DAG);
26255 static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
26256 const X86Subtarget *Subtarget) {
26257 SDValue Op0 = N->getOperand(0);
26258 SDValue Op1 = N->getOperand(1);
26260 // X86 can't encode an immediate LHS of a sub. See if we can push the
26261 // negation into a preceding instruction.
26262 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
26263 // If the RHS of the sub is a XOR with one use and a constant, invert the
26264 // immediate. Then add one to the LHS of the sub so we can turn
26265 // X-Y -> X+~Y+1, saving one register.
26266 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
26267 isa<ConstantSDNode>(Op1.getOperand(1))) {
26268 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
26269 EVT VT = Op0.getValueType();
26270 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
26271 Op1.getOperand(0),
26272 DAG.getConstant(~XorC, VT));
26273 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
26274 DAG.getConstant(C->getAPIntValue()+1, VT));
26278 // Try to synthesize horizontal adds from adds of shuffles.
26279 EVT VT = N->getValueType(0);
26280 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26281 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26282 isHorizontalBinOp(Op0, Op1, true))
26283 return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
26285 return OptimizeConditionalInDecrement(N, DAG);
26288 /// performVZEXTCombine - Performs combines on X86ISD::VZEXT nodes.
26289 static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
26290 TargetLowering::DAGCombinerInfo &DCI,
26291 const X86Subtarget *Subtarget) {
26293 MVT VT = N->getSimpleValueType(0);
26294 SDValue Op = N->getOperand(0);
26295 MVT OpVT = Op.getSimpleValueType();
26296 MVT OpEltVT = OpVT.getVectorElementType();
26297 unsigned InputBits = OpEltVT.getSizeInBits() * VT.getVectorNumElements();
26299 // (vzext (bitcast (vzext x))) -> (vzext x)
26300 SDValue V = Op;
26301 while (V.getOpcode() == ISD::BITCAST)
26302 V = V.getOperand(0);
26304 if (V != Op && V.getOpcode() == X86ISD::VZEXT) {
26305 MVT InnerVT = V.getSimpleValueType();
26306 MVT InnerEltVT = InnerVT.getVectorElementType();
26308 // If the element sizes match exactly, we can just do one larger vzext. This
26309 // is always an exact type match as vzext operates on integer types.
26310 if (OpEltVT == InnerEltVT) {
26311 assert(OpVT == InnerVT && "Types must match for vzext!");
26312 return DAG.getNode(X86ISD::VZEXT, DL, VT, V.getOperand(0));
26315 // The only other way we can combine them is if only a single element of the
26316 // inner vzext is used in the input to the outer vzext.
26317 if (InnerEltVT.getSizeInBits() < InputBits)
26318 return SDValue();
26320 // In this case, the inner vzext is completely dead because we're going to
26321 // only look at bits inside of the low element. Just do the outer vzext on
26322 // a bitcast of the input to the inner.
26323 return DAG.getNode(X86ISD::VZEXT, DL, VT,
26324 DAG.getNode(ISD::BITCAST, DL, OpVT, V));
26327 // Check if we can bypass extracting and re-inserting an element of an input
26328 // vector. Essentially:
26329 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
26330 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
26331 V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
26332 V.getOperand(0).getSimpleValueType().getSizeInBits() == InputBits) {
26333 SDValue ExtractedV = V.getOperand(0);
26334 SDValue OrigV = ExtractedV.getOperand(0);
26335 if (auto *ExtractIdx = dyn_cast<ConstantSDNode>(ExtractedV.getOperand(1)))
26336 if (ExtractIdx->getZExtValue() == 0) {
26337 MVT OrigVT = OrigV.getSimpleValueType();
26338 // Extract a subvector if necessary...
26339 if (OrigVT.getSizeInBits() > OpVT.getSizeInBits()) {
26340 int Ratio = OrigVT.getSizeInBits() / OpVT.getSizeInBits();
26341 OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
26342 OrigVT.getVectorNumElements() / Ratio);
26343 OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
26344 DAG.getIntPtrConstant(0));
26346 Op = DAG.getNode(ISD::BITCAST, DL, OpVT, OrigV);
26347 return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
26354 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
26355 DAGCombinerInfo &DCI) const {
26356 SelectionDAG &DAG = DCI.DAG;
26357 switch (N->getOpcode()) {
26358 default: break;
26359 case ISD::EXTRACT_VECTOR_ELT:
26360 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
26363 case X86ISD::SHRUNKBLEND:
26364 return PerformSELECTCombine(N, DAG, DCI, Subtarget);
26365 case ISD::BITCAST: return PerformBITCASTCombine(N, DAG);
26366 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget);
26367 case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget);
26368 case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget);
26369 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
26370 case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
26371 case ISD::SHL:
26372 case ISD::SRA:
26373 case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget);
26374 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
26375 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
26376 case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
26377 case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
26378 case ISD::MLOAD: return PerformMLOADCombine(N, DAG, DCI, Subtarget);
26379 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
26380 case ISD::MSTORE: return PerformMSTORECombine(N, DAG, Subtarget);
26381 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, Subtarget);
26382 case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
26383 case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget);
26384 case X86ISD::FXOR:
26385 case X86ISD::FOR: return PerformFORCombine(N, DAG);
26386 case X86ISD::FMIN:
26387 case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG);
26388 case X86ISD::FAND: return PerformFANDCombine(N, DAG);
26389 case X86ISD::FANDN: return PerformFANDNCombine(N, DAG);
26390 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
26391 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
26392 case ISD::ANY_EXTEND:
26393 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget);
26394 case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
26395 case ISD::SIGN_EXTEND_INREG:
26396 return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
26397 case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG,DCI,Subtarget);
26398 case ISD::SETCC: return PerformISDSETCCCombine(N, DAG, Subtarget);
26399 case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget);
26400 case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget);
26401 case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget);
26402 case X86ISD::SHUFP: // Handle all target specific shuffles
26403 case X86ISD::PALIGNR:
26404 case X86ISD::UNPCKH:
26405 case X86ISD::UNPCKL:
26406 case X86ISD::MOVHLPS:
26407 case X86ISD::MOVLHPS:
26408 case X86ISD::PSHUFB:
26409 case X86ISD::PSHUFD:
26410 case X86ISD::PSHUFHW:
26411 case X86ISD::PSHUFLW:
26412 case X86ISD::MOVSS:
26413 case X86ISD::MOVSD:
26414 case X86ISD::VPERMILPI:
26415 case X86ISD::VPERM2X128:
26416 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget);
26417 case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget);
26418 case ISD::INTRINSIC_WO_CHAIN:
26419 return PerformINTRINSIC_WO_CHAINCombine(N, DAG, Subtarget);
26420 case X86ISD::INSERTPS: {
26421 if (getTargetMachine().getOptLevel() > CodeGenOpt::None)
26422 return PerformINSERTPSCombine(N, DAG, Subtarget);
26423 break;
26424 }
26425 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DAG, Subtarget);
26426 }
26428 return SDValue();
26429 }
26431 /// isTypeDesirableForOp - Return true if the target has native support for
26432 /// the specified value type and it is 'desirable' to use the type for the
26433 /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
26434 /// instruction encodings are longer and some i16 instructions are slow.
26435 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
26436 if (!isTypeLegal(VT))
26437 return false;
26438 if (VT != MVT::i16)
26439 return true;
26445 case ISD::SIGN_EXTEND:
26446 case ISD::ZERO_EXTEND:
26447 case ISD::ANY_EXTEND:
26460 /// IsDesirableToPromoteOp - This method query the target whether it is
26461 /// beneficial for dag combiner to promote the specified node. If true, it
26462 /// should return the desired promotion type by reference.
26463 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
26464 EVT VT = Op.getValueType();
26465 if (VT != MVT::i16)
26468 bool Promote = false;
26469 bool Commute = false;
26470 switch (Op.getOpcode()) {
26473 LoadSDNode *LD = cast<LoadSDNode>(Op);
26474 // If the non-extending load has a single use and it's not live out, then it
26475 // might be folded.
26476 if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
26477 Op.hasOneUse()*/) {
26478 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
26479 UE = Op.getNode()->use_end(); UI != UE; ++UI) {
26480 // The only case where we'd want to promote LOAD (rather than it being
26481 // promoted as an operand) is when its only use is a liveout.
26482 if (UI->getOpcode() != ISD::CopyToReg)
26489 case ISD::SIGN_EXTEND:
26490 case ISD::ZERO_EXTEND:
26491 case ISD::ANY_EXTEND:
26496 SDValue N0 = Op.getOperand(0);
26497 // Look out for (store (shl (load), x)).
26498 if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
26511 SDValue N0 = Op.getOperand(0);
26512 SDValue N1 = Op.getOperand(1);
26513 if (!Commute && MayFoldLoad(N1))
26515 // Avoid disabling potential load folding opportunities.
26516 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
26518 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
26528 //===----------------------------------------------------------------------===//
26529 // X86 Inline Assembly Support
26530 //===----------------------------------------------------------------------===//
26533 // Helper to match a string separated by whitespace.
26534 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) {
26535 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace.
26537 for (unsigned i = 0, e = args.size(); i != e; ++i) {
26538 StringRef piece(*args[i]);
26539 if (!s.startswith(piece)) // Check if the piece matches.
26540 return false;
26542 s = s.substr(piece.size());
26543 StringRef::size_type pos = s.find_first_not_of(" \t");
26544 if (pos == 0) // We matched a prefix.
26545 return false;
26547 s = s.substr(pos);
26548 }
26550 return s.empty();
26551 }
26552 const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={};
26555 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
26557 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
26558 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
26559 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
26560 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
26562 if (AsmPieces.size() == 3)
26563 return true;
26564 else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
26565 return true;
26566 }
26567 }
26568 return false;
26569 }
26571 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
26572 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
26574 std::string AsmStr = IA->getAsmString();
26576 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
26577 if (!Ty || Ty->getBitWidth() % 16 != 0)
26578 return false;
26580 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
26581 SmallVector<StringRef, 4> AsmPieces;
26582 SplitString(AsmStr, AsmPieces, ";\n");
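// Each statement of the inline asm (separated by ';' or a newline) becomes one
// entry in AsmPieces.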
26584 switch (AsmPieces.size()) {
26585 default: return false;
26587 // FIXME: this should verify that we are targeting a 486 or better. If not,
26588 // we will turn this bswap into something that will be lowered to logical
26589 // ops instead of emitting the bswap asm. For now, we don't support 486 or
26590 // lower so don't worry about this.
26592 if (matchAsm(AsmPieces[0], "bswap", "$0") ||
26593 matchAsm(AsmPieces[0], "bswapl", "$0") ||
26594 matchAsm(AsmPieces[0], "bswapq", "$0") ||
26595 matchAsm(AsmPieces[0], "bswap", "${0:q}") ||
26596 matchAsm(AsmPieces[0], "bswapl", "${0:q}") ||
26597 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) {
26598 // No need to check constraints, nothing other than the equivalent of
26599 // "=r,0" would be valid here.
26600 return IntrinsicLowering::LowerToByteSwap(CI);
26603 // rorw $$8, ${0:w} --> llvm.bswap.i16
26604 if (CI->getType()->isIntegerTy(16) &&
26605 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26606 (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") ||
26607 matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) {
26609 const std::string &ConstraintsStr = IA->getConstraintString();
26610 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26611 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26612 if (clobbersFlagRegisters(AsmPieces))
26613 return IntrinsicLowering::LowerToByteSwap(CI);
26617 if (CI->getType()->isIntegerTy(32) &&
26618 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26619 matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") &&
26620 matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") &&
26621 matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) {
26623 const std::string &ConstraintsStr = IA->getConstraintString();
26624 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26625 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26626 if (clobbersFlagRegisters(AsmPieces))
26627 return IntrinsicLowering::LowerToByteSwap(CI);
    if (CI->getType()->isIntegerTy(64)) {
      InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
      if (Constraints.size() >= 2 &&
          Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
          Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
        // bswap %eax / bswap %edx / xchgl %eax, %edx  -> llvm.bswap.i64
        if (matchAsm(AsmPieces[0], "bswap", "%eax") &&
            matchAsm(AsmPieces[1], "bswap", "%edx") &&
            matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx"))
          return IntrinsicLowering::LowerToByteSwap(CI);
      }
    }
    break;
  }
  return false;
}

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
X86TargetLowering::ConstraintType
X86TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'R': case 'q': case 'Q': case 'f': case 't':
    case 'u': case 'y': case 'x': case 'Y': case 'l':
      return C_RegisterClass;
    case 'a': case 'b': case 'c': case 'd': case 'S': case 'D': case 'A':
      return C_Register;
    case 'I': case 'J': case 'K': case 'L': case 'M':
    case 'N': case 'G': case 'C': case 'e': case 'Z':
      return C_Other;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
  X86TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'R': case 'q': case 'Q': case 'a': case 'b':
  case 'c': case 'd': case 'S': case 'D': case 'A':
    if (CallOperandVal->getType()->isIntegerTy())
      weight = CW_SpecificReg;
    break;
  case 'f': case 't': case 'u':
    if (type->isFloatingPointTy())
      weight = CW_SpecificReg;
    break;
  case 'y':
    if (type->isX86_MMXTy() && Subtarget->hasMMX())
      weight = CW_SpecificReg;
    break;
  case 'x': case 'Y':
    if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
        ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256()))
      weight = CW_Register;
    break;
  case 'I':
    if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
      if (C->getZExtValue() <= 31)
        weight = CW_Constant;
    }
    break;
  case 'J':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 63)
        weight = CW_Constant;
    }
    break;
  case 'K':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
        weight = CW_Constant;
    }
    break;
  case 'L':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
        weight = CW_Constant;
    }
    break;
  case 'M':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 3)
        weight = CW_Constant;
    }
    break;
  case 'N':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xff)
        weight = CW_Constant;
    }
    break;
  case 'G':
  case 'C':
    if (dyn_cast<ConstantFP>(CallOperandVal)) {
      weight = CW_Constant;
    }
    break;
  case 'e':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if ((C->getSExtValue() >= -0x80000000LL) &&
          (C->getSExtValue() <= 0x7fffffffLL))
        weight = CW_Constant;
    }
    break;
  case 'Z':
    if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
      if (C->getZExtValue() <= 0xffffffff)
        weight = CW_Constant;
    }
    break;
  }
  return weight;
}

/// LowerXConstraint - try to replace an X constraint, which matches anything,
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
const char *X86TargetLowering::
LowerXConstraint(EVT ConstraintVT) const {
  // FP X constraints get lowered to SSE1/2 registers if available, otherwise
  // 'f' like normal targets.
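  // E.g., an "X" operand of floating-point type becomes "Y" when SSE2 is
  // available (so it is placed in an XMM register) and "x" with only SSE1;
  // non-FP operands fall through to the default handling.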
  if (ConstraintVT.isFloatingPoint()) {
    if (Subtarget->hasSSE2())
      return "Y";
    if (Subtarget->hasSSE1())
      return "x";
  }
  return TargetLowering::LowerXConstraint(ConstraintVT);
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
                                                     std::string &Constraint,
                                                     std::vector<SDValue>&Ops,
                                                     SelectionDAG &DAG) const {
  SDValue Result;

  // Only support length 1 constraints for now.
  if (Constraint.length() > 1) return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default: break;
  case 'I':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 31) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'J':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 63) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'K':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (isInt<8>(C->getSExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'L':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
          (Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
        Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'M':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 3) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'N':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 255) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'O':
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (C->getZExtValue() <= 127) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    return;
  case 'e': {
    // 32-bit signed value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getSExtValue())) {
        // Widen to 64 bits here to get it sign extended.
        Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
        break;
      }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    }
    return;
  }
  case 'Z': {
    // 32-bit unsigned value
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
      if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
                                           C->getZExtValue())) {
        Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
        break;
      }
    }
    // FIXME gcc accepts some relocatable values here too, but only in certain
    // memory models; it's complicated.
    return;
  }
  case 'i': {
    // Literal immediates are always ok.
    if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
      // Widen to 64 bits here to get it sign extended.
      Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
      break;
    }

    // In any sort of PIC mode addresses need to be computed at runtime by
    // adding in a register or some sort of table lookup. These can't
    // be used as immediates.
    if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
      return;

    // If we are in non-pic codegen mode, we allow the address of a global
    // (with an optional displacement) to be used with 'i'.
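    // E.g. (hypothetical, non-PIC code):
    //   asm volatile("# %0" :: "i"(&GlobalTable[4]));
    // the global-plus-offset form is matched below and emitted as a single
    // TargetGlobalAddress node carrying the folded offset.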
    GlobalAddressSDNode *GA = nullptr;
    int64_t Offset = 0;

    // Match either (GA), (GA+C), (GA+C1+C2), etc.
    while (1) {
      if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
        Offset += GA->getOffset();
        break;
      } else if (Op.getOpcode() == ISD::ADD) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      } else if (Op.getOpcode() == ISD::SUB) {
        if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
          Offset += -C->getZExtValue();
          Op = Op.getOperand(0);
          continue;
        }
      }

      // Otherwise, this isn't something we can handle, reject it.
      return;
    }

    const GlobalValue *GV = GA->getGlobal();
    // If we require an extra load to get this address, as in PIC mode, we
    // can't accept it.
    if (isGlobalStubReference(
            Subtarget->ClassifyGlobalReference(GV, DAG.getTarget())))
      return;

    Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
                                        GA->getValueType(0), Offset);
    break;
  }
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }
  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
                                                MVT VT) const {
  // First, see if this is a constraint that directly corresponds to an LLVM
  // register class.
  if (Constraint.size() == 1) {
    // GCC Constraint Letters
    switch (Constraint[0]) {
    default: break;
      // TODO: Slight differences here in allocation order and leaving
      // RIP in the class. Do they matter any more here than they do
      // in the normal allocation?
    case 'q':   // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
      if (Subtarget->is64Bit()) {
        if (VT == MVT::i32 || VT == MVT::f32)
          return std::make_pair(0U, &X86::GR32RegClass);
        if (VT == MVT::i16)
          return std::make_pair(0U, &X86::GR16RegClass);
        if (VT == MVT::i8 || VT == MVT::i1)
          return std::make_pair(0U, &X86::GR8RegClass);
        if (VT == MVT::i64 || VT == MVT::f64)
          return std::make_pair(0U, &X86::GR64RegClass);
        break;
      }
      // 32-bit fallthrough
    case 'Q':   // Q_REGS
      if (VT == MVT::i32 || VT == MVT::f32)
        return std::make_pair(0U, &X86::GR32_ABCDRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_ABCDRegClass);
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
      if (VT == MVT::i64)
        return std::make_pair(0U, &X86::GR64_ABCDRegClass);
      break;
    case 'r':   // GENERAL_REGS
    case 'l':   // INDEX_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8RegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16RegClass);
      if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32RegClass);
      return std::make_pair(0U, &X86::GR64RegClass);
    case 'R':   // LEGACY_REGS
      if (VT == MVT::i8 || VT == MVT::i1)
        return std::make_pair(0U, &X86::GR8_NOREXRegClass);
      if (VT == MVT::i16)
        return std::make_pair(0U, &X86::GR16_NOREXRegClass);
      if (VT == MVT::i32 || !Subtarget->is64Bit())
        return std::make_pair(0U, &X86::GR32_NOREXRegClass);
      return std::make_pair(0U, &X86::GR64_NOREXRegClass);
    case 'f':   // FP Stack registers.
      // If SSE is enabled for this VT, use f80 to ensure the isel moves the
      // value to the correct fpstack register class.
      if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP32RegClass);
      if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
        return std::make_pair(0U, &X86::RFP64RegClass);
      return std::make_pair(0U, &X86::RFP80RegClass);
    case 'y':   // MMX_REGS if MMX allowed.
      if (!Subtarget->hasMMX()) break;
      return std::make_pair(0U, &X86::VR64RegClass);
    case 'Y':   // SSE_REGS if SSE2 allowed
      if (!Subtarget->hasSSE2()) break;
      // FALL THROUGH.
    case 'x':   // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
      if (!Subtarget->hasSSE1()) break;

      switch (VT.SimpleTy) {
      default: break;
      // Scalar SSE types.
      case MVT::f32:
      case MVT::i32:
        return std::make_pair(0U, &X86::FR32RegClass);
      case MVT::f64:
      case MVT::i64:
        return std::make_pair(0U, &X86::FR64RegClass);
      // Vector types.
      case MVT::v16i8:
      case MVT::v8i16:
      case MVT::v4i32:
      case MVT::v2i64:
      case MVT::v4f32:
      case MVT::v2f64:
        return std::make_pair(0U, &X86::VR128RegClass);
      // AVX types.
      case MVT::v32i8:
      case MVT::v16i16:
      case MVT::v8i32:
      case MVT::v4i64:
      case MVT::v8f32:
      case MVT::v4f64:
        return std::make_pair(0U, &X86::VR256RegClass);
      case MVT::v8f64:
      case MVT::v16f32:
      case MVT::v16i32:
      case MVT::v8i64:
        return std::make_pair(0U, &X86::VR512RegClass);
      }
      break;
    }
  }

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass*> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);

  // Not found as a standard register?
  if (!Res.second) {
    // Map st(0) -> st(7) -> ST0
    if (Constraint.size() == 7 && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 's' &&
        tolower(Constraint[2]) == 't' &&
        Constraint[3] == '(' &&
        (Constraint[4] >= '0' && Constraint[4] <= '7') &&
        Constraint[5] == ')' &&
        Constraint[6] == '}') {
      Res.first = X86::FP0+Constraint[4]-'0';
      Res.second = &X86::RFP80RegClass;
      return Res;
    }

    // GCC allows "st(0)" to be called just plain "st".
    if (StringRef("{st}").equals_lower(Constraint)) {
      Res.first = X86::FP0;
      Res.second = &X86::RFP80RegClass;
      return Res;
    }

    // flags -> EFLAGS
    if (StringRef("{flags}").equals_lower(Constraint)) {
      Res.first = X86::EFLAGS;
      Res.second = &X86::CCRRegClass;
      return Res;
    }

    // 'A' means EAX + EDX.
    if (Constraint == "A") {
      Res.first = X86::EAX;
      Res.second = &X86::GR32_ADRegClass;
      return Res;
    }
    return Res;
  }

  // Otherwise, check to see if this is a register class of the wrong value
  // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
  // turn into {ax},{dx}.
  if (Res.second->hasType(VT))
    return Res;   // Correct type already, nothing to do.

  // All of the single-register GCC register classes map their values onto
  // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
  // really want an 8-bit or 32-bit register, map to the appropriate register
  // class and return the appropriate register.
  if (Res.second == &X86::GR16RegClass) {
    if (VT == MVT::i8 || VT == MVT::i1) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::AL; break;
      case X86::DX: DestReg = X86::DL; break;
      case X86::CX: DestReg = X86::CL; break;
      case X86::BX: DestReg = X86::BL; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR8RegClass;
      }
    } else if (VT == MVT::i32 || VT == MVT::f32) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::EAX; break;
      case X86::DX: DestReg = X86::EDX; break;
      case X86::CX: DestReg = X86::ECX; break;
      case X86::BX: DestReg = X86::EBX; break;
      case X86::SI: DestReg = X86::ESI; break;
      case X86::DI: DestReg = X86::EDI; break;
      case X86::BP: DestReg = X86::EBP; break;
      case X86::SP: DestReg = X86::ESP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR32RegClass;
      }
    } else if (VT == MVT::i64 || VT == MVT::f64) {
      unsigned DestReg = 0;
      switch (Res.first) {
      default: break;
      case X86::AX: DestReg = X86::RAX; break;
      case X86::DX: DestReg = X86::RDX; break;
      case X86::CX: DestReg = X86::RCX; break;
      case X86::BX: DestReg = X86::RBX; break;
      case X86::SI: DestReg = X86::RSI; break;
      case X86::DI: DestReg = X86::RDI; break;
      case X86::BP: DestReg = X86::RBP; break;
      case X86::SP: DestReg = X86::RSP; break;
      }
      if (DestReg) {
        Res.first = DestReg;
        Res.second = &X86::GR64RegClass;
      }
    }
  } else if (Res.second == &X86::FR32RegClass ||
             Res.second == &X86::FR64RegClass ||
             Res.second == &X86::VR128RegClass ||
             Res.second == &X86::VR256RegClass ||
             Res.second == &X86::FR32XRegClass ||
             Res.second == &X86::FR64XRegClass ||
             Res.second == &X86::VR128XRegClass ||
             Res.second == &X86::VR256XRegClass ||
             Res.second == &X86::VR512RegClass) {
    // Handle references to XMM physical registers that got mapped into the
    // wrong class. This can happen with constraints like {xmm0} where the
    // target independent register mapper will just pick the first match it can
    // find, ignoring the required type.
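    // E.g., "{xmm0}" used with a v4f32 operand may come back from the generic
    // mapping as (XMM0, FR32); the checks below keep the register but switch
    // the class to VR128 so the full vector type is legal for it.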

    if (VT == MVT::f32 || VT == MVT::i32)
      Res.second = &X86::FR32RegClass;
    else if (VT == MVT::f64 || VT == MVT::i64)
      Res.second = &X86::FR64RegClass;
    else if (X86::VR128RegClass.hasType(VT))
      Res.second = &X86::VR128RegClass;
    else if (X86::VR256RegClass.hasType(VT))
      Res.second = &X86::VR256RegClass;
    else if (X86::VR512RegClass.hasType(VT))
      Res.second = &X86::VR512RegClass;
  }

  return Res;
}

int X86TargetLowering::getScalingFactorCost(const AddrMode &AM,
                                            Type *Ty) const {
  // Scaling factors are not free at all.
  // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
  // will take 2 allocations in the out of order engine instead of 1
  // for plain addressing mode, i.e. inst (reg1).
  // E.g.,
  // vaddps (%rsi,%rdx), %ymm0, %ymm1
  // Requires two allocations (one for the load, one for the computation)
  // whereas:
  // vaddps (%rsi), %ymm0, %ymm1
  // Requires just 1 allocation, i.e., freeing allocations for other operations
  // and having less micro operations to execute.
  //
  // For some X86 architectures, this is even worse because for instance for
  // stores, the complex addressing mode forces the instruction to use the
  // "load" ports instead of the dedicated "store" port.
  // E.g., on Haswell:
  // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
  // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
  if (isLegalAddressingMode(AM, Ty))
    // Scale represents reg2 * scale, thus account for 1
    // as soon as we use a second register.
    return AM.Scale != 0;
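  // Thus a legal mode of the form [reg1 + reg2*scale] reports a cost of 1, a
  // plain [reg1] mode reports 0, and an unsupported mode reports a negative
  // value.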
  return -1;
}

bool X86TargetLowering::isTargetFTOL() const {
  return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit();