//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/VariadicFunction.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "X86IntrinsicsInfo.h"

using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> ExperimentalVectorWideningLegalization(
    "x86-experimental-vector-widening-legalization", cl::init(false),
    cl::desc("Enable an experimental vector type legalization through widening "
             "rather than promotion."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLowering(
    "x86-experimental-vector-shuffle-lowering", cl::init(true),
    cl::desc("Enable an experimental vector shuffle lowering code path."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLegality(
    "x86-experimental-vector-shuffle-legality", cl::init(false),
    cl::desc("Enable experimental shuffle legality based on the experimental "
             "shuffle lowering. Should only be used with the experimental "
             "shuffle lowering."),
    cl::Hidden);

static cl::opt<int> ReciprocalEstimateRefinementSteps(
    "x86-recip-refinement-steps", cl::init(1),
    cl::desc("Specify the number of Newton-Raphson iterations applied to the "
             "result of the hardware reciprocal estimate instruction."),
    cl::NotHidden);

// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
                                SelectionDAG &DAG, SDLoc dl,
                                unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits()/vectorWidth;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements()/Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(ResultVT);

  // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
                               * ElemsPerChunk);
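  // For example, with vectorWidth == 128, extracting element 5 of a v8i32
  // yields NormalizedIdxVal == 4, i.e. the first element of the upper
  // 128-bit half.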

  // If the input is a buildvector just emit a smaller one.
  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
                       makeArrayRef(Vec->op_begin() + NormalizedIdxVal,
                                    ElemsPerChunk));

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
}

/// Generate a DAG to grab 128-bits from a vector > 128 bits. This
/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
/// instructions or a simple subregister reference. Idx is an index in the
/// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert((Vec.getValueType().is256BitVector() ||
          Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 128);
}

/// Generate a DAG to grab 256-bits from a 512-bit vector.
static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 256);
}

static SDValue InsertSubVector(SDValue Result, SDValue Vec,
                               unsigned IdxVal, SelectionDAG &DAG,
                               SDLoc dl, unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  // Inserting UNDEF is Result
  if (Vec.getOpcode() == ISD::UNDEF)
    return Result;
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant vectorWidth bits.
  unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/vectorWidth)
                               * ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
}

/// Generate a DAG to put 128-bits into a vector > 128 bits. This
/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
/// simple superregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}

static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
}

/// Concat two 128-bit vectors into a 256 bit vector using VINSERTF128
/// instructions. This is used because creating CONCAT_VECTOR nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
}

static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert256BitVector(V, V2, NumElems/2, DAG, dl);
}

X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  TD = getDataLayout();

  // Set up the TargetLowering object.
  static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);

  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget->isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Bypass expensive divides on Atom when compiling with O2.
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget->hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget->hasSlowDivide64() && Subtarget->is64Bit())
      addBypassSlowDiv(64, 16);
  }

  if (Subtarget->isTargetKnownWindowsMSVC()) {
    // Setup Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);

    // The _ftol2 runtime function has an unusual calling conv, which
    // is modeled by a special pseudo-instruction.
    setLibcallName(RTLIB::FPTOUINT_F64_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F64_I32, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I32, nullptr);
  }

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetWindowsGNU()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
  setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
  setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
    setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
  } else if (!TM.Options.UseSoftFloat) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
  setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);

  if (!TM.Options.UseSoftFloat) {
    // SSE has no i16 to fp conversion, only i32
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
      // f32 and f64 cases are Legal, f80 case is not
      setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom);
      setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
    setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Promote);
  }

  // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
  setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTS's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
  setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom);
    setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote);
  setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
  setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand);
    setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
  } else if (!TM.Options.UseSoftFloat) {
    // Since AVX is a superset of SSE3, only check for SSE here.
    if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom);
  }

  if (isTargetFTOL()) {
    // Use the _ftol2 runtime function, which has a pseudo-instruction
    // to handle its weird calling convention.
    setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Custom);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST , MVT::f32 , Expand);
    setOperationAction(ISD::BITCAST , MVT::i32 , Expand);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::BITCAST , MVT::f64 , Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST , MVT::i64 , Expand);
    }
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.

  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
    setOperationAction(ISD::ADDC, VT, Custom);
    setOperationAction(ISD::ADDE, VT, Custom);
    setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }
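
  // Conditional branches and selects are built from explicit compares on X86,
  // so BR_CC and SELECT_CC are expanded for all types; SETCC, BRCOND and
  // SELECT are custom lowered instead, and BR_JT is expanded to an indirect
  // branch through the jump table.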
  setOperationAction(ISD::BR_JT , MVT::Other, Expand);
  setOperationAction(ISD::BRCOND , MVT::Other, Custom);
  setOperationAction(ISD::BR_CC , MVT::f32, Expand);
  setOperationAction(ISD::BR_CC , MVT::f64, Expand);
  setOperationAction(ISD::BR_CC , MVT::f80, Expand);
  setOperationAction(ISD::BR_CC , MVT::i8, Expand);
  setOperationAction(ISD::BR_CC , MVT::i16, Expand);
  setOperationAction(ISD::BR_CC , MVT::i32, Expand);
  setOperationAction(ISD::BR_CC , MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC , MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC , MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC , MVT::f80, Expand);
  setOperationAction(ISD::SELECT_CC , MVT::i8, Expand);
  setOperationAction(ISD::SELECT_CC , MVT::i16, Expand);
  setOperationAction(ISD::SELECT_CC , MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC , MVT::i64, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
  setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
  setOperationAction(ISD::FREM , MVT::f32 , Expand);
  setOperationAction(ISD::FREM , MVT::f64 , Expand);
  setOperationAction(ISD::FREM , MVT::f80 , Expand);
  setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom);

  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationAction(ISD::CTTZ , MVT::i8 , Promote);
  AddPromotedToType (ISD::CTTZ , MVT::i8 , MVT::i32);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF , MVT::i8 , Promote);
  AddPromotedToType (ISD::CTTZ_ZERO_UNDEF , MVT::i8 , MVT::i32);
  if (Subtarget->hasBMI()) {
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTTZ , MVT::i16 , Custom);
    setOperationAction(ISD::CTTZ , MVT::i32 , Custom);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ , MVT::i64 , Custom);
  }

  if (Subtarget->hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationAction(ISD::CTLZ , MVT::i8 , Promote);
    AddPromotedToType (ISD::CTLZ , MVT::i8 , MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Promote);
    AddPromotedToType (ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTLZ , MVT::i8 , Custom);
    setOperationAction(ISD::CTLZ , MVT::i16 , Custom);
    setOperationAction(ISD::CTLZ , MVT::i32 , Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::CTLZ , MVT::i64 , Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }

  // Special handling for half-precision floating point conversions.
  // If we don't have F16C support, then lower half float conversions
  // into library calls.
  if (TM.Options.UseSoftFloat || !Subtarget->hasF16C()) {
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  }

  // There's never any support for operations beyond MVT::f32.
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f80, MVT::f16, Expand);

  if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP , MVT::i8 , Promote);
  } else {
    setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
    setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
    setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP , MVT::i64 , Expand);
  }
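
  // READCYCLECOUNTER is custom lowered so it can be expanded to an RDTSC-based
  // sequence.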
  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);

  if (!Subtarget->hasMOVBE())
    setOperationAction(ISD::BSWAP , MVT::i16 , Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT , MVT::i1 , Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT , MVT::i8 , Custom);
  setOperationAction(ISD::SELECT , MVT::i16 , Custom);
  setOperationAction(ISD::SELECT , MVT::i32 , Custom);
  setOperationAction(ISD::SELECT , MVT::f32 , Custom);
  setOperationAction(ISD::SELECT , MVT::f64 , Custom);
  setOperationAction(ISD::SELECT , MVT::f80 , Custom);
  setOperationAction(ISD::SETCC , MVT::i8 , Custom);
  setOperationAction(ISD::SETCC , MVT::i16 , Custom);
  setOperationAction(ISD::SETCC , MVT::i32 , Custom);
  setOperationAction(ISD::SETCC , MVT::f32 , Custom);
  setOperationAction(ISD::SETCC , MVT::f64 , Custom);
  setOperationAction(ISD::SETCC , MVT::f80 , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT , MVT::i64 , Custom);
    setOperationAction(ISD::SETCC , MVT::i64 , Custom);
  }
  setOperationAction(ISD::EH_RETURN , MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuation, user-level threading, etc. As a result, no
  // other SjLj exception interfaces are implemented and please don't build
  // your own exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  setOperationAction(ISD::ConstantPool , MVT::i32 , Custom);
  setOperationAction(ISD::JumpTable , MVT::i32 , Custom);
  setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32 , Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom);
  setOperationAction(ISD::BlockAddress , MVT::i32 , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool , MVT::i64 , Custom);
    setOperationAction(ISD::JumpTable , MVT::i64 , Custom);
    setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64 , Custom);
    setOperationAction(ISD::BlockAddress , MVT::i64 , Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom);
  setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom);
  setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS , MVT::i64 , Custom);
    setOperationAction(ISD::SRA_PARTS , MVT::i64 , Custom);
    setOperationAction(ISD::SRL_PARTS , MVT::i64 , Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH , MVT::Other, Legal);
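
  // ATOMIC_FENCE is custom lowered: only a sequentially consistent,
  // cross-thread fence needs a real instruction (MFENCE); weaker fences
  // become a compiler-only barrier.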
  setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);

  // Expand certain atomics
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (Subtarget->hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }

  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWin64()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);
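
  // TRAP selects to ud2 and DEBUGTRAP to int3, so both are Legal.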
  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART , MVT::Other, Custom);
  setOperationAction(ISD::VAEND , MVT::Other, Expand);
  if (Subtarget->is64Bit() && !Subtarget->isTargetWin64()) {
    // TargetInfo::X86_64ABIBuiltinVaList
    setOperationAction(ISD::VAARG , MVT::Other, Custom);
    setOperationAction(ISD::VACOPY , MVT::Other, Custom);
  } else {
    // TargetInfo::CharPtrBuiltinVaList
    setOperationAction(ISD::VAARG , MVT::Other, Expand);
    setOperationAction(ISD::VACOPY , MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
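
  // DYNAMIC_STACKALLOC is custom lowered so that stack-probe calls (e.g. on
  // Windows) and segmented-stack allocation can be handled when required.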
  setOperationAction(ISD::DYNAMIC_STACKALLOC, getPointerTy(), Custom);

  if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::FR64RegClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f64, Custom);
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f64, Custom);
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // Lower this to FGETSIGNx86 plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN , MVT::f64, Expand);
    setOperationAction(ISD::FCOS , MVT::f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0)); // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN , MVT::f64, Expand);
      setOperationAction(ISD::FCOS , MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (!TM.Options.UseSoftFloat) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN , MVT::f64, Expand);
      setOperationAction(ISD::FSIN , MVT::f32, Expand);
      setOperationAction(ISD::FCOS , MVT::f64, Expand);
      setOperationAction(ISD::FCOS , MVT::f32, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Long double always uses X87.
  if (!TM.Options.UseSoftFloat) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
      addLegalFPImmediate(TmpFlt); // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt); // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2); // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
    }

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN , MVT::f80, Expand);
      setOperationAction(ISD::FCOS , MVT::f80, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
    }

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL, MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT, MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW , MVT::f32 , Expand);
  setOperationAction(ISD::FPOW , MVT::f64 , Expand);
  setOperationAction(ISD::FPOW , MVT::f80 , Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::ADD , VT, Expand);
    setOperationAction(ISD::SUB , VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::MUL , VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  // MMX-sized vectors (other than x86mmx) are expected to be expanded
  // into smaller operations.
  setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
  setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
  setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
  setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
  setOperationAction(ISD::AND, MVT::v8i8, Expand);
  setOperationAction(ISD::AND, MVT::v4i16, Expand);
  setOperationAction(ISD::AND, MVT::v2i32, Expand);
  setOperationAction(ISD::AND, MVT::v1i64, Expand);
  setOperationAction(ISD::OR, MVT::v8i8, Expand);
  setOperationAction(ISD::OR, MVT::v4i16, Expand);
  setOperationAction(ISD::OR, MVT::v2i32, Expand);
  setOperationAction(ISD::OR, MVT::v1i64, Expand);
  setOperationAction(ISD::XOR, MVT::v8i8, Expand);
  setOperationAction(ISD::XOR, MVT::v4i16, Expand);
  setOperationAction(ISD::XOR, MVT::v2i32, Expand);
  setOperationAction(ISD::XOR, MVT::v1i64, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
  setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
  setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
  setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
  setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
  setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
  setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, &X86::VR128RegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::FABS, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, &X86::VR128RegClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
    setOperationAction(ISD::FABS, MVT::v2f64, Custom);

    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Only provide customized ctpop vector bit twiddling for vector types we
    // know to perform better than using the popcnt instructions on each vector
    // element. If popcnt isn't supported, always provide the custom version.
    if (!Subtarget->hasPOPCNT()) {
      setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
      setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);
    }

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      // Do not attempt to custom lower non-128-bit vectors
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    // We support custom legalizing of sext and anyext loads for specific
    // memory vector types which we can load as a scalar (or sequence of
    // scalars) and extend in-register to a legal 128-bit vector type. For sext
    // loads these must work with a single scalar load.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
    }

    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-128-bit vectors
      if (!VT.is128BitVector())
        continue;

      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType (ISD::AND, VT, MVT::v2i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType (ISD::OR, VT, MVT::v2i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType (ISD::XOR, VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType (ISD::LOAD, VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v2i64);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    // As there is no 64-bit GPR available, we need to build a special custom
    // sequence to convert from v2i32 to v2f32.
    if (!Subtarget->is64Bit())
      setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);

    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    setOperationAction(ISD::VSELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v2i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4i32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8i16, Custom);
    // There is no BLENDI for byte vectors. We don't need to custom lower
    // some vselects for now.
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);

    // SSE41 brings specific instructions for doing vector sign extend even in
    // cases where we don't have SRA.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
    }

    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
    setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);

    setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);

    // i8 and i16 vectors are custom because the source register and source
    // memory operand types are not the same width. f32 vectors are
    // custom since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    // FIXME: these should be Legal, but that's only for the case where
    // the index is constant. For now custom expand to deal with that.
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }

  if (Subtarget->hasSSE2()) {
    setOperationAction(ISD::SRL, MVT::v8i16, Custom);
    setOperationAction(ISD::SRL, MVT::v16i8, Custom);

    setOperationAction(ISD::SHL, MVT::v8i16, Custom);
    setOperationAction(ISD::SHL, MVT::v16i8, Custom);

    setOperationAction(ISD::SRA, MVT::v8i16, Custom);
    setOperationAction(ISD::SRA, MVT::v16i8, Custom);

    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
    setOperationAction(ISD::SRL, MVT::v2i64, Custom);
    setOperationAction(ISD::SRL, MVT::v4i32, Custom);

    setOperationAction(ISD::SHL, MVT::v2i64, Custom);
    setOperationAction(ISD::SHL, MVT::v4i32, Custom);

    setOperationAction(ISD::SRA, MVT::v4i32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasFp256()) {
    addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64, &X86::VR256RegClass);

    setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i64, Legal);

    setOperationAction(ISD::FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v8f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v8f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
    setOperationAction(ISD::FABS, MVT::v8f32, Custom);

    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
    setOperationAction(ISD::FABS, MVT::v4f64, Custom);

    // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
    // even though v8i16 is a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);

    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);

    setOperationAction(ISD::SRL, MVT::v16i16, Custom);
    setOperationAction(ISD::SRL, MVT::v32i8, Custom);

    setOperationAction(ISD::SHL, MVT::v16i16, Custom);
    setOperationAction(ISD::SHL, MVT::v32i8, Custom);

    setOperationAction(ISD::SRA, MVT::v16i16, Custom);
    setOperationAction(ISD::SRA, MVT::v32i8, Custom);

    setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i64, Custom);

    setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::VSELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8i32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
1266 if (Subtarget->hasFMA() || Subtarget->hasFMA4()) {
1267 setOperationAction(ISD::FMA, MVT::v8f32, Legal);
1268 setOperationAction(ISD::FMA, MVT::v4f64, Legal);
1269 setOperationAction(ISD::FMA, MVT::v4f32, Legal);
1270 setOperationAction(ISD::FMA, MVT::v2f64, Legal);
1271 setOperationAction(ISD::FMA, MVT::f32, Legal);
1272 setOperationAction(ISD::FMA, MVT::f64, Legal);
1275 if (Subtarget->hasInt256()) {
1276 setOperationAction(ISD::ADD, MVT::v4i64, Legal);
1277 setOperationAction(ISD::ADD, MVT::v8i32, Legal);
1278 setOperationAction(ISD::ADD, MVT::v16i16, Legal);
1279 setOperationAction(ISD::ADD, MVT::v32i8, Legal);
1281 setOperationAction(ISD::SUB, MVT::v4i64, Legal);
1282 setOperationAction(ISD::SUB, MVT::v8i32, Legal);
1283 setOperationAction(ISD::SUB, MVT::v16i16, Legal);
1284 setOperationAction(ISD::SUB, MVT::v32i8, Legal);
1286 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1287 setOperationAction(ISD::MUL, MVT::v8i32, Legal);
1288 setOperationAction(ISD::MUL, MVT::v16i16, Legal);
1289 // Don't lower v32i8 because there is no 128-bit byte mul
1291 setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
1292 setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
1293 setOperationAction(ISD::MULHU, MVT::v16i16, Legal);
1294 setOperationAction(ISD::MULHS, MVT::v16i16, Legal);
1296 setOperationAction(ISD::VSELECT, MVT::v16i16, Custom);
1297 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1299 // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
1300 // when we have a 256-bit-wide blend with immediate.
1301 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1303 // Only provide customized ctpop vector bit twiddling for vector types we
1304 // know to perform better than using the popcnt instructions on each
1305 // vector element. If popcnt isn't supported, always provide the custom version.
1307 if (!Subtarget->hasPOPCNT())
1308 setOperationAction(ISD::CTPOP, MVT::v4i64, Custom);
1310 // Custom CTPOP always performs better on natively supported v8i32
1311 setOperationAction(ISD::CTPOP, MVT::v8i32, Custom);
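// (Illustrative only: a whole-vector popcount expansion is typically the
// classic SWAR sequence, e.g. per 64-bit lane
//   x -= (x >> 1) & 0x5555555555555555;
//   x  = (x & 0x3333333333333333) + ((x >> 2) & 0x3333333333333333);
//   x  = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0f;
//   popcnt = (x * 0x0101010101010101) >> 56;
// The exact node sequence emitted by the custom hook may differ.)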
1313 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1314 setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1315 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1316 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1317 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1318 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1319 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1321 setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1322 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1323 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1324 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1325 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1326 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1328 setOperationAction(ISD::ADD, MVT::v4i64, Custom);
1329 setOperationAction(ISD::ADD, MVT::v8i32, Custom);
1330 setOperationAction(ISD::ADD, MVT::v16i16, Custom);
1331 setOperationAction(ISD::ADD, MVT::v32i8, Custom);
1333 setOperationAction(ISD::SUB, MVT::v4i64, Custom);
1334 setOperationAction(ISD::SUB, MVT::v8i32, Custom);
1335 setOperationAction(ISD::SUB, MVT::v16i16, Custom);
1336 setOperationAction(ISD::SUB, MVT::v32i8, Custom);
1338 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1339 setOperationAction(ISD::MUL, MVT::v8i32, Custom);
1340 setOperationAction(ISD::MUL, MVT::v16i16, Custom);
1341 // Don't lower v32i8 because there is no 128-bit byte mul
1344 // In the customized shift lowering, the legal cases in AVX2 will be recognized.
1346 setOperationAction(ISD::SRL, MVT::v4i64, Custom);
1347 setOperationAction(ISD::SRL, MVT::v8i32, Custom);
1349 setOperationAction(ISD::SHL, MVT::v4i64, Custom);
1350 setOperationAction(ISD::SHL, MVT::v8i32, Custom);
1352 setOperationAction(ISD::SRA, MVT::v8i32, Custom);
1354 // Custom lower several nodes for 256-bit types.
1355 for (MVT VT : MVT::vector_valuetypes()) {
1356 if (VT.getScalarSizeInBits() >= 32) {
1357 setOperationAction(ISD::MLOAD, VT, Legal);
1358 setOperationAction(ISD::MSTORE, VT, Legal);
1360 // Extract subvector is special because the value type
1361 // (result) is 128-bit but the source is 256-bit wide.
1362 if (VT.is128BitVector()) {
1363 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1365 // Do not attempt to custom lower other non-256-bit vectors
1366 if (!VT.is256BitVector())
1369 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1370 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1371 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1372 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1373 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1374 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1375 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1378 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
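// AND/OR/XOR are bit-parallel, and a full 256-bit SELECT or LOAD does not care
// about the element type either, so promoting everything to v4i64 lets a
// single set of 256-bit patterns cover every element width; the operands are
// simply bitcast to v4i64 and the result bitcast back.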
1379 for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
1380 MVT VT = (MVT::SimpleValueType)i;
1382 // Do not attempt to promote non-256-bit vectors
1383 if (!VT.is256BitVector())
1386 setOperationAction(ISD::AND, VT, Promote);
1387 AddPromotedToType (ISD::AND, VT, MVT::v4i64);
1388 setOperationAction(ISD::OR, VT, Promote);
1389 AddPromotedToType (ISD::OR, VT, MVT::v4i64);
1390 setOperationAction(ISD::XOR, VT, Promote);
1391 AddPromotedToType (ISD::XOR, VT, MVT::v4i64);
1392 setOperationAction(ISD::LOAD, VT, Promote);
1393 AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
1394 setOperationAction(ISD::SELECT, VT, Promote);
1395 AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
1399 if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512()) {
1400 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1401 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1402 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1403 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1405 addRegisterClass(MVT::i1, &X86::VK1RegClass);
1406 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1407 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1409 for (MVT VT : MVT::fp_vector_valuetypes())
1410 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
1412 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
1413 setOperationAction(ISD::SETCC, MVT::i1, Custom);
1414 setOperationAction(ISD::XOR, MVT::i1, Legal);
1415 setOperationAction(ISD::OR, MVT::i1, Legal);
1416 setOperationAction(ISD::AND, MVT::i1, Legal);
1417 setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
1418 setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
1419 setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
1420 setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
1421 setOperationAction(ISD::LOAD, MVT::v16i1, Legal);
1423 setOperationAction(ISD::FADD, MVT::v16f32, Legal);
1424 setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
1425 setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
1426 setOperationAction(ISD::FDIV, MVT::v16f32, Legal);
1427 setOperationAction(ISD::FSQRT, MVT::v16f32, Legal);
1428 setOperationAction(ISD::FNEG, MVT::v16f32, Custom);
1430 setOperationAction(ISD::FADD, MVT::v8f64, Legal);
1431 setOperationAction(ISD::FSUB, MVT::v8f64, Legal);
1432 setOperationAction(ISD::FMUL, MVT::v8f64, Legal);
1433 setOperationAction(ISD::FDIV, MVT::v8f64, Legal);
1434 setOperationAction(ISD::FSQRT, MVT::v8f64, Legal);
1435 setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
1436 setOperationAction(ISD::FMA, MVT::v8f64, Legal);
1437 setOperationAction(ISD::FMA, MVT::v16f32, Legal);
1439 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
1440 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
1441 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
1442 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
1443 if (Subtarget->is64Bit()) {
1444 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
1445 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
1446 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
1447 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
1449 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1450 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1451 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1452 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1453 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1454 setOperationAction(ISD::SINT_TO_FP, MVT::v8i1, Custom);
1455 setOperationAction(ISD::SINT_TO_FP, MVT::v16i1, Custom);
1456 setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Promote);
1457 setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Promote);
1458 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1459 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1460 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1461 setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
1462 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
1464 setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
1465 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1466 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1467 setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
1468 setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
1469 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1470 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1471 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1472 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1473 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1474 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
1475 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
1476 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
1478 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
1479 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
1480 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
1481 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
1482 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
1483 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Legal);
1485 setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
1486 setOperationAction(ISD::SETCC, MVT::v8i1, Custom);
1488 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1490 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
1491 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
1492 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
1493 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
1494 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
1495 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
1496 setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
1497 setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
1498 setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
1500 setOperationAction(ISD::ADD, MVT::v8i64, Legal);
1501 setOperationAction(ISD::ADD, MVT::v16i32, Legal);
1503 setOperationAction(ISD::SUB, MVT::v8i64, Legal);
1504 setOperationAction(ISD::SUB, MVT::v16i32, Legal);
1506 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1508 setOperationAction(ISD::SRL, MVT::v8i64, Custom);
1509 setOperationAction(ISD::SRL, MVT::v16i32, Custom);
1511 setOperationAction(ISD::SHL, MVT::v8i64, Custom);
1512 setOperationAction(ISD::SHL, MVT::v16i32, Custom);
1514 setOperationAction(ISD::SRA, MVT::v8i64, Custom);
1515 setOperationAction(ISD::SRA, MVT::v16i32, Custom);
1517 setOperationAction(ISD::AND, MVT::v8i64, Legal);
1518 setOperationAction(ISD::OR, MVT::v8i64, Legal);
1519 setOperationAction(ISD::XOR, MVT::v8i64, Legal);
1520 setOperationAction(ISD::AND, MVT::v16i32, Legal);
1521 setOperationAction(ISD::OR, MVT::v16i32, Legal);
1522 setOperationAction(ISD::XOR, MVT::v16i32, Legal);
1524 if (Subtarget->hasCDI()) {
1525 setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
1526 setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
1529 // Custom lower several nodes.
1530 for (MVT VT : MVT::vector_valuetypes()) {
1531 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1532 // Extract subvector is special because the value type
1533 // (result) is 256/128-bit but the source is 512-bit wide.
1534 if (VT.is128BitVector() || VT.is256BitVector()) {
1535 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1537 if (VT.getVectorElementType() == MVT::i1)
1538 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1540 // Do not attempt to custom lower other non-512-bit vectors
1541 if (!VT.is512BitVector())
1544 if (EltSize >= 32) {
1545 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1546 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1547 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1548 setOperationAction(ISD::VSELECT, VT, Legal);
1549 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1550 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1551 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1552 setOperationAction(ISD::MLOAD, VT, Legal);
1553 setOperationAction(ISD::MSTORE, VT, Legal);
1556 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1557 MVT VT = (MVT::SimpleValueType)i;
1559 // Do not attempt to promote non-512-bit vectors.
1560 if (!VT.is512BitVector())
1563 setOperationAction(ISD::SELECT, VT, Promote);
1564 AddPromotedToType (ISD::SELECT, VT, MVT::v8i64);
1568 if (!TM.Options.UseSoftFloat && Subtarget->hasBWI()) {
1569 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1570 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1572 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1573 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1575 setOperationAction(ISD::LOAD, MVT::v32i16, Legal);
1576 setOperationAction(ISD::LOAD, MVT::v64i8, Legal);
1577 setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
1578 setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
1579 setOperationAction(ISD::ADD, MVT::v32i16, Legal);
1580 setOperationAction(ISD::ADD, MVT::v64i8, Legal);
1581 setOperationAction(ISD::SUB, MVT::v32i16, Legal);
1582 setOperationAction(ISD::SUB, MVT::v64i8, Legal);
1583 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1585 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1586 const MVT VT = (MVT::SimpleValueType)i;
1588 const unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1590 // Do not attempt to promote non-512-bit vectors.
1591 if (!VT.is512BitVector())
1595 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1596 setOperationAction(ISD::VSELECT, VT, Legal);
1601 if (!TM.Options.UseSoftFloat && Subtarget->hasVLX()) {
1602 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1603 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1605 setOperationAction(ISD::SETCC, MVT::v4i1, Custom);
1606 setOperationAction(ISD::SETCC, MVT::v2i1, Custom);
1607 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Legal);
1609 setOperationAction(ISD::AND, MVT::v8i32, Legal);
1610 setOperationAction(ISD::OR, MVT::v8i32, Legal);
1611 setOperationAction(ISD::XOR, MVT::v8i32, Legal);
1612 setOperationAction(ISD::AND, MVT::v4i32, Legal);
1613 setOperationAction(ISD::OR, MVT::v4i32, Legal);
1614 setOperationAction(ISD::XOR, MVT::v4i32, Legal);
1617 // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
1618 // of this type with custom code.
1619 for (MVT VT : MVT::vector_valuetypes())
1620 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
1622 // We want to custom lower some of our intrinsics.
1623 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1624 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1625 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1626 if (!Subtarget->is64Bit())
1627 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1629 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1630 // handle type legalization for these operations here.
1632 // FIXME: We really should do custom legalization for addition and
1633 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1634 // than generic legalization for 64-bit multiplication-with-overflow, though.
1635 for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
1636 // Add/Sub/Mul with overflow operations are custom lowered.
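// (For illustration: @llvm.sadd.with.overflow.i32 is typically lowered to a
// plain 32-bit add whose flags result feeds a seto/jo, rather than to a
// compare-based expansion; the other .with.overflow intrinsics follow the
// same pattern with their respective condition codes.)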
1638 setOperationAction(ISD::SADDO, VT, Custom);
1639 setOperationAction(ISD::UADDO, VT, Custom);
1640 setOperationAction(ISD::SSUBO, VT, Custom);
1641 setOperationAction(ISD::USUBO, VT, Custom);
1642 setOperationAction(ISD::SMULO, VT, Custom);
1643 setOperationAction(ISD::UMULO, VT, Custom);
1647 if (!Subtarget->is64Bit()) {
1648 // These libcalls are not available in 32-bit.
1649 setLibcallName(RTLIB::SHL_I128, nullptr);
1650 setLibcallName(RTLIB::SRL_I128, nullptr);
1651 setLibcallName(RTLIB::SRA_I128, nullptr);
1654 // Combine sin / cos into one node or libcall if possible.
1655 if (Subtarget->hasSinCos()) {
1656 setLibcallName(RTLIB::SINCOS_F32, "sincosf");
1657 setLibcallName(RTLIB::SINCOS_F64, "sincos");
1658 if (Subtarget->isTargetDarwin()) {
1659 // For MacOSX, we don't want the normal expansion of a libcall to sincos.
1660 // We want to issue a libcall to __sincos_stret to avoid memory traffic.
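// (__sincos_stret computes both values in one call and hands them back in
// registers, e.g. the double variant is assumed to return sin/cos in
// %xmm0/%xmm1, so no stack temporary is needed for the second result.)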
1661 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1662 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1666 if (Subtarget->isTargetWin64()) {
1667 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1668 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1669 setOperationAction(ISD::SREM, MVT::i128, Custom);
1670 setOperationAction(ISD::UREM, MVT::i128, Custom);
1671 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1672 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
1675 // We have target-specific dag combine patterns for the following nodes:
1676 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1677 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1678 setTargetDAGCombine(ISD::BITCAST);
1679 setTargetDAGCombine(ISD::VSELECT);
1680 setTargetDAGCombine(ISD::SELECT);
1681 setTargetDAGCombine(ISD::SHL);
1682 setTargetDAGCombine(ISD::SRA);
1683 setTargetDAGCombine(ISD::SRL);
1684 setTargetDAGCombine(ISD::OR);
1685 setTargetDAGCombine(ISD::AND);
1686 setTargetDAGCombine(ISD::ADD);
1687 setTargetDAGCombine(ISD::FADD);
1688 setTargetDAGCombine(ISD::FSUB);
1689 setTargetDAGCombine(ISD::FMA);
1690 setTargetDAGCombine(ISD::SUB);
1691 setTargetDAGCombine(ISD::LOAD);
1692 setTargetDAGCombine(ISD::MLOAD);
1693 setTargetDAGCombine(ISD::STORE);
1694 setTargetDAGCombine(ISD::MSTORE);
1695 setTargetDAGCombine(ISD::ZERO_EXTEND);
1696 setTargetDAGCombine(ISD::ANY_EXTEND);
1697 setTargetDAGCombine(ISD::SIGN_EXTEND);
1698 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1699 setTargetDAGCombine(ISD::TRUNCATE);
1700 setTargetDAGCombine(ISD::SINT_TO_FP);
1701 setTargetDAGCombine(ISD::SETCC);
1702 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1703 setTargetDAGCombine(ISD::BUILD_VECTOR);
1704 setTargetDAGCombine(ISD::MUL);
1705 setTargetDAGCombine(ISD::XOR);
1707 computeRegisterProperties();
1709 // On Darwin, -Os means optimize for size without hurting performance, so
1710 // do not reduce the limit.
1711 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
1712 MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
1713 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
1714 MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1715 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
1716 MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
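// Each limit is the maximum number of individual stores the DAG builder may
// emit when expanding the corresponding intrinsic inline; a copy or fill that
// would need more stores than this is lowered to a libcall instead.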
1717 setPrefLoopAlignment(4); // 2^4 bytes.
1719 // Predictable cmovs don't hurt on Atom because it's in-order.
1720 PredictableSelectIsExpensive = !Subtarget->isAtom();
1721 EnableExtLdPromotion = true;
1722 setPrefFunctionAlignment(4); // 2^4 bytes.
1724 verifyIntrinsicTables();
1727 // This has so far only been implemented for 64-bit MachO.
1728 bool X86TargetLowering::useLoadStackGuardNode() const {
1729 return Subtarget->isTargetMachO() && Subtarget->is64Bit();
1732 TargetLoweringBase::LegalizeTypeAction
1733 X86TargetLowering::getPreferredVectorAction(EVT VT) const {
1734 if (ExperimentalVectorWideningLegalization &&
1735 VT.getVectorNumElements() != 1 &&
1736 VT.getVectorElementType().getSimpleVT() != MVT::i1)
1737 return TypeWidenVector;
1739 return TargetLoweringBase::getPreferredVectorAction(VT);
1742 EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
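// For example: a scalar compare yields i8 (or i1 once AVX-512 mask registers
// are available), a v16f32 compare yields the v16i1 mask type on AVX-512, and
// a v4f32 compare without VLX falls back to the plain integer vector v4i32.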
1744 return Subtarget->hasAVX512() ? MVT::i1 : MVT::i8;
1746 const unsigned NumElts = VT.getVectorNumElements();
1747 const EVT EltVT = VT.getVectorElementType();
1748 if (VT.is512BitVector()) {
1749 if (Subtarget->hasAVX512())
1750 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1751 EltVT == MVT::f32 || EltVT == MVT::f64)
1753 case 8: return MVT::v8i1;
1754 case 16: return MVT::v16i1;
1756 if (Subtarget->hasBWI())
1757 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1759 case 32: return MVT::v32i1;
1760 case 64: return MVT::v64i1;
1764 if (VT.is256BitVector() || VT.is128BitVector()) {
1765 if (Subtarget->hasVLX())
1766 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1767 EltVT == MVT::f32 || EltVT == MVT::f64)
1769 case 2: return MVT::v2i1;
1770 case 4: return MVT::v4i1;
1771 case 8: return MVT::v8i1;
1773 if (Subtarget->hasBWI() && Subtarget->hasVLX())
1774 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1776 case 8: return MVT::v8i1;
1777 case 16: return MVT::v16i1;
1778 case 32: return MVT::v32i1;
1782 return VT.changeVectorElementTypeToInteger();
1785 /// Helper for getByValTypeAlignment to determine
1786 /// the desired ByVal argument alignment.
1787 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
1790 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1791 if (VTy->getBitWidth() == 128)
1793 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1794 unsigned EltAlign = 0;
1795 getMaxByValAlign(ATy->getElementType(), EltAlign);
1796 if (EltAlign > MaxAlign)
1797 MaxAlign = EltAlign;
1798 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1799 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1800 unsigned EltAlign = 0;
1801 getMaxByValAlign(STy->getElementType(i), EltAlign);
1802 if (EltAlign > MaxAlign)
1803 MaxAlign = EltAlign;
1810 /// Return the desired alignment for ByVal aggregate
1811 /// function arguments in the caller parameter area. For X86, aggregates
1812 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
1813 /// are at 4-byte boundaries.
1814 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
1815 if (Subtarget->is64Bit()) {
1816 // Max of 8 and alignment of type.
1817 unsigned TyAlign = TD->getABITypeAlignment(Ty);
1824 if (Subtarget->hasSSE1())
1825 getMaxByValAlign(Ty, Align);
1829 /// Returns the target specific optimal type for load
1830 /// and store operations as a result of memset, memcpy, and memmove
1831 /// lowering. If DstAlign is zero, that means the destination
1832 /// alignment can satisfy any constraint. Similarly, if SrcAlign is zero it
1833 /// means there isn't a need to check it against alignment requirement,
1834 /// probably because the source does not need to be loaded. If 'IsMemset' is
1835 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
1836 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
1837 /// source is constant so it does not need to be loaded.
1838 /// It returns EVT::Other if the type should be determined using generic
1839 /// target-independent logic.
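/// For example, a 32-byte memcpy with sufficiently aligned (or fast-unaligned)
/// operands is typically expanded with a 256-bit vector type on AVX targets
/// and with v4i32 on plain SSE2, while small or poorly aligned copies fall
/// back to i64/i32 scalar moves.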
1841 X86TargetLowering::getOptimalMemOpType(uint64_t Size,
1842 unsigned DstAlign, unsigned SrcAlign,
1843 bool IsMemset, bool ZeroMemset,
1845 MachineFunction &MF) const {
1846 const Function *F = MF.getFunction();
1847 if ((!IsMemset || ZeroMemset) &&
1848 !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
1850 (Subtarget->isUnalignedMemAccessFast() ||
1851 ((DstAlign == 0 || DstAlign >= 16) &&
1852 (SrcAlign == 0 || SrcAlign >= 16)))) {
1854 if (Subtarget->hasInt256())
1856 if (Subtarget->hasFp256())
1859 if (Subtarget->hasSSE2())
1861 if (Subtarget->hasSSE1())
1863 } else if (!MemcpyStrSrc && Size >= 8 &&
1864 !Subtarget->is64Bit() &&
1865 Subtarget->hasSSE2()) {
1866 // Do not use f64 to lower memcpy if the source is a string constant. It's
1867 // better to use i32 to avoid the loads.
1871 if (Subtarget->is64Bit() && Size >= 8)
1876 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
1878 return X86ScalarSSEf32;
1879 else if (VT == MVT::f64)
1880 return X86ScalarSSEf64;
1885 X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
1890 *Fast = Subtarget->isUnalignedMemAccessFast();
1894 /// Return the entry encoding for a jump table in the
1895 /// current function. The returned value is a member of the
1896 /// MachineJumpTableInfo::JTEntryKind enum.
1897 unsigned X86TargetLowering::getJumpTableEncoding() const {
1898 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF symbol.
1900 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1901 Subtarget->isPICStyleGOT())
1902 return MachineJumpTableInfo::EK_Custom32;
1904 // Otherwise, use the normal jump table encoding heuristics.
1905 return TargetLowering::getJumpTableEncoding();
1909 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1910 const MachineBasicBlock *MBB,
1911 unsigned uid, MCContext &Ctx) const {
1912 assert(MBB->getParent()->getTarget().getRelocationModel() == Reloc::PIC_ &&
1913 Subtarget->isPICStyleGOT());
1914 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF references.
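// (Each 32-bit entry is then emitted as something like '.long .LBB0_7@GOTOFF',
// i.e. the block address expressed as an offset from the GOT base; the label
// name here is purely illustrative.)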
1916 return MCSymbolRefExpr::Create(MBB->getSymbol(),
1917 MCSymbolRefExpr::VK_GOTOFF, Ctx);
1920 /// Returns relocation base for the given PIC jumptable.
1921 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1922 SelectionDAG &DAG) const {
1923 if (!Subtarget->is64Bit())
1924 // This doesn't have SDLoc associated with it, but is not really the
1925 // same as a Register.
1926 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy());
1930 /// This returns the relocation base for the given PIC jumptable,
1931 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
1932 const MCExpr *X86TargetLowering::
1933 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1934 MCContext &Ctx) const {
1935 // X86-64 uses RIP relative addressing based on the jump table label.
1936 if (Subtarget->isPICStyleRIPRel())
1937 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1939 // Otherwise, the reference is relative to the PIC base.
1940 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
1943 // FIXME: Why is this routine here? Move to RegInfo!
1944 std::pair<const TargetRegisterClass*, uint8_t>
1945 X86TargetLowering::findRepresentativeClass(MVT VT) const {
1946 const TargetRegisterClass *RRC = nullptr;
1948 switch (VT.SimpleTy) {
1950 return TargetLowering::findRepresentativeClass(VT);
1951 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1952 RRC = Subtarget->is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
1955 RRC = &X86::VR64RegClass;
1957 case MVT::f32: case MVT::f64:
1958 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1959 case MVT::v4f32: case MVT::v2f64:
1960 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
1962 RRC = &X86::VR128RegClass;
1965 return std::make_pair(RRC, Cost);
1968 bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
1969 unsigned &Offset) const {
1970 if (!Subtarget->isTargetLinux())
1973 if (Subtarget->is64Bit()) {
1974 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
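// (In LLVM's X86 address-space convention, 256 corresponds to %gs and 257 to
// %fs; on 32-bit Linux the cookie is assumed to live at %gs:0x14.)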
1976 if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
1988 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1989 unsigned DestAS) const {
1990 assert(SrcAS != DestAS && "Expected different address spaces!");
1992 return SrcAS < 256 && DestAS < 256;
1995 //===----------------------------------------------------------------------===//
1996 // Return Value Calling Convention Implementation
1997 //===----------------------------------------------------------------------===//
1999 #include "X86GenCallingConv.inc"
2002 X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2003 MachineFunction &MF, bool isVarArg,
2004 const SmallVectorImpl<ISD::OutputArg> &Outs,
2005 LLVMContext &Context) const {
2006 SmallVector<CCValAssign, 16> RVLocs;
2007 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2008 return CCInfo.CheckReturn(Outs, RetCC_X86);
2011 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2012 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2017 X86TargetLowering::LowerReturn(SDValue Chain,
2018 CallingConv::ID CallConv, bool isVarArg,
2019 const SmallVectorImpl<ISD::OutputArg> &Outs,
2020 const SmallVectorImpl<SDValue> &OutVals,
2021 SDLoc dl, SelectionDAG &DAG) const {
2022 MachineFunction &MF = DAG.getMachineFunction();
2023 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2025 SmallVector<CCValAssign, 16> RVLocs;
2026 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2027 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2030 SmallVector<SDValue, 6> RetOps;
2031 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2032 // Operand #1 = Bytes To Pop
2033 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
2036 // Copy the result values into the output registers.
2037 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2038 CCValAssign &VA = RVLocs[i];
2039 assert(VA.isRegLoc() && "Can only return in registers!");
2040 SDValue ValToCopy = OutVals[i];
2041 EVT ValVT = ValToCopy.getValueType();
2043 // Promote values to the appropriate types.
2044 if (VA.getLocInfo() == CCValAssign::SExt)
2045 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2046 else if (VA.getLocInfo() == CCValAssign::ZExt)
2047 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2048 else if (VA.getLocInfo() == CCValAssign::AExt)
2049 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2050 else if (VA.getLocInfo() == CCValAssign::BCvt)
2051 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy);
2053 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2054 "Unexpected FP-extend for return value.");
2056 // If this is x86-64, and we disabled SSE, we can't return FP values,
2057 // or SSE or MMX vectors.
2058 if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
2059 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
2060 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
2061 report_fatal_error("SSE register return with SSE disabled");
2063 // Likewise we can't return F64 values with SSE1 only. gcc does so, but
2064 // llvm-gcc has never done it right and no one has noticed, so this
2065 // should be OK for now.
2066 if (ValVT == MVT::f64 &&
2067 (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
2068 report_fatal_error("SSE2 register return with SSE2 disabled");
2070 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2071 // the RET instruction and handled by the FP Stackifier.
2072 if (VA.getLocReg() == X86::FP0 ||
2073 VA.getLocReg() == X86::FP1) {
2074 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2075 // change the value to the FP stack register class.
2076 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2077 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2078 RetOps.push_back(ValToCopy);
2079 // Don't emit a copytoreg.
2083 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2084 // which is returned in RAX / RDX.
2085 if (Subtarget->is64Bit()) {
2086 if (ValVT == MVT::x86mmx) {
2087 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2088 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy);
2089 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2091 // If we don't have SSE2 available, convert to v4f32 so the generated
2092 // register is legal.
2093 if (!Subtarget->hasSSE2())
2094 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, ValToCopy);
2099 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
2100 Flag = Chain.getValue(1);
2101 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2104 // The x86-64 ABIs require that for returning structs by value we copy
2105 // the sret argument into %rax/%eax (depending on ABI) for the return.
2106 // Win32 requires us to put the sret argument to %eax as well.
2107 // We saved the argument into a virtual register in the entry block,
2108 // so now we copy the value out and into %rax/%eax.
2110 // Checking Function.hasStructRetAttr() here is insufficient because the IR
2111 // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
2112 // false, then an sret argument may be implicitly inserted in the SelDAG. In
2113 // either case FuncInfo->setSRetReturnReg() will have been called.
2114 if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
2115 assert((Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) &&
2116 "No need for an sret register");
2117 SDValue Val = DAG.getCopyFromReg(Chain, dl, SRetReg, getPointerTy());
2120 = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ?
2121 X86::RAX : X86::EAX;
2122 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2123 Flag = Chain.getValue(1);
2125 // RAX/EAX now acts like a return value.
2126 RetOps.push_back(DAG.getRegister(RetValReg, getPointerTy()));
2129 RetOps[0] = Chain; // Update chain.
2131 // Add the flag if we have it.
2133 RetOps.push_back(Flag);
2135 return DAG.getNode(X86ISD::RET_FLAG, dl, MVT::Other, RetOps);
2138 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2139 if (N->getNumValues() != 1)
2141 if (!N->hasNUsesOfValue(1, 0))
2144 SDValue TCChain = Chain;
2145 SDNode *Copy = *N->use_begin();
2146 if (Copy->getOpcode() == ISD::CopyToReg) {
2147 // If the copy has a glue operand, we conservatively assume it isn't safe to
2148 // perform a tail call.
2149 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2151 TCChain = Copy->getOperand(0);
2152 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2155 bool HasRet = false;
2156 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2158 if (UI->getOpcode() != X86ISD::RET_FLAG)
2160 // If we are returning more than one value, we can definitely
2161 // not make a tail call; see PR19530.
2162 if (UI->getNumOperands() > 4)
2164 if (UI->getNumOperands() == 4 &&
2165 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2178 X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
2179 ISD::NodeType ExtendKind) const {
2181 // TODO: Is this also valid on 32-bit?
2182 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
2183 ReturnMVT = MVT::i8;
2185 ReturnMVT = MVT::i32;
2187 EVT MinVT = getRegisterType(Context, ReturnMVT);
2188 return VT.bitsLT(MinVT) ? MinVT : VT;
2191 /// Lower the result values of a call into the
2192 /// appropriate copies out of appropriate physical registers.
2195 X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
2196 CallingConv::ID CallConv, bool isVarArg,
2197 const SmallVectorImpl<ISD::InputArg> &Ins,
2198 SDLoc dl, SelectionDAG &DAG,
2199 SmallVectorImpl<SDValue> &InVals) const {
2201 // Assign locations to each value returned by this call.
2202 SmallVector<CCValAssign, 16> RVLocs;
2203 bool Is64Bit = Subtarget->is64Bit();
2204 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2206 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2208 // Copy all of the result registers out of their specified physreg.
2209 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2210 CCValAssign &VA = RVLocs[i];
2211 EVT CopyVT = VA.getValVT();
2213 // If this is x86-64, and we disabled SSE, we can't return FP values
2214 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
2215 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
2216 report_fatal_error("SSE register return with SSE disabled");
2219 // If we prefer to use the value in xmm registers, copy it out as f80 and
2220 // use a truncate to move it from fp stack reg to xmm reg.
2221 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
2222 isScalarFPTypeInSSEReg(VA.getValVT()))
2225 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
2226 CopyVT, InFlag).getValue(1);
2227 SDValue Val = Chain.getValue(0);
2229 if (CopyVT != VA.getValVT())
2230 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
2231 // This truncation won't change the value.
2232 DAG.getIntPtrConstant(1));
2234 InFlag = Chain.getValue(2);
2235 InVals.push_back(Val);
2241 //===----------------------------------------------------------------------===//
2242 // C & StdCall & Fast Calling Convention implementation
2243 //===----------------------------------------------------------------------===//
2244 // The StdCall calling convention is standard for many Windows API
2245 // routines. It differs from the C calling convention in just one respect:
2246 // the callee, not the caller, cleans up the stack. Symbols are also
2247 // decorated (an @<argument-bytes> suffix). It doesn't support any vector arguments.
2248 // For info on fast calling convention see Fast Calling Convention (tail call)
2249 // implementation LowerX86_32FastCCCallTo.
2251 /// CallIsStructReturn - Determines whether a call uses struct return semantics.
2253 enum StructReturnType {
2258 static StructReturnType
2259 callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
2261 return NotStructReturn;
2263 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
2264 if (!Flags.isSRet())
2265 return NotStructReturn;
2266 if (Flags.isInReg())
2267 return RegStructReturn;
2268 return StackStructReturn;
2271 /// Determines whether a function uses struct return semantics.
2272 static StructReturnType
2273 argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
2275 return NotStructReturn;
2277 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
2278 if (!Flags.isSRet())
2279 return NotStructReturn;
2280 if (Flags.isInReg())
2281 return RegStructReturn;
2282 return StackStructReturn;
2285 /// Make a copy of an aggregate at the address specified by "Src" to the address
2286 /// "Dst" with size and alignment information specified by the specific
2287 /// parameter attribute. The copy will be passed as a byval function parameter.
2289 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
2290 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
2292 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
2294 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2295 /*isVolatile*/false, /*AlwaysInline=*/true,
2296 MachinePointerInfo(), MachinePointerInfo());
2299 /// Return true if the calling convention is one that
2300 /// supports tail call optimization.
2301 static bool IsTailCallConvention(CallingConv::ID CC) {
2302 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
2303 CC == CallingConv::HiPE);
2306 /// \brief Return true if the calling convention is a C calling convention.
2307 static bool IsCCallConvention(CallingConv::ID CC) {
2308 return (CC == CallingConv::C || CC == CallingConv::X86_64_Win64 ||
2309 CC == CallingConv::X86_64_SysV);
2312 bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
2313 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
2317 CallingConv::ID CalleeCC = CS.getCallingConv();
2318 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
2324 /// Return true if the function is being made into
2325 /// a tailcall target by changing its ABI.
2326 static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
2327 bool GuaranteedTailCallOpt) {
2328 return GuaranteedTailCallOpt && IsTailCallConvention(CC);
2332 X86TargetLowering::LowerMemArgument(SDValue Chain,
2333 CallingConv::ID CallConv,
2334 const SmallVectorImpl<ISD::InputArg> &Ins,
2335 SDLoc dl, SelectionDAG &DAG,
2336 const CCValAssign &VA,
2337 MachineFrameInfo *MFI,
2339 // Create the nodes corresponding to a load from this parameter slot.
2340 ISD::ArgFlagsTy Flags = Ins[i].Flags;
2341 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(
2342 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
2343 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
2346 // If the value is passed by pointer, we have the address passed instead of the value itself.
2348 if (VA.getLocInfo() == CCValAssign::Indirect)
2349 ValVT = VA.getLocVT();
2351 ValVT = VA.getValVT();
2353 // FIXME: For now, all byval parameter objects are marked mutable. This can be
2354 // changed with more analysis.
2355 // In case of tail call optimization, mark all arguments mutable, since they
2356 // could be overwritten by the lowering of arguments in case of a tail call.
2357 if (Flags.isByVal()) {
2358 unsigned Bytes = Flags.getByValSize();
2359 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
2360 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
2361 return DAG.getFrameIndex(FI, getPointerTy());
2363 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
2364 VA.getLocMemOffset(), isImmutable);
2365 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2366 return DAG.getLoad(ValVT, dl, Chain, FIN,
2367 MachinePointerInfo::getFixedStack(FI),
2368 false, false, false, 0);
2372 // FIXME: Get this from tablegen.
2373 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
2374 const X86Subtarget *Subtarget) {
2375 assert(Subtarget->is64Bit());
2377 if (Subtarget->isCallingConvWin64(CallConv)) {
2378 static const MCPhysReg GPR64ArgRegsWin64[] = {
2379 X86::RCX, X86::RDX, X86::R8, X86::R9
2381 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
2384 static const MCPhysReg GPR64ArgRegs64Bit[] = {
2385 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
2387 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
2390 // FIXME: Get this from tablegen.
2391 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
2392 CallingConv::ID CallConv,
2393 const X86Subtarget *Subtarget) {
2394 assert(Subtarget->is64Bit());
2395 if (Subtarget->isCallingConvWin64(CallConv)) {
2396 // The XMM registers which might contain var arg parameters are shadowed
2397 // in their paired GPR. So we only need to save the GPR to their home slots.
2399 // TODO: __vectorcall will change this.
2403 const Function *Fn = MF.getFunction();
2404 bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
2405 assert(!(MF.getTarget().Options.UseSoftFloat && NoImplicitFloatOps) &&
2406 "SSE register cannot be used when SSE is disabled!");
2407 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps ||
2408 !Subtarget->hasSSE1())
2409 // Kernel mode asks for SSE to be disabled, so there are no XMM argument registers.
2413 static const MCPhysReg XMMArgRegs64Bit[] = {
2414 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2415 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2417 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
2421 X86TargetLowering::LowerFormalArguments(SDValue Chain,
2422 CallingConv::ID CallConv,
2424 const SmallVectorImpl<ISD::InputArg> &Ins,
2427 SmallVectorImpl<SDValue> &InVals)
2429 MachineFunction &MF = DAG.getMachineFunction();
2430 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2432 const Function* Fn = MF.getFunction();
2433 if (Fn->hasExternalLinkage() &&
2434 Subtarget->isTargetCygMing() &&
2435 Fn->getName() == "main")
2436 FuncInfo->setForceFramePointer(true);
2438 MachineFrameInfo *MFI = MF.getFrameInfo();
2439 bool Is64Bit = Subtarget->is64Bit();
2440 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2442 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2443 "Var args not supported with calling convention fastcc, ghc or hipe");
2445 // Assign locations to all of the incoming arguments.
2446 SmallVector<CCValAssign, 16> ArgLocs;
2447 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2449 // Allocate shadow area for Win64
2451 CCInfo.AllocateStack(32, 8);
2453 CCInfo.AnalyzeFormalArguments(Ins, CC_X86);
2455 unsigned LastVal = ~0U;
2457 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2458 CCValAssign &VA = ArgLocs[i];
2459 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
2461 assert(VA.getValNo() != LastVal &&
2462 "Don't support value assigned to multiple locs yet");
2464 LastVal = VA.getValNo();
2466 if (VA.isRegLoc()) {
2467 EVT RegVT = VA.getLocVT();
2468 const TargetRegisterClass *RC;
2469 if (RegVT == MVT::i32)
2470 RC = &X86::GR32RegClass;
2471 else if (Is64Bit && RegVT == MVT::i64)
2472 RC = &X86::GR64RegClass;
2473 else if (RegVT == MVT::f32)
2474 RC = &X86::FR32RegClass;
2475 else if (RegVT == MVT::f64)
2476 RC = &X86::FR64RegClass;
2477 else if (RegVT.is512BitVector())
2478 RC = &X86::VR512RegClass;
2479 else if (RegVT.is256BitVector())
2480 RC = &X86::VR256RegClass;
2481 else if (RegVT.is128BitVector())
2482 RC = &X86::VR128RegClass;
2483 else if (RegVT == MVT::x86mmx)
2484 RC = &X86::VR64RegClass;
2485 else if (RegVT == MVT::i1)
2486 RC = &X86::VK1RegClass;
2487 else if (RegVT == MVT::v8i1)
2488 RC = &X86::VK8RegClass;
2489 else if (RegVT == MVT::v16i1)
2490 RC = &X86::VK16RegClass;
2491 else if (RegVT == MVT::v32i1)
2492 RC = &X86::VK32RegClass;
2493 else if (RegVT == MVT::v64i1)
2494 RC = &X86::VK64RegClass;
2496 llvm_unreachable("Unknown argument type!");
2498 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2499 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
2501 // If this is an 8 or 16-bit value, it is really passed promoted to 32
2502 // bits. Insert an assert[sz]ext to capture this, then truncate to the right size.
2504 if (VA.getLocInfo() == CCValAssign::SExt)
2505 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2506 DAG.getValueType(VA.getValVT()));
2507 else if (VA.getLocInfo() == CCValAssign::ZExt)
2508 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2509 DAG.getValueType(VA.getValVT()));
2510 else if (VA.getLocInfo() == CCValAssign::BCvt)
2511 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
2513 if (VA.isExtInLoc()) {
2514 // Handle MMX values passed in XMM regs.
2515 if (RegVT.isVector())
2516 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
2518 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2521 assert(VA.isMemLoc());
2522 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i);
2525 // If the value is passed via a pointer, do a load.
2526 if (VA.getLocInfo() == CCValAssign::Indirect)
2527 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
2528 MachinePointerInfo(), false, false, false, 0);
2530 InVals.push_back(ArgValue);
2533 if (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) {
2534 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2535 // The x86-64 ABIs require that for returning structs by value we copy
2536 // the sret argument into %rax/%eax (depending on ABI) for the return.
2537 // Win32 requires us to put the sret argument to %eax as well.
2538 // Save the argument into a virtual register so that we can access it
2539 // from the return points.
2540 if (Ins[i].Flags.isSRet()) {
2541 unsigned Reg = FuncInfo->getSRetReturnReg();
2543 MVT PtrTy = getPointerTy();
2544 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
2545 FuncInfo->setSRetReturnReg(Reg);
2547 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[i]);
2548 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
2554 unsigned StackSize = CCInfo.getNextStackOffset();
2555 // Align stack specially for tail calls.
2556 if (FuncIsMadeTailCallSafe(CallConv,
2557 MF.getTarget().Options.GuaranteedTailCallOpt))
2558 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
2560 // If the function takes variable number of arguments, make a frame index for
2561 // the start of the first vararg value... for expansion of llvm.va_start. We
2562 // can skip this if there are no va_start calls.
2563 if (MFI->hasVAStart() &&
2564 (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
2565 CallConv != CallingConv::X86_ThisCall))) {
2566 FuncInfo->setVarArgsFrameIndex(
2567 MFI->CreateFixedObject(1, StackSize, true));
2570 // Figure out if XMM registers are in use.
2571 assert(!(MF.getTarget().Options.UseSoftFloat &&
2572 Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
2573 "SSE register cannot be used when SSE is disabled!");
2575 // 64-bit calling conventions support varargs and register parameters, so we
2576 // have to do extra work to spill them in the prologue.
2577 if (Is64Bit && isVarArg && MFI->hasVAStart()) {
2578 // Find the first unallocated argument registers.
2579 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
2580 ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
2581 unsigned NumIntRegs =
2582 CCInfo.getFirstUnallocated(ArgGPRs.data(), ArgGPRs.size());
2583 unsigned NumXMMRegs =
2584 CCInfo.getFirstUnallocated(ArgXMMs.data(), ArgXMMs.size());
2585 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
2586 "SSE register cannot be used when SSE is disabled!");
2588 // Gather all the live in physical registers.
2589 SmallVector<SDValue, 6> LiveGPRs;
2590 SmallVector<SDValue, 8> LiveXMMRegs;
2592 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
2593 unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
2595 DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
2597 if (!ArgXMMs.empty()) {
2598 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2599 ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
2600 for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
2601 unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
2602 LiveXMMRegs.push_back(
2603 DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
2608 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
2609 // Get to the caller-allocated home save location. Add 8 to account
2610 // for the return address.
2611 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
2612 FuncInfo->setRegSaveFrameIndex(
2613 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
2614 // Fixup to set vararg frame on shadow area (4 x i64).
2616 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
2618 // For X86-64, if there are vararg parameters that are passed via
2619 // registers, then we must store them to their spots on the stack so
2620 // they may be loaded by dereferencing the result of va_next.
2621 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
2622 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
2623 FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
2624 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
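// With the full SysV register set (6 GPRs, 8 XMMs) this is the familiar
// 176-byte register save area: 6 * 8 + 8 * 16 = 48 + 128 bytes.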
2627 // Store the integer parameter registers.
2628 SmallVector<SDValue, 8> MemOps;
2629 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
2631 unsigned Offset = FuncInfo->getVarArgsGPOffset();
2632 for (SDValue Val : LiveGPRs) {
2633 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
2634 DAG.getIntPtrConstant(Offset));
2636 DAG.getStore(Val.getValue(1), dl, Val, FIN,
2637 MachinePointerInfo::getFixedStack(
2638 FuncInfo->getRegSaveFrameIndex(), Offset),
2640 MemOps.push_back(Store);
2644 if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
2645 // Now store the XMM (fp + vector) parameter registers.
2646 SmallVector<SDValue, 12> SaveXMMOps;
2647 SaveXMMOps.push_back(Chain);
2648 SaveXMMOps.push_back(ALVal);
2649 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2650 FuncInfo->getRegSaveFrameIndex()));
2651 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2652 FuncInfo->getVarArgsFPOffset()));
2653 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
2655 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
2656 MVT::Other, SaveXMMOps));
2659 if (!MemOps.empty())
2660 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
2663 if (isVarArg && MFI->hasMustTailInVarArgFunc()) {
2664 // Find the largest legal vector type.
2665 MVT VecVT = MVT::Other;
2666 // FIXME: Only some x86_32 calling conventions support AVX512.
2667 if (Subtarget->hasAVX512() &&
2668 (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
2669 CallConv == CallingConv::Intel_OCL_BI)))
2670 VecVT = MVT::v16f32;
2671 else if (Subtarget->hasAVX())
2673 else if (Subtarget->hasSSE2())
2676 // We forward some GPRs and some vector types.
2677 SmallVector<MVT, 2> RegParmTypes;
2678 MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
2679 RegParmTypes.push_back(IntVT);
2680 if (VecVT != MVT::Other)
2681 RegParmTypes.push_back(VecVT);
2683 // Compute the set of forwarded registers. The rest are scratch.
2684 SmallVectorImpl<ForwardedRegister> &Forwards =
2685 FuncInfo->getForwardedMustTailRegParms();
2686 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
2688 // Conservatively forward AL on x86_64, since it might be used for varargs.
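// (The SysV x86-64 ABI passes an upper bound on the number of vector registers
// used by a varargs call in %al, so a musttail forwarder must keep it intact.)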
2689 if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
2690 unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2691 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
2694 // Copy all forwards from physical to virtual registers.
2695 for (ForwardedRegister &F : Forwards) {
2696 // FIXME: Can we use a less constrained schedule?
2697 SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
2698 F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
2699 Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
2703 // Some CCs need callee pop.
2704 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
2705 MF.getTarget().Options.GuaranteedTailCallOpt)) {
2706 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
2708 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
2709 // If this is an sret function, the return should pop the hidden pointer.
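// (On those 32-bit targets the callee is expected to pop the 4-byte hidden
// pointer itself, i.e. return with 'ret $4'; MSVC-compatible runtimes leave it
// to the caller, hence the isOSMSVCRT() exclusion below.)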
2710 if (!Is64Bit && !IsTailCallConvention(CallConv) &&
2711 !Subtarget->getTargetTriple().isOSMSVCRT() &&
2712 argsAreStructReturn(Ins) == StackStructReturn)
2713 FuncInfo->setBytesToPopOnReturn(4);
2717 // RegSaveFrameIndex is X86-64 only.
2718 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
2719 if (CallConv == CallingConv::X86_FastCall ||
2720 CallConv == CallingConv::X86_ThisCall)
2721 // fastcc functions can't have varargs.
2722 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
2725 FuncInfo->setArgumentStackSize(StackSize);
2731 X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
2732 SDValue StackPtr, SDValue Arg,
2733 SDLoc dl, SelectionDAG &DAG,
2734 const CCValAssign &VA,
2735 ISD::ArgFlagsTy Flags) const {
2736 unsigned LocMemOffset = VA.getLocMemOffset();
2737 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
2738 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
2739 if (Flags.isByVal())
2740 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
2742 return DAG.getStore(Chain, dl, Arg, PtrOff,
2743 MachinePointerInfo::getStack(LocMemOffset),
2747 /// Emit a load of the return address if tail call
2748 /// optimization is performed and it is required.
2750 X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
2751 SDValue &OutRetAddr, SDValue Chain,
2752 bool IsTailCall, bool Is64Bit,
2753 int FPDiff, SDLoc dl) const {
2754 // Adjust the Return address stack slot.
2755 EVT VT = getPointerTy();
2756 OutRetAddr = getReturnAddressFrameIndex(DAG);
2758 // Load the "old" Return address.
2759 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
2760 false, false, false, 0);
2761 return SDValue(OutRetAddr.getNode(), 1);
2764 /// Emit a store of the return address if tail call
2765 /// optimization is performed and it is required (FPDiff!=0).
2766 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
2767 SDValue Chain, SDValue RetAddrFrIdx,
2768 EVT PtrVT, unsigned SlotSize,
2769 int FPDiff, SDLoc dl) {
2770 // Store the return address to the appropriate stack slot.
2771 if (!FPDiff) return Chain;
2772 // Calculate the new stack slot for the return address.
2773 int NewReturnAddrFI =
2774 MF.getFrameInfo()->CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
2776 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
2777 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
2778 MachinePointerInfo::getFixedStack(NewReturnAddrFI),
2784 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2785 SmallVectorImpl<SDValue> &InVals) const {
2786 SelectionDAG &DAG = CLI.DAG;
2788 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2789 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2790 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2791 SDValue Chain = CLI.Chain;
2792 SDValue Callee = CLI.Callee;
2793 CallingConv::ID CallConv = CLI.CallConv;
2794 bool &isTailCall = CLI.IsTailCall;
2795 bool isVarArg = CLI.IsVarArg;
2797 MachineFunction &MF = DAG.getMachineFunction();
2798 bool Is64Bit = Subtarget->is64Bit();
2799 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2800 StructReturnType SR = callIsStructReturn(Outs);
2801 bool IsSibcall = false;
2802 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
2804 if (MF.getTarget().Options.DisableTailCalls)
2807 bool IsMustTail = CLI.CS && CLI.CS->isMustTailCall();
2809 // Force this to be a tail call. The verifier rules are enough to ensure
2810 // that we can lower this successfully without moving the return address around.
2813 } else if (isTailCall) {
2814 // Check if it's really possible to do a tail call.
2815 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
2816 isVarArg, SR != NotStructReturn,
2817 MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
2818 Outs, OutVals, Ins, DAG);
2820 // Sibcalls are automatically detected tailcalls which do not require ABI changes.
2822 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
2829 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2830 "Var args not supported with calling convention fastcc, ghc or hipe");
2832 // Analyze operands of the call, assigning locations to each operand.
2833 SmallVector<CCValAssign, 16> ArgLocs;
2834 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2836 // Allocate shadow area for Win64
2838 CCInfo.AllocateStack(32, 8);
2840 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2842 // Get a count of how many bytes are to be pushed on the stack.
2843 unsigned NumBytes = CCInfo.getNextStackOffset();
2845 // This is a sibcall. The memory operands are available in the caller's
2846 // own caller's stack.
2848 else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
2849 IsTailCallConvention(CallConv))
2850 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
2853 if (isTailCall && !IsSibcall && !IsMustTail) {
2854 // Lower arguments at fp - stackoffset + fpdiff.
2855 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
2857 FPDiff = NumBytesCallerPushed - NumBytes;
2859 // Set the delta of movement of the return address stack slot.
2860 // But only set it if the delta is greater than the previous delta.
2861 if (FPDiff < X86Info->getTCReturnAddrDelta())
2862 X86Info->setTCReturnAddrDelta(FPDiff);
2865 unsigned NumBytesToPush = NumBytes;
2866 unsigned NumBytesToPop = NumBytes;
2868 // If we have an inalloca argument, all stack space has already been allocated
2869 // for us and will be right at the top of the stack. We don't support multiple
2870 // arguments passed in memory when using inalloca.
2871 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
2873 if (!ArgLocs.back().isMemLoc())
2874 report_fatal_error("cannot use inalloca attribute on a register "
2876 if (ArgLocs.back().getLocMemOffset() != 0)
2877 report_fatal_error("any parameter with the inalloca attribute must be "
2878 "the only memory argument");
2882 Chain = DAG.getCALLSEQ_START(
2883 Chain, DAG.getIntPtrConstant(NumBytesToPush, true), dl);
2885 SDValue RetAddrFrIdx;
2886 // Load return address for tail calls.
2887 if (isTailCall && FPDiff)
2888 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2889 Is64Bit, FPDiff, dl);
2891 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2892 SmallVector<SDValue, 8> MemOpChains;
2895 // Walk the register/memloc assignments, inserting copies/loads. In the case
2896 // of tail call optimization, arguments are handled later.
2897 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2898 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2899 // Skip inalloca arguments, they have already been written.
2900 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2901 if (Flags.isInAlloca())
2904 CCValAssign &VA = ArgLocs[i];
2905 EVT RegVT = VA.getLocVT();
2906 SDValue Arg = OutVals[i];
2907 bool isByVal = Flags.isByVal();
2909 // Promote the value if needed.
2910 switch (VA.getLocInfo()) {
2911 default: llvm_unreachable("Unknown loc info!");
2912 case CCValAssign::Full: break;
2913 case CCValAssign::SExt:
2914 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2916 case CCValAssign::ZExt:
2917 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2919 case CCValAssign::AExt:
2920 if (RegVT.is128BitVector()) {
2921 // Special case: passing MMX values in XMM registers.
2922 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
2923 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
2924 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
2926 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
2928 case CCValAssign::BCvt:
2929 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
2931 case CCValAssign::Indirect: {
2932 // Store the argument.
2933 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2934 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2935 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
2936 MachinePointerInfo::getFixedStack(FI),
2943 if (VA.isRegLoc()) {
2944 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2945 if (isVarArg && IsWin64) {
2946 // Win64 ABI requires argument XMM reg to be copied to the corresponding
2947 // shadow reg if callee is a varargs function.
2948 unsigned ShadowReg = 0;
2949 switch (VA.getLocReg()) {
2950 case X86::XMM0: ShadowReg = X86::RCX; break;
2951 case X86::XMM1: ShadowReg = X86::RDX; break;
2952 case X86::XMM2: ShadowReg = X86::R8; break;
2953 case X86::XMM3: ShadowReg = X86::R9; break;
2956 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2958 } else if (!IsSibcall && (!isTailCall || isByVal)) {
2959 assert(VA.isMemLoc());
2960 if (!StackPtr.getNode())
2961 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
2963 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2964 dl, DAG, VA, Flags));
2968 if (!MemOpChains.empty())
2969 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2971 if (Subtarget->isPICStyleGOT()) {
2972 // ELF / PIC requires GOT in the EBX register before function calls via PLT
2975 RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
2976 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy())));
2978 // If we are tail calling and generating PIC/GOT style code load the
2979 // address of the callee into ECX. The value in ecx is used as target of
2980 // the tail jump. This is done to circumvent the ebx/callee-saved problem
2981 // for tail calls on PIC/GOT architectures. Normally we would just put the
2982 // address of GOT into ebx and then call target@PLT. But for tail calls
2983 // ebx would be restored (since ebx is callee saved) before jumping to the
2984 // callee.
2986 // Note: The actual moving to ECX is done further down.
2987 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
2988 if (G && !G->getGlobal()->hasHiddenVisibility() &&
2989 !G->getGlobal()->hasProtectedVisibility())
2990 Callee = LowerGlobalAddress(Callee, DAG);
2991 else if (isa<ExternalSymbolSDNode>(Callee))
2992 Callee = LowerExternalSymbol(Callee, DAG);
2996 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
2997 // From AMD64 ABI document:
2998 // For calls that may call functions that use varargs or stdargs
2999 // (prototype-less calls or calls to functions containing ellipsis (...) in
3000 // the declaration) %al is used as a hidden argument to specify the number
3001 // of SSE registers used. The contents of %al do not need to match exactly
3002 // the number of registers, but must be an upper bound on the number of SSE
3003 // registers used and is in the range 0 - 8 inclusive.
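// Illustrative example (editor's note, values hypothetical): for a call such as
//   printf("%f %f\n", x, y);
// the two doubles land in XMM0 and XMM1, so NumXMMRegs below is 2 and the
// lowering materializes "movb $2, %al" before the call. Any value between 2
// and 8 would also satisfy the ABI, since %al only needs to be an upper bound.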
3005 // Count the number of XMM registers allocated.
3006 static const MCPhysReg XMMArgRegs[] = {
3007 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3008 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3010 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
3011 assert((Subtarget->hasSSE1() || !NumXMMRegs)
3012 && "SSE registers cannot be used when SSE is disabled");
3014 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
3015 DAG.getConstant(NumXMMRegs, MVT::i8)));
3018 if (isVarArg && IsMustTail) {
3019 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
3020 for (const auto &F : Forwards) {
3021 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3022 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
3026 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
3027 // don't need this because the eligibility check rejects calls that require
3028 // shuffling arguments passed in memory.
3029 if (!IsSibcall && isTailCall) {
3030 // Force all the incoming stack arguments to be loaded from the stack
3031 // before any new outgoing arguments are stored to the stack, because the
3032 // outgoing stack slots may alias the incoming argument stack slots, and
3033 // the alias isn't otherwise explicit. This is slightly more conservative
3034 // than necessary, because it means that each store effectively depends
3035 // on every argument instead of just those arguments it would clobber.
3036 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
3038 SmallVector<SDValue, 8> MemOpChains2;
3041 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3042 CCValAssign &VA = ArgLocs[i];
3045 assert(VA.isMemLoc());
3046 SDValue Arg = OutVals[i];
3047 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3048 // Skip inalloca arguments. They don't require any work.
3049 if (Flags.isInAlloca())
3051 // Create frame index.
3052 int32_t Offset = VA.getLocMemOffset()+FPDiff;
3053 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
3054 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
3055 FIN = DAG.getFrameIndex(FI, getPointerTy());
3057 if (Flags.isByVal()) {
3058 // Copy relative to framepointer.
3059 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
3060 if (!StackPtr.getNode())
3061 StackPtr = DAG.getCopyFromReg(Chain, dl,
3062 RegInfo->getStackRegister(),
3064 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
3066 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
3070 // Store relative to framepointer.
3071 MemOpChains2.push_back(
3072 DAG.getStore(ArgChain, dl, Arg, FIN,
3073 MachinePointerInfo::getFixedStack(FI),
3078 if (!MemOpChains2.empty())
3079 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
3081 // Store the return address to the appropriate stack slot.
3082 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
3083 getPointerTy(), RegInfo->getSlotSize(),
3087 // Build a sequence of copy-to-reg nodes chained together with token chain
3088 // and flag operands which copy the outgoing args into registers.
3090 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3091 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3092 RegsToPass[i].second, InFlag);
3093 InFlag = Chain.getValue(1);
3096 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
3097 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
3098 // In the 64-bit large code model, we have to make all calls
3099 // through a register, since the call instruction's 32-bit
3100 // pc-relative offset may not be large enough to hold the whole
3101 // address.
3102 } else if (Callee->getOpcode() == ISD::GlobalAddress) {
3103 // If the callee is a GlobalAddress node (quite common, every direct call
3104 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
3106 GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee);
3108 // We should use extra load for direct calls to dllimported functions in
3110 const GlobalValue *GV = G->getGlobal();
3111 if (!GV->hasDLLImportStorageClass()) {
3112 unsigned char OpFlags = 0;
3113 bool ExtraLoad = false;
3114 unsigned WrapperKind = ISD::DELETED_NODE;
3116 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
3117 // external symbols must go through the PLT in PIC mode. If the symbol
3118 // has hidden or protected visibility, or if it is static or local, then
3119 // we don't need to use the PLT - we can directly call it.
3120 if (Subtarget->isTargetELF() &&
3121 DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
3122 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
3123 OpFlags = X86II::MO_PLT;
3124 } else if (Subtarget->isPICStyleStubAny() &&
3125 (GV->isDeclaration() || GV->isWeakForLinker()) &&
3126 (!Subtarget->getTargetTriple().isMacOSX() ||
3127 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3128 // PC-relative references to external symbols should go through $stub,
3129 // unless we're building with the leopard linker or later, which
3130 // automatically synthesizes these stubs.
3131 OpFlags = X86II::MO_DARWIN_STUB;
3132 } else if (Subtarget->isPICStyleRIPRel() && isa<Function>(GV) &&
3133 cast<Function>(GV)->hasFnAttribute(Attribute::NonLazyBind)) {
3134 // If the function is marked as non-lazy, generate an indirect call
3135 // which loads from the GOT directly. This avoids runtime overhead
3136 // at the cost of eager binding (and one extra byte of encoding).
3137 OpFlags = X86II::MO_GOTPCREL;
3138 WrapperKind = X86ISD::WrapperRIP;
3142 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
3143 G->getOffset(), OpFlags);
3145 // Add a wrapper if needed.
3146 if (WrapperKind != ISD::DELETED_NODE)
3147 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee);
3148 // Add extra indirection if needed.
3150 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
3151 MachinePointerInfo::getGOT(),
3152 false, false, false, 0);
3154 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3155 unsigned char OpFlags = 0;
3157 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
3158 // external symbols should go through the PLT.
3159 if (Subtarget->isTargetELF() &&
3160 DAG.getTarget().getRelocationModel() == Reloc::PIC_) {
3161 OpFlags = X86II::MO_PLT;
3162 } else if (Subtarget->isPICStyleStubAny() &&
3163 (!Subtarget->getTargetTriple().isMacOSX() ||
3164 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3165 // PC-relative references to external symbols should go through $stub,
3166 // unless we're building with the leopard linker or later, which
3167 // automatically synthesizes these stubs.
3168 OpFlags = X86II::MO_DARWIN_STUB;
3171 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
3173 } else if (Subtarget->isTarget64BitILP32() &&
3174 Callee->getValueType(0) == MVT::i32) {
3175 // Zero-extend the 32-bit Callee address into a 64-bit one, per the x32 ABI.
3176 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
3179 // Returns a chain & a flag for retval copy to use.
3180 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3181 SmallVector<SDValue, 8> Ops;
3183 if (!IsSibcall && isTailCall) {
3184 Chain = DAG.getCALLSEQ_END(Chain,
3185 DAG.getIntPtrConstant(NumBytesToPop, true),
3186 DAG.getIntPtrConstant(0, true), InFlag, dl);
3187 InFlag = Chain.getValue(1);
3190 Ops.push_back(Chain);
3191 Ops.push_back(Callee);
3194 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
3196 // Add argument registers to the end of the list so that they are known live
3197 // into the call.
3198 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3199 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
3200 RegsToPass[i].second.getValueType()));
3202 // Add a register mask operand representing the call-preserved registers.
3203 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
3204 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
3205 assert(Mask && "Missing call preserved mask for calling convention");
3206 Ops.push_back(DAG.getRegisterMask(Mask));
3208 if (InFlag.getNode())
3209 Ops.push_back(InFlag);
3213 //// If this is the first return lowered for this function, add the regs
3214 //// to the liveout set for the function.
3215 // This isn't right, although it's probably harmless on x86; liveouts
3216 // should be computed from returns not tail calls. Consider a void
3217 // function making a tail call to a function returning int.
3218 return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
3221 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
3222 InFlag = Chain.getValue(1);
3224 // Create the CALLSEQ_END node.
3225 unsigned NumBytesForCalleeToPop;
3226 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3227 DAG.getTarget().Options.GuaranteedTailCallOpt))
3228 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
3229 else if (!Is64Bit && !IsTailCallConvention(CallConv) &&
3230 !Subtarget->getTargetTriple().isOSMSVCRT() &&
3231 SR == StackStructReturn)
3232 // If this is a call to a struct-return function, the callee
3233 // pops the hidden struct pointer, so we have to push it back.
3234 // This is common for Darwin/X86, Linux & Mingw32 targets.
3235 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
3236 NumBytesForCalleeToPop = 4;
3238 NumBytesForCalleeToPop = 0; // Callee pops nothing.
3240 // Returns a flag for retval copy to use.
3242 Chain = DAG.getCALLSEQ_END(Chain,
3243 DAG.getIntPtrConstant(NumBytesToPop, true),
3244 DAG.getIntPtrConstant(NumBytesForCalleeToPop,
3247 InFlag = Chain.getValue(1);
3250 // Handle result values, copying them out of physregs into vregs that we
3252 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
3253 Ins, dl, DAG, InVals);
3256 //===----------------------------------------------------------------------===//
3257 // Fast Calling Convention (tail call) implementation
3258 //===----------------------------------------------------------------------===//
3260 // Like stdcall, the callee cleans up the arguments; the convention differs in
3261 // that ECX is reserved for storing the tail-called function's address. Only 2
3262 // registers are free for argument passing (inreg). Tail call optimization is
3263 // performed provided:
3264 // * tailcallopt is enabled
3265 // * caller/callee are fastcc
3266 // On X86_64 architecture with GOT-style position independent code only local
3267 // (within module) calls are supported at the moment.
3268 // To keep the stack aligned according to the platform ABI, the function
3269 // GetAlignedArgumentStackSize ensures that the argument delta is always a
3270 // multiple of the stack alignment. (Dynamic linkers need this - Darwin's dyld, for example.)
3271 // If a tail called function callee has more arguments than the caller the
3272 // caller needs to make sure that there is room to move the RETADDR to. This is
3273 // achieved by reserving an area the size of the argument delta right after the
3274 // original RETADDR, but before the saved framepointer or the spilled registers
3275 // e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
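// For illustration (editor's note): if the caller pushed 8 bytes of arguments
// (arg1, arg2) and the callee needs 16 bytes (arg1..arg4), then, per the FPDiff
// computation in LowerCall above, FPDiff = 8 - 16 = -8 and the return address
// is re-stored 8 bytes lower so that the two extra arguments fit above it.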
3287 /// GetAlignedArgumentStackSize - Align the stack size, e.g. to 16n + 12 for a
3288 /// 16-byte alignment requirement.
3290 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
3291 SelectionDAG& DAG) const {
3292 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3293 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
3294 unsigned StackAlignment = TFI.getStackAlignment();
3295 uint64_t AlignMask = StackAlignment - 1;
3296 int64_t Offset = StackSize;
3297 unsigned SlotSize = RegInfo->getSlotSize();
3298 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
3299 // Number smaller than 12 so just add the difference.
3300 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
3302 // Mask out the lower bits, add the stack alignment once plus the 12 bytes.
3303 Offset = ((~AlignMask) & Offset) + StackAlignment +
3304 (StackAlignment-SlotSize);
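// Worked example (editor's note, assuming a 16-byte stack alignment and a
// 4-byte slot size): AlignMask == 15 and StackAlignment - SlotSize == 12, so
// for StackSize == 20 we get 20 & 15 == 4 <= 12 and Offset becomes
// 20 + (12 - 4) == 28 == 16*1 + 12; for StackSize == 30 we get 30 & 15 == 14
// > 12 and Offset becomes (30 & ~15) + 16 + 12 == 44 == 16*2 + 12.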
3309 /// MatchingStackOffset - Return true if the given stack call argument is
3310 /// already available in the same position (relatively) of the caller's
3311 /// incoming argument stack.
3313 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
3314 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
3315 const X86InstrInfo *TII) {
3316 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
3318 if (Arg.getOpcode() == ISD::CopyFromReg) {
3319 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
3320 if (!TargetRegisterInfo::isVirtualRegister(VR))
3322 MachineInstr *Def = MRI->getVRegDef(VR);
3325 if (!Flags.isByVal()) {
3326 if (!TII->isLoadFromStackSlot(Def, FI))
3329 unsigned Opcode = Def->getOpcode();
3330 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
3331 Opcode == X86::LEA64_32r) &&
3332 Def->getOperand(1).isFI()) {
3333 FI = Def->getOperand(1).getIndex();
3334 Bytes = Flags.getByValSize();
3338 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
3339 if (Flags.isByVal())
3340 // ByVal argument is passed in as a pointer but it's now being
3341 // dereferenced. e.g.
3342 // define @foo(%struct.X* %A) {
3343 // tail call @bar(%struct.X* byval %A)
3346 SDValue Ptr = Ld->getBasePtr();
3347 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
3350 FI = FINode->getIndex();
3351 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
3352 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
3353 FI = FINode->getIndex();
3354 Bytes = Flags.getByValSize();
3358 assert(FI != INT_MAX);
3359 if (!MFI->isFixedObjectIndex(FI))
3361 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
3364 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3365 /// for tail call optimization. Targets which want to do tail call
3366 /// optimization should implement this function.
3368 X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
3369 CallingConv::ID CalleeCC,
3371 bool isCalleeStructRet,
3372 bool isCallerStructRet,
3374 const SmallVectorImpl<ISD::OutputArg> &Outs,
3375 const SmallVectorImpl<SDValue> &OutVals,
3376 const SmallVectorImpl<ISD::InputArg> &Ins,
3377 SelectionDAG &DAG) const {
3378 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
3381 // If -tailcallopt is specified, make fastcc functions tail-callable.
3382 const MachineFunction &MF = DAG.getMachineFunction();
3383 const Function *CallerF = MF.getFunction();
3385 // If the function return type is x86_fp80 and the callee return type is not,
3386 // then the FP_EXTEND of the call result is not a nop. It's not safe to
3387 // perform a tailcall optimization here.
3388 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
3391 CallingConv::ID CallerCC = CallerF->getCallingConv();
3392 bool CCMatch = CallerCC == CalleeCC;
3393 bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
3394 bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
3396 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
3397 if (IsTailCallConvention(CalleeCC) && CCMatch)
3402 // Look for obvious safe cases to perform tail call optimization that do not
3403 // require ABI changes. This is what gcc calls sibcall.
3405 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
3406 // emit a special epilogue.
3407 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3408 if (RegInfo->needsStackRealignment(MF))
3411 // Also avoid sibcall optimization if either caller or callee uses struct
3412 // return semantics.
3413 if (isCalleeStructRet || isCallerStructRet)
3416 // A stdcall/thiscall caller is expected to clean up its arguments; the
3417 // callee isn't going to do that.
3418 // FIXME: this is more restrictive than needed. We could produce a tailcall
3419 // when the stack adjustment matches. For example, with a thiscall that takes
3420 // only one argument.
3421 if (!CCMatch && (CallerCC == CallingConv::X86_StdCall ||
3422 CallerCC == CallingConv::X86_ThisCall))
3425 // Do not sibcall optimize vararg calls unless all arguments are passed via
3426 // registers.
3427 if (isVarArg && !Outs.empty()) {
3429 // Optimizing for varargs on Win64 is unlikely to be safe without
3430 // additional testing.
3431 if (IsCalleeWin64 || IsCallerWin64)
3434 SmallVector<CCValAssign, 16> ArgLocs;
3435 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3438 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3439 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
3440 if (!ArgLocs[i].isRegLoc())
3444 // If the call result is in ST0 / ST1, it needs to be popped off the x87
3445 // stack. Therefore, if it's not used by the call it is not safe to optimize
3446 // this into a sibcall.
3447 bool Unused = false;
3448 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3455 SmallVector<CCValAssign, 16> RVLocs;
3456 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), RVLocs,
3458 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3459 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
3460 CCValAssign &VA = RVLocs[i];
3461 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
3466 // If the calling conventions do not match, then we'd better make sure the
3467 // results are returned in the same way as what the caller expects.
3469 SmallVector<CCValAssign, 16> RVLocs1;
3470 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
3472 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
3474 SmallVector<CCValAssign, 16> RVLocs2;
3475 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
3477 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
3479 if (RVLocs1.size() != RVLocs2.size())
3481 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
3482 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
3484 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
3486 if (RVLocs1[i].isRegLoc()) {
3487 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
3490 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
3496 // If the callee takes no arguments then go on to check the results of the
3497 // call.
3498 if (!Outs.empty()) {
3499 // Check if stack adjustment is needed. For now, do not do this if any
3500 // argument is passed on the stack.
3501 SmallVector<CCValAssign, 16> ArgLocs;
3502 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3505 // Allocate shadow area for Win64
3507 CCInfo.AllocateStack(32, 8);
3509 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3510 if (CCInfo.getNextStackOffset()) {
3511 MachineFunction &MF = DAG.getMachineFunction();
3512 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
3515 // Check if the arguments are already laid out in the same way as
3516 // the caller's fixed stack objects.
3517 MachineFrameInfo *MFI = MF.getFrameInfo();
3518 const MachineRegisterInfo *MRI = &MF.getRegInfo();
3519 const X86InstrInfo *TII = Subtarget->getInstrInfo();
3520 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3521 CCValAssign &VA = ArgLocs[i];
3522 SDValue Arg = OutVals[i];
3523 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3524 if (VA.getLocInfo() == CCValAssign::Indirect)
3526 if (!VA.isRegLoc()) {
3527 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
3534 // If the tailcall address may be in a register, then make sure it's
3535 // possible to register allocate for it. In 32-bit, the call address can
3536 // only target EAX, EDX, or ECX since the tail call must be scheduled after
3537 // callee-saved registers are restored. These happen to be the same
3538 // registers used to pass 'inreg' arguments so watch out for those.
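// For example (editor's note): in 32-bit PIC code the call target and the PIC
// base each need a register, so MaxInRegs below is 2 and the check gives up as
// soon as a second 'inreg' argument lands in EAX, ECX or EDX; without PIC it is
// the third such argument that triggers the bailout.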
3539 if (!Subtarget->is64Bit() &&
3540 ((!isa<GlobalAddressSDNode>(Callee) &&
3541 !isa<ExternalSymbolSDNode>(Callee)) ||
3542 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
3543 unsigned NumInRegs = 0;
3544 // In PIC we need an extra register to formulate the address computation
3545 // for the callee.
3546 unsigned MaxInRegs =
3547 (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
3549 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3550 CCValAssign &VA = ArgLocs[i];
3553 unsigned Reg = VA.getLocReg();
3556 case X86::EAX: case X86::EDX: case X86::ECX:
3557 if (++NumInRegs == MaxInRegs)
3569 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
3570 const TargetLibraryInfo *libInfo) const {
3571 return X86::createFastISel(funcInfo, libInfo);
3574 //===----------------------------------------------------------------------===//
3575 // Other Lowering Hooks
3576 //===----------------------------------------------------------------------===//
3578 static bool MayFoldLoad(SDValue Op) {
3579 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
3582 static bool MayFoldIntoStore(SDValue Op) {
3583 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
3586 static bool isTargetShuffle(unsigned Opcode) {
3588 default: return false;
3589 case X86ISD::BLENDI:
3590 case X86ISD::PSHUFB:
3591 case X86ISD::PSHUFD:
3592 case X86ISD::PSHUFHW:
3593 case X86ISD::PSHUFLW:
3595 case X86ISD::PALIGNR:
3596 case X86ISD::MOVLHPS:
3597 case X86ISD::MOVLHPD:
3598 case X86ISD::MOVHLPS:
3599 case X86ISD::MOVLPS:
3600 case X86ISD::MOVLPD:
3601 case X86ISD::MOVSHDUP:
3602 case X86ISD::MOVSLDUP:
3603 case X86ISD::MOVDDUP:
3606 case X86ISD::UNPCKL:
3607 case X86ISD::UNPCKH:
3608 case X86ISD::VPERMILPI:
3609 case X86ISD::VPERM2X128:
3610 case X86ISD::VPERMI:
3615 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3616 SDValue V1, SelectionDAG &DAG) {
3618 default: llvm_unreachable("Unknown x86 shuffle node");
3619 case X86ISD::MOVSHDUP:
3620 case X86ISD::MOVSLDUP:
3621 case X86ISD::MOVDDUP:
3622 return DAG.getNode(Opc, dl, VT, V1);
3626 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3627 SDValue V1, unsigned TargetMask,
3628 SelectionDAG &DAG) {
3630 default: llvm_unreachable("Unknown x86 shuffle node");
3631 case X86ISD::PSHUFD:
3632 case X86ISD::PSHUFHW:
3633 case X86ISD::PSHUFLW:
3634 case X86ISD::VPERMILPI:
3635 case X86ISD::VPERMI:
3636 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
3640 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3641 SDValue V1, SDValue V2, unsigned TargetMask,
3642 SelectionDAG &DAG) {
3644 default: llvm_unreachable("Unknown x86 shuffle node");
3645 case X86ISD::PALIGNR:
3646 case X86ISD::VALIGN:
3648 case X86ISD::VPERM2X128:
3649 return DAG.getNode(Opc, dl, VT, V1, V2,
3650 DAG.getConstant(TargetMask, MVT::i8));
3654 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3655 SDValue V1, SDValue V2, SelectionDAG &DAG) {
3657 default: llvm_unreachable("Unknown x86 shuffle node");
3658 case X86ISD::MOVLHPS:
3659 case X86ISD::MOVLHPD:
3660 case X86ISD::MOVHLPS:
3661 case X86ISD::MOVLPS:
3662 case X86ISD::MOVLPD:
3665 case X86ISD::UNPCKL:
3666 case X86ISD::UNPCKH:
3667 return DAG.getNode(Opc, dl, VT, V1, V2);
3671 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
3672 MachineFunction &MF = DAG.getMachineFunction();
3673 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3674 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3675 int ReturnAddrIndex = FuncInfo->getRAIndex();
3677 if (ReturnAddrIndex == 0) {
3678 // Set up a frame object for the return address.
3679 unsigned SlotSize = RegInfo->getSlotSize();
3680 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize,
3683 FuncInfo->setRAIndex(ReturnAddrIndex);
3686 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
3689 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
3690 bool hasSymbolicDisplacement) {
3691 // Offset should fit into 32 bit immediate field.
3692 if (!isInt<32>(Offset))
3695 // If we don't have a symbolic displacement - we don't have any extra
3697 if (!hasSymbolicDisplacement)
3700 // FIXME: Some tweaks might be needed for medium code model.
3701 if (M != CodeModel::Small && M != CodeModel::Kernel)
3704 // For the small code model, we assume that the latest object is 16MB before
3705 // the end of the 31-bit boundary. We may also accept pretty large negative
3706 // constants, knowing that all objects are in the positive half of the address space.
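// For illustration (editor's note): under the small code model with a symbolic
// displacement, an addend of 15 MB passes the check below (15 MB < 16 MB),
// while an addend of 32 MB does not, since it could push the final address past
// the 2 GB boundary.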
3707 if (M == CodeModel::Small && Offset < 16*1024*1024)
3710 // For the kernel code model, we know that all objects reside in the negative
3711 // half of the 32-bit address space. We must not accept negative offsets, since
3712 // they may take the address out of that half, but we may accept pretty large positive ones.
3713 if (M == CodeModel::Kernel && Offset >= 0)
3719 /// isCalleePop - Determines whether the callee is required to pop its
3720 /// own arguments. Callee pop is necessary to support tail calls.
3721 bool X86::isCalleePop(CallingConv::ID CallingConv,
3722 bool is64Bit, bool IsVarArg, bool TailCallOpt) {
3723 switch (CallingConv) {
3726 case CallingConv::X86_StdCall:
3727 case CallingConv::X86_FastCall:
3728 case CallingConv::X86_ThisCall:
3730 case CallingConv::Fast:
3731 case CallingConv::GHC:
3732 case CallingConv::HiPE:
3739 /// \brief Return true if the condition is an unsigned comparison operation.
3740 static bool isX86CCUnsigned(unsigned X86CC) {
3742 default: llvm_unreachable("Invalid integer condition!");
3743 case X86::COND_E: return true;
3744 case X86::COND_G: return false;
3745 case X86::COND_GE: return false;
3746 case X86::COND_L: return false;
3747 case X86::COND_LE: return false;
3748 case X86::COND_NE: return true;
3749 case X86::COND_B: return true;
3750 case X86::COND_A: return true;
3751 case X86::COND_BE: return true;
3752 case X86::COND_AE: return true;
3754 llvm_unreachable("covered switch fell through?!");
3757 /// TranslateX86CC - Do a one-to-one translation of an ISD::CondCode to the X86-
3758 /// specific condition code, returning the condition code and the LHS/RHS of the
3759 /// comparison to make.
3760 static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
3761 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
3763 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
3764 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
3765 // X > -1 -> X == 0, jump !sign.
3766 RHS = DAG.getConstant(0, RHS.getValueType());
3767 return X86::COND_NS;
3769 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
3770 // X < 0 -> X == 0, jump on sign.
3773 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
3774 // X < 1 -> X <= 0
3775 RHS = DAG.getConstant(0, RHS.getValueType());
3776 return X86::COND_LE;
3780 switch (SetCCOpcode) {
3781 default: llvm_unreachable("Invalid integer condition!");
3782 case ISD::SETEQ: return X86::COND_E;
3783 case ISD::SETGT: return X86::COND_G;
3784 case ISD::SETGE: return X86::COND_GE;
3785 case ISD::SETLT: return X86::COND_L;
3786 case ISD::SETLE: return X86::COND_LE;
3787 case ISD::SETNE: return X86::COND_NE;
3788 case ISD::SETULT: return X86::COND_B;
3789 case ISD::SETUGT: return X86::COND_A;
3790 case ISD::SETULE: return X86::COND_BE;
3791 case ISD::SETUGE: return X86::COND_AE;
3795 // First determine if it is required or is profitable to flip the operands.
3797 // If LHS is a foldable load, but RHS is not, flip the condition.
3798 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
3799 !ISD::isNON_EXTLoad(RHS.getNode())) {
3800 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
3801 std::swap(LHS, RHS);
3804 switch (SetCCOpcode) {
3810 std::swap(LHS, RHS);
3814 // On a floating point condition, the flags are set as follows:
3815 //  ZF | PF | CF | condition
3816 //   0 |  0 |  0 | X > Y
3817 //   0 |  0 |  1 | X < Y
3818 //   1 |  0 |  0 | X == Y
3819 //   1 |  1 |  1 | unordered
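// For illustration (editor's note): after a ucomiss/ucomisd with X in the first
// operand and Y in the second, "X > Y" corresponds to CF == 0 && ZF == 0, i.e.
// the unsigned "above" condition, which is why SETGT maps to X86::COND_A below;
// unordered results set PF, which is why SETUO/SETO map to COND_P/COND_NP.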
3820 switch (SetCCOpcode) {
3821 default: llvm_unreachable("Condcode should be pre-legalized away");
3823 case ISD::SETEQ: return X86::COND_E;
3824 case ISD::SETOLT: // flipped
3826 case ISD::SETGT: return X86::COND_A;
3827 case ISD::SETOLE: // flipped
3829 case ISD::SETGE: return X86::COND_AE;
3830 case ISD::SETUGT: // flipped
3832 case ISD::SETLT: return X86::COND_B;
3833 case ISD::SETUGE: // flipped
3835 case ISD::SETLE: return X86::COND_BE;
3837 case ISD::SETNE: return X86::COND_NE;
3838 case ISD::SETUO: return X86::COND_P;
3839 case ISD::SETO: return X86::COND_NP;
3841 case ISD::SETUNE: return X86::COND_INVALID;
3845 /// hasFPCMov - is there a floating point cmov for the specific X86 condition
3846 /// code. Current x86 isa includes the following FP cmov instructions:
3847 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
3848 static bool hasFPCMov(unsigned X86CC) {
3864 /// isFPImmLegal - Returns true if the target can instruction select the
3865 /// specified FP immediate natively. If false, the legalizer will
3866 /// materialize the FP immediate as a load from a constant pool.
3867 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
3868 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
3869 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
3875 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
3876 ISD::LoadExtType ExtTy,
3878 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
3879 // relocation must target a movq or addq instruction: don't let the load shrink.
3880 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
3881 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
3882 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
3883 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
3887 /// \brief Returns true if it is beneficial to convert a load of a constant
3888 /// to just the constant itself.
3889 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
3891 assert(Ty->isIntegerTy());
3893 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3894 if (BitSize == 0 || BitSize > 64)
3899 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
3900 unsigned Index) const {
3901 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
3904 return (Index == 0 || Index == ResVT.getVectorNumElements());
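// Example (editor's note, assuming EXTRACT_SUBVECTOR is legal or custom for the
// result type): extracting a v2i64 from a v4i64 is treated as cheap at index 0
// (the low half) or index 2 (the high half, a single VEXTRACTF128/VEXTRACTI128),
// but not at index 1.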
3907 bool X86TargetLowering::isCheapToSpeculateCttz() const {
3908 // Speculate cttz only if we can directly use TZCNT.
3909 return Subtarget->hasBMI();
3912 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
3913 // Speculate ctlz only if we can directly use LZCNT.
3914 return Subtarget->hasLZCNT();
3917 /// isUndefOrInRange - Return true if Val is undef or if its value falls within
3918 /// the specified half-open range [Low, Hi).
3919 static bool isUndefOrInRange(int Val, int Low, int Hi) {
3920 return (Val < 0) || (Val >= Low && Val < Hi);
3923 /// isUndefOrEqual - Val is either less than zero (undef) or equal to the
3924 /// specified value.
3925 static bool isUndefOrEqual(int Val, int CmpVal) {
3926 return (Val < 0 || Val == CmpVal);
3929 /// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
3930 /// from position Pos and ending in Pos+Size, falls within the specified
3931 /// sequential range [Low, Low+Size), or is undef.
3932 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
3933 unsigned Pos, unsigned Size, int Low) {
3934 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
3935 if (!isUndefOrEqual(Mask[i], Low))
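// Example (editor's note): isSequentialOrUndefInRange({4, -1, 6, 7}, 0, 4, 4)
// returns true, since every defined element equals Low plus its position
// (4, 6, 7) and the -1 entry counts as undef.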
3940 /// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
3941 /// is suitable for input to PSHUFD. That is, it doesn't reference the other
3942 /// operand - by default will match for first operand.
3943 static bool isPSHUFDMask(ArrayRef<int> Mask, MVT VT,
3944 bool TestSecondOperand = false) {
3945 if (VT != MVT::v4f32 && VT != MVT::v4i32 &&
3946 VT != MVT::v2f64 && VT != MVT::v2i64)
3949 unsigned NumElems = VT.getVectorNumElements();
3950 unsigned Lo = TestSecondOperand ? NumElems : 0;
3951 unsigned Hi = Lo + NumElems;
3953 for (unsigned i = 0; i < NumElems; ++i)
3954 if (!isUndefOrInRange(Mask[i], (int)Lo, (int)Hi))
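// Example (editor's note): for v4i32, the mask <2, 3, 0, 1> references only
// elements 0-3 of the first operand, so it is a valid PSHUFD mask; it would be
// encoded as the immediate 0b01001110 (0x4E).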
3960 /// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
3961 /// is suitable for input to PSHUFHW.
3962 static bool isPSHUFHWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3963 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3966 // Lower quadword copied in order or undef.
3967 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0))
3970 // Upper quadword shuffled.
3971 for (unsigned i = 4; i != 8; ++i)
3972 if (!isUndefOrInRange(Mask[i], 4, 8))
3975 if (VT == MVT::v16i16) {
3976 // Lower quadword copied in order or undef.
3977 if (!isSequentialOrUndefInRange(Mask, 8, 4, 8))
3980 // Upper quadword shuffled.
3981 for (unsigned i = 12; i != 16; ++i)
3982 if (!isUndefOrInRange(Mask[i], 12, 16))
3989 /// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
3990 /// is suitable for input to PSHUFLW.
3991 static bool isPSHUFLWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3992 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3995 // Upper quadword copied in order.
3996 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4))
3999 // Lower quadword shuffled.
4000 for (unsigned i = 0; i != 4; ++i)
4001 if (!isUndefOrInRange(Mask[i], 0, 4))
4004 if (VT == MVT::v16i16) {
4005 // Upper quadword copied in order.
4006 if (!isSequentialOrUndefInRange(Mask, 12, 4, 12))
4009 // Lower quadword shuffled.
4010 for (unsigned i = 8; i != 12; ++i)
4011 if (!isUndefOrInRange(Mask[i], 8, 12))
4018 /// \brief Return true if the mask specifies a shuffle of elements that is
4019 /// suitable for input to intralane (palignr) or interlane (valign) vector
4020 /// shifts.
4021 static bool isAlignrMask(ArrayRef<int> Mask, MVT VT, bool InterLane) {
4022 unsigned NumElts = VT.getVectorNumElements();
4023 unsigned NumLanes = InterLane ? 1: VT.getSizeInBits()/128;
4024 unsigned NumLaneElts = NumElts/NumLanes;
4026 // Do not handle 64-bit element shuffles with palignr.
4027 if (NumLaneElts == 2)
4030 for (unsigned l = 0; l != NumElts; l+=NumLaneElts) {
4032 for (i = 0; i != NumLaneElts; ++i) {
4037 // Lane is all undef, go to next lane
4038 if (i == NumLaneElts)
4041 int Start = Mask[i+l];
4043 // Make sure it's in this lane in one of the sources
4044 if (!isUndefOrInRange(Start, l, l+NumLaneElts) &&
4045 !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts))
4048 // If not lane 0, then we must match lane 0
4049 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l))
4052 // Correct second source to be contiguous with first source
4053 if (Start >= (int)NumElts)
4054 Start -= NumElts - NumLaneElts;
4056 // Make sure we're shifting in the right direction.
4057 if (Start <= (int)(i+l))
4062 // Check the rest of the elements to see if they are consecutive.
4063 for (++i; i != NumLaneElts; ++i) {
4064 int Idx = Mask[i+l];
4066 // Make sure it's in this lane
4067 if (!isUndefOrInRange(Idx, l, l+NumLaneElts) &&
4068 !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts))
4071 // If not lane 0, then we must match lane 0
4072 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l))
4075 if (Idx >= (int)NumElts)
4076 Idx -= NumElts - NumLaneElts;
4078 if (!isUndefOrEqual(Idx, Start+i))
4087 /// \brief Return true if the node specifies a shuffle of elements that is
4088 /// suitable for input to PALIGNR.
4089 static bool isPALIGNRMask(ArrayRef<int> Mask, MVT VT,
4090 const X86Subtarget *Subtarget) {
4091 if ((VT.is128BitVector() && !Subtarget->hasSSSE3()) ||
4092 (VT.is256BitVector() && !Subtarget->hasInt256()) ||
4093 VT.is512BitVector())
4094 // FIXME: Add AVX512BW.
4097 return isAlignrMask(Mask, VT, false);
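// Example (editor's note): for v16i8, the mask <5, 6, ..., 19, 20> (sixteen
// consecutive elements of the 32-byte concatenation starting at byte 5) is
// accepted here and corresponds to a PALIGNR byte-shift immediate of 5.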
4100 /// \brief Return true if the node specifies a shuffle of elements that is
4101 /// suitable for input to VALIGN.
4102 static bool isVALIGNMask(ArrayRef<int> Mask, MVT VT,
4103 const X86Subtarget *Subtarget) {
4104 // FIXME: Add AVX512VL.
4105 if (!VT.is512BitVector() || !Subtarget->hasAVX512())
4107 return isAlignrMask(Mask, VT, true);
4110 /// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
4111 /// the two vector operands have swapped position.
4112 static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
4113 unsigned NumElems) {
4114 for (unsigned i = 0; i != NumElems; ++i) {
4118 else if (idx < (int)NumElems)
4119 Mask[i] = idx + NumElems;
4121 Mask[i] = idx - NumElems;
4125 /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
4126 /// specifies a shuffle of elements that is suitable for input to 128/256-bit
4127 /// SHUFPS and SHUFPD. If Commuted is true, then it checks for the sources to be
4128 /// in the reverse order of what x86 shuffles want.
4129 static bool isSHUFPMask(ArrayRef<int> Mask, MVT VT, bool Commuted = false) {
4131 unsigned NumElems = VT.getVectorNumElements();
4132 unsigned NumLanes = VT.getSizeInBits()/128;
4133 unsigned NumLaneElems = NumElems/NumLanes;
4135 if (NumLaneElems != 2 && NumLaneElems != 4)
4138 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4139 bool symmetricMaskRequired =
4140 (VT.getSizeInBits() >= 256) && (EltSize == 32);
4142 // VSHUFPSY divides the resulting vector into 4 chunks.
4143 // The sources are also split into 4 chunks, and each destination
4144 // chunk must come from a different source chunk.
4146 // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0
4147 // SRC2 => Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
4149 // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4,
4150 // Y3..Y0, Y3..Y0, X3..X0, X3..X0
4152 // VSHUFPDY divides the resulting vector into 4 chunks.
4153 // The sources are also split into 4 chunks, and each destination
4154 // chunk must come from a different source chunk.
4156 // SRC1 => X3 X2 X1 X0
4157 // SRC2 => Y3 Y2 Y1 Y0
4159 // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0
4161 SmallVector<int, 4> MaskVal(NumLaneElems, -1);
4162 unsigned HalfLaneElems = NumLaneElems/2;
4163 for (unsigned l = 0; l != NumElems; l += NumLaneElems) {
4164 for (unsigned i = 0; i != NumLaneElems; ++i) {
4165 int Idx = Mask[i+l];
4166 unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0);
4167 if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems))
4169 // For VSHUFPSY, the mask of the second half must be the same as the
4170 // first but with the appropriate offsets. This works in the same way as
4171 // VPERMILPS works with masks.
4172 if (!symmetricMaskRequired || Idx < 0)
4174 if (MaskVal[i] < 0) {
4175 MaskVal[i] = Idx - l;
4178 if ((signed)(Idx - l) != MaskVal[i])
4186 /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
4187 /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
4188 static bool isMOVHLPSMask(ArrayRef<int> Mask, MVT VT) {
4189 if (!VT.is128BitVector())
4192 unsigned NumElems = VT.getVectorNumElements();
4197 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
4198 return isUndefOrEqual(Mask[0], 6) &&
4199 isUndefOrEqual(Mask[1], 7) &&
4200 isUndefOrEqual(Mask[2], 2) &&
4201 isUndefOrEqual(Mask[3], 3);
4204 /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
4205 /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
4206 /// <2, 3, 2, 3>.
4207 static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, MVT VT) {
4208 if (!VT.is128BitVector())
4211 unsigned NumElems = VT.getVectorNumElements();
4216 return isUndefOrEqual(Mask[0], 2) &&
4217 isUndefOrEqual(Mask[1], 3) &&
4218 isUndefOrEqual(Mask[2], 2) &&
4219 isUndefOrEqual(Mask[3], 3);
4222 /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
4223 /// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
4224 static bool isMOVLPMask(ArrayRef<int> Mask, MVT VT) {
4225 if (!VT.is128BitVector())
4228 unsigned NumElems = VT.getVectorNumElements();
4230 if (NumElems != 2 && NumElems != 4)
4233 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4234 if (!isUndefOrEqual(Mask[i], i + NumElems))
4237 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
4238 if (!isUndefOrEqual(Mask[i], i))
4244 /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
4245 /// specifies a shuffle of elements that is suitable for input to MOVLHPS.
4246 static bool isMOVLHPSMask(ArrayRef<int> Mask, MVT VT) {
4247 if (!VT.is128BitVector())
4250 unsigned NumElems = VT.getVectorNumElements();
4252 if (NumElems != 2 && NumElems != 4)
4255 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4256 if (!isUndefOrEqual(Mask[i], i))
4259 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4260 if (!isUndefOrEqual(Mask[i + e], i + NumElems))
4266 /// isINSERTPSMask - Return true if the specified VECTOR_SHUFFLE operand
4267 /// specifies a shuffle of elements that is suitable for input to INSERTPS.
4268 /// i.e.: if all but one element come from the same vector.
4269 static bool isINSERTPSMask(ArrayRef<int> Mask, MVT VT) {
4270 // TODO: Deal with AVX's VINSERTPS
4271 if (!VT.is128BitVector() || (VT != MVT::v4f32 && VT != MVT::v4i32))
4274 unsigned CorrectPosV1 = 0;
4275 unsigned CorrectPosV2 = 0;
4276 for (int i = 0, e = (int)VT.getVectorNumElements(); i != e; ++i) {
4277 if (Mask[i] == -1) {
4285 else if (Mask[i] == i + 4)
4289 if (CorrectPosV1 == 3 || CorrectPosV2 == 3)
4290 // We have 3 elements (undefs count as elements from any vector) from one
4291 // vector, and one from another.
4298 // Some special combinations that can be optimized.
4301 SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp,
4302 SelectionDAG &DAG) {
4303 MVT VT = SVOp->getSimpleValueType(0);
4306 if (VT != MVT::v8i32 && VT != MVT::v8f32)
4309 ArrayRef<int> Mask = SVOp->getMask();
4311 // These are the special masks that may be optimized.
4312 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14};
4313 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15};
4314 bool MatchEvenMask = true;
4315 bool MatchOddMask = true;
4316 for (int i=0; i<8; ++i) {
4317 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i]))
4318 MatchEvenMask = false;
4319 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i]))
4320 MatchOddMask = false;
4323 if (!MatchEvenMask && !MatchOddMask)
4326 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT);
4328 SDValue Op0 = SVOp->getOperand(0);
4329 SDValue Op1 = SVOp->getOperand(1);
4331 if (MatchEvenMask) {
4332 // Shift the second operand right by 32 bits.
4333 static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 };
4334 Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask);
4336 // Shift the first operand left by 32 bits.
4337 static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 };
4338 Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask);
4340 static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15};
4341 return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask);
4344 /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
4345 /// specifies a shuffle of elements that is suitable for input to UNPCKL.
4346 static bool isUNPCKLMask(ArrayRef<int> Mask, MVT VT,
4347 bool HasInt256, bool V2IsSplat = false) {
4349 assert(VT.getSizeInBits() >= 128 &&
4350 "Unsupported vector type for unpckl");
4352 unsigned NumElts = VT.getVectorNumElements();
4353 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4354 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4357 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4358 "Unsupported vector type for unpckh");
4360 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4361 unsigned NumLanes = VT.getSizeInBits()/128;
4362 unsigned NumLaneElts = NumElts/NumLanes;
4364 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4365 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4366 int BitI = Mask[l+i];
4367 int BitI1 = Mask[l+i+1];
4368 if (!isUndefOrEqual(BitI, j))
4371 if (!isUndefOrEqual(BitI1, NumElts))
4374 if (!isUndefOrEqual(BitI1, j + NumElts))
4383 /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
4384 /// specifies a shuffle of elements that is suitable for input to UNPCKH.
4385 static bool isUNPCKHMask(ArrayRef<int> Mask, MVT VT,
4386 bool HasInt256, bool V2IsSplat = false) {
4387 assert(VT.getSizeInBits() >= 128 &&
4388 "Unsupported vector type for unpckh");
4390 unsigned NumElts = VT.getVectorNumElements();
4391 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4392 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4395 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4396 "Unsupported vector type for unpckh");
4398 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4399 unsigned NumLanes = VT.getSizeInBits()/128;
4400 unsigned NumLaneElts = NumElts/NumLanes;
4402 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4403 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4404 int BitI = Mask[l+i];
4405 int BitI1 = Mask[l+i+1];
4406 if (!isUndefOrEqual(BitI, j))
4409 if (isUndefOrEqual(BitI1, NumElts))
4412 if (!isUndefOrEqual(BitI1, j+NumElts))
4420 /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
4421 /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
4422 /// <0, 0, 1, 1>.
4423 static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4424 unsigned NumElts = VT.getVectorNumElements();
4425 bool Is256BitVec = VT.is256BitVector();
4427 if (VT.is512BitVector())
4429 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4430 "Unsupported vector type for unpckh");
4432 if (Is256BitVec && NumElts != 4 && NumElts != 8 &&
4433 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4436 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern
4437 // FIXME: Need a better way to get rid of this, there's no latency difference
4438 // between UNPCKLPD and MOVDDUP; the latter should always be checked first and
4439 // the former later. We should also remove the "_undef" special mask.
4440 if (NumElts == 4 && Is256BitVec)
4443 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4444 // independently on 128-bit lanes.
4445 unsigned NumLanes = VT.getSizeInBits()/128;
4446 unsigned NumLaneElts = NumElts/NumLanes;
4448 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4449 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4450 int BitI = Mask[l+i];
4451 int BitI1 = Mask[l+i+1];
4453 if (!isUndefOrEqual(BitI, j))
4455 if (!isUndefOrEqual(BitI1, j))
4463 /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
4464 /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
4465 /// <2, 2, 3, 3>.
4466 static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4467 unsigned NumElts = VT.getVectorNumElements();
4469 if (VT.is512BitVector())
4472 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4473 "Unsupported vector type for unpckh");
4475 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4476 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4479 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4480 // independently on 128-bit lanes.
4481 unsigned NumLanes = VT.getSizeInBits()/128;
4482 unsigned NumLaneElts = NumElts/NumLanes;
4484 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4485 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4486 int BitI = Mask[l+i];
4487 int BitI1 = Mask[l+i+1];
4488 if (!isUndefOrEqual(BitI, j))
4490 if (!isUndefOrEqual(BitI1, j))
4497 // Match for INSERTI64x4/INSERTF64x4 instructions: (src0[0], src1[0]) or
4498 // (src1[0], src0[1]), i.e. manipulation of 256-bit sub-vectors.
4499 static bool isINSERT64x4Mask(ArrayRef<int> Mask, MVT VT, unsigned int *Imm) {
4500 if (!VT.is512BitVector())
4503 unsigned NumElts = VT.getVectorNumElements();
4504 unsigned HalfSize = NumElts/2;
4505 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, 0)) {
4506 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, NumElts)) {
4511 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, NumElts)) {
4512 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, HalfSize)) {
4520 /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
4521 /// specifies a shuffle of elements that is suitable for input to MOVSS,
4522 /// MOVSD, and MOVD, i.e. setting the lowest element.
4523 static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
4524 if (VT.getVectorElementType().getSizeInBits() < 32)
4526 if (!VT.is128BitVector())
4529 unsigned NumElts = VT.getVectorNumElements();
4531 if (!isUndefOrEqual(Mask[0], NumElts))
4534 for (unsigned i = 1; i != NumElts; ++i)
4535 if (!isUndefOrEqual(Mask[i], i))
4541 /// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered
4542 /// as permutations between 128-bit chunks or halves. As an example, take this
4543 /// shuffle:
4544 ///   vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
4545 /// The first half comes from the second half of V1 and the second half from
4546 /// the second half of V2.
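// For that example (editor's note): half indices are 0/1 for V1's low/high half
// and 2/3 for V2's, so the shuffle above selects halves 1 and 3, and
// getShuffleVPERM2X128Immediate below would encode it as 1 | (3 << 4) == 0x31.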
4547 static bool isVPERM2X128Mask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4548 if (!HasFp256 || !VT.is256BitVector())
4551 // The shuffle result is divided into half A and half B. In total the two
4552 // sources have 4 halves, namely: C, D, E, F. The final values of A and
4553 // B must come from C, D, E or F.
4554 unsigned HalfSize = VT.getVectorNumElements()/2;
4555 bool MatchA = false, MatchB = false;
4557 // Check if A comes from one of C, D, E, F.
4558 for (unsigned Half = 0; Half != 4; ++Half) {
4559 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
4565 // Check if B comes from one of C, D, E, F.
4566 for (unsigned Half = 0; Half != 4; ++Half) {
4567 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
4573 return MatchA && MatchB;
4576 /// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
4577 /// the specified VECTOR_MASK mask with VPERM2F128/VPERM2I128 instructions.
4578 static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
4579 MVT VT = SVOp->getSimpleValueType(0);
4581 unsigned HalfSize = VT.getVectorNumElements()/2;
4583 unsigned FstHalf = 0, SndHalf = 0;
4584 for (unsigned i = 0; i < HalfSize; ++i) {
4585 if (SVOp->getMaskElt(i) > 0) {
4586 FstHalf = SVOp->getMaskElt(i)/HalfSize;
4590 for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
4591 if (SVOp->getMaskElt(i) > 0) {
4592 SndHalf = SVOp->getMaskElt(i)/HalfSize;
4597 return (FstHalf | (SndHalf << 4));
4600 // Symmetric in-lane mask. Each lane has 4 elements (for imm8)
4601 static bool isPermImmMask(ArrayRef<int> Mask, MVT VT, unsigned& Imm8) {
4602 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4606 unsigned NumElts = VT.getVectorNumElements();
4608 if (VT.is128BitVector() || (VT.is256BitVector() && EltSize == 64)) {
4609 for (unsigned i = 0; i != NumElts; ++i) {
4612 Imm8 |= Mask[i] << (i*2);
4617 unsigned LaneSize = 4;
4618 SmallVector<int, 4> MaskVal(LaneSize, -1);
4620 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4621 for (unsigned i = 0; i != LaneSize; ++i) {
4622 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4626 if (MaskVal[i] < 0) {
4627 MaskVal[i] = Mask[i+l] - l;
4628 Imm8 |= MaskVal[i] << (i*2);
4631 if (Mask[i+l] != (signed)(MaskVal[i]+l))
4638 /// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
4639 /// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
4640 /// Note that VPERMIL mask matching differs depending on whether the underlying
4641 /// element type is 32 or 64 bits. In VPERMILPS the high half of the mask should point
4642 /// to the same elements of the low, but to the higher half of the source.
4643 /// In VPERMILPD the two lanes could be shuffled independently of each other
4644 /// with the same restriction that lanes can't be crossed. Also handles PSHUFDY.
4645 static bool isVPERMILPMask(ArrayRef<int> Mask, MVT VT) {
4646 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4647 if (VT.getSizeInBits() < 256 || EltSize < 32)
4649 bool symmetricMaskRequired = (EltSize == 32);
4650 unsigned NumElts = VT.getVectorNumElements();
4652 unsigned NumLanes = VT.getSizeInBits()/128;
4653 unsigned LaneSize = NumElts/NumLanes;
4654 // 2 or 4 elements in one lane
4656 SmallVector<int, 4> ExpectedMaskVal(LaneSize, -1);
4657 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4658 for (unsigned i = 0; i != LaneSize; ++i) {
4659 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4661 if (symmetricMaskRequired) {
4662 if (ExpectedMaskVal[i] < 0 && Mask[i+l] >= 0) {
4663 ExpectedMaskVal[i] = Mask[i+l] - l;
4666 if (!isUndefOrEqual(Mask[i+l], ExpectedMaskVal[i]+l))
4674 /// isCommutedMOVLMask - Returns true if the shuffle mask is the reverse of
4675 /// what x86 movss wants: the lowest element must be the lowest element of
4676 /// vector 2, and the other elements must come from vector 1 in order.
4677 static bool isCommutedMOVLMask(ArrayRef<int> Mask, MVT VT,
4678 bool V2IsSplat = false, bool V2IsUndef = false) {
4679 if (!VT.is128BitVector())
4682 unsigned NumOps = VT.getVectorNumElements();
4683 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
4686 if (!isUndefOrEqual(Mask[0], 0))
4689 for (unsigned i = 1; i != NumOps; ++i)
4690 if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
4691 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
4692 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
4698 /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4699 /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
4700 /// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
4701 static bool isMOVSHDUPMask(ArrayRef<int> Mask, MVT VT,
4702 const X86Subtarget *Subtarget) {
4703 if (!Subtarget->hasSSE3())
4706 unsigned NumElems = VT.getVectorNumElements();
4708 if ((VT.is128BitVector() && NumElems != 4) ||
4709 (VT.is256BitVector() && NumElems != 8) ||
4710 (VT.is512BitVector() && NumElems != 16))
4713 // "i+1" is the value the indexed mask element must have
4714 for (unsigned i = 0; i != NumElems; i += 2)
4715 if (!isUndefOrEqual(Mask[i], i+1) ||
4716 !isUndefOrEqual(Mask[i+1], i+1))
4722 /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4723 /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
4724 /// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
4725 static bool isMOVSLDUPMask(ArrayRef<int> Mask, MVT VT,
4726 const X86Subtarget *Subtarget) {
4727 if (!Subtarget->hasSSE3())
4730 unsigned NumElems = VT.getVectorNumElements();
4732 if ((VT.is128BitVector() && NumElems != 4) ||
4733 (VT.is256BitVector() && NumElems != 8) ||
4734 (VT.is512BitVector() && NumElems != 16))
4737 // "i" is the value the indexed mask element must have
4738 for (unsigned i = 0; i != NumElems; i += 2)
4739 if (!isUndefOrEqual(Mask[i], i) ||
4740 !isUndefOrEqual(Mask[i+1], i))
4746 /// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand
4747 /// specifies a shuffle of elements that is suitable for input to 256-bit
4748 /// version of MOVDDUP.
4749 static bool isMOVDDUPYMask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4750 if (!HasFp256 || !VT.is256BitVector())
4753 unsigned NumElts = VT.getVectorNumElements();
4757 for (unsigned i = 0; i != NumElts/2; ++i)
4758 if (!isUndefOrEqual(Mask[i], 0))
4760 for (unsigned i = NumElts/2; i != NumElts; ++i)
4761 if (!isUndefOrEqual(Mask[i], NumElts/2))
4766 /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4767 /// specifies a shuffle of elements that is suitable for input to 128-bit
4768 /// version of MOVDDUP.
4769 static bool isMOVDDUPMask(ArrayRef<int> Mask, MVT VT) {
4770 if (!VT.is128BitVector())
4773 unsigned e = VT.getVectorNumElements() / 2;
4774 for (unsigned i = 0; i != e; ++i)
4775 if (!isUndefOrEqual(Mask[i], i))
4777 for (unsigned i = 0; i != e; ++i)
4778 if (!isUndefOrEqual(Mask[e+i], i))
4783 /// isVEXTRACTIndex - Return true if the specified
4784 /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
4785 /// suitable for instructions that extract 128 or 256 bit vectors
4786 static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
4787 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4788 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4791 // The index should be aligned on a vecWidth-bit boundary.
4793 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4795 MVT VT = N->getSimpleValueType(0);
4796 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4797 bool Result = (Index * ElSize) % vecWidth == 0;
4802 /// isVINSERTIndex - Return true if the specified INSERT_SUBVECTOR
4803 /// operand specifies a subvector insert that is suitable for input to
4804 /// insertion of 128 or 256-bit subvectors
4805 static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) {
4806 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4807 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4809 // The index should be aligned on a vecWidth-bit boundary.
4811 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4813 MVT VT = N->getSimpleValueType(0);
4814 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4815 bool Result = (Index * ElSize) % vecWidth == 0;
4820 bool X86::isVINSERT128Index(SDNode *N) {
4821 return isVINSERTIndex(N, 128);
4824 bool X86::isVINSERT256Index(SDNode *N) {
4825 return isVINSERTIndex(N, 256);
4828 bool X86::isVEXTRACT128Index(SDNode *N) {
4829 return isVEXTRACTIndex(N, 128);
4832 bool X86::isVEXTRACT256Index(SDNode *N) {
4833 return isVEXTRACTIndex(N, 256);
4836 /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
4837 /// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
4838 /// Handles 128-bit and 256-bit.
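/// Illustrative example (added note, not in the original comment): a v4i32
/// PSHUFD with mask <3, 1, 0, 2> produces the immediate
/// 3 | (1 << 2) | (0 << 4) | (2 << 6) = 0x87.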
4839 static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
4840 MVT VT = N->getSimpleValueType(0);
4842 assert((VT.getSizeInBits() >= 128) &&
4843 "Unsupported vector type for PSHUF/SHUFP");
4845 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate
4846 // independently on 128-bit lanes.
4847 unsigned NumElts = VT.getVectorNumElements();
4848 unsigned NumLanes = VT.getSizeInBits()/128;
4849 unsigned NumLaneElts = NumElts/NumLanes;
4851 assert((NumLaneElts == 2 || NumLaneElts == 4 || NumLaneElts == 8) &&
4852 "Only supports 2, 4 or 8 elements per lane");
4854 unsigned Shift = (NumLaneElts >= 4) ? 1 : 0;
4856 for (unsigned i = 0; i != NumElts; ++i) {
4857 int Elt = N->getMaskElt(i);
4858 if (Elt < 0) continue;
4859 Elt &= NumLaneElts - 1;
4860 unsigned ShAmt = (i << Shift) % 8;
4861 Mask |= Elt << ShAmt;
4867 /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
4868 /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
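/// Illustrative example (added note, not in the original comment): a v8i16
/// mask <0, 1, 2, 3, 7, 6, 5, 4> encodes its high four elements as
/// 3 | (2 << 2) | (1 << 4) | (0 << 6) = 0x1B.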
4869 static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) {
4870 MVT VT = N->getSimpleValueType(0);
4872 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4873 "Unsupported vector type for PSHUFHW");
4875 unsigned NumElts = VT.getVectorNumElements();
4878 for (unsigned l = 0; l != NumElts; l += 8) {
4879 // 8 nodes per lane, but we only care about the last 4.
4880 for (unsigned i = 0; i < 4; ++i) {
4881 int Elt = N->getMaskElt(l+i+4);
4882 if (Elt < 0) continue;
4883 Elt &= 0x3; // only 2-bits.
4884 Mask |= Elt << (i * 2);
4891 /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
4892 /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
4893 static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) {
4894 MVT VT = N->getSimpleValueType(0);
4896 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4897 "Unsupported vector type for PSHUFHW");
4899 unsigned NumElts = VT.getVectorNumElements();
4902 for (unsigned l = 0; l != NumElts; l += 8) {
4903 // 8 nodes per lane, but we only care about the first 4.
4904 for (unsigned i = 0; i < 4; ++i) {
4905 int Elt = N->getMaskElt(l+i);
4906 if (Elt < 0) continue;
4907 Elt &= 0x3; // only 2-bits
4908 Mask |= Elt << (i * 2);
4915 /// \brief Return the appropriate immediate to shuffle the specified
4916 /// VECTOR_SHUFFLE mask with the PALIGNR (if InterLane is false) or with
4917 /// VALIGN (if InterLane is true) instructions.
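/// As a hypothetical example (added note, not in the original comment), a
/// v16i8 PALIGNR whose first defined mask element is 5 at position 0 yields
/// the byte-offset immediate (5 - 0) * 1 = 5.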
4918 static unsigned getShuffleAlignrImmediate(ShuffleVectorSDNode *SVOp,
4920 MVT VT = SVOp->getSimpleValueType(0);
4921 unsigned EltSize = InterLane ? 1 :
4922 VT.getVectorElementType().getSizeInBits() >> 3;
4924 unsigned NumElts = VT.getVectorNumElements();
4925 unsigned NumLanes = VT.is512BitVector() ? 1 : VT.getSizeInBits()/128;
4926 unsigned NumLaneElts = NumElts/NumLanes;
4930 for (i = 0; i != NumElts; ++i) {
4931 Val = SVOp->getMaskElt(i);
4935 if (Val >= (int)NumElts)
4936 Val -= NumElts - NumLaneElts;
4938 assert(Val - i > 0 && "PALIGNR imm should be positive");
4939 return (Val - i) * EltSize;
4942 /// \brief Return the appropriate immediate to shuffle the specified
4943 /// VECTOR_SHUFFLE mask with the PALIGNR instruction.
4944 static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) {
4945 return getShuffleAlignrImmediate(SVOp, false);
4948 /// \brief Return the appropriate immediate to shuffle the specified
4949 /// VECTOR_SHUFFLE mask with the VALIGN instruction.
4950 static unsigned getShuffleVALIGNImmediate(ShuffleVectorSDNode *SVOp) {
4951 return getShuffleAlignrImmediate(SVOp, true);
4955 static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) {
4956 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4957 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4958 llvm_unreachable("Illegal extract subvector for VEXTRACT");
4961 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4963 MVT VecVT = N->getOperand(0).getSimpleValueType();
4964 MVT ElVT = VecVT.getVectorElementType();
4966 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4967 return Index / NumElemsPerChunk;
4970 static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) {
4971 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4972 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4973 llvm_unreachable("Illegal insert subvector for VINSERT");
4976 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4978 MVT VecVT = N->getSimpleValueType(0);
4979 MVT ElVT = VecVT.getVectorElementType();
4981 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4982 return Index / NumElemsPerChunk;
4985 /// getExtractVEXTRACT128Immediate - Return the appropriate immediate
4986 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128
4987 /// and VEXTRACTI128 instructions.
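/// For example (added note, not in the original comment): extracting the
/// v4f32 subvector at element index 4 of a v8f32 yields the immediate
/// 4 / (128 / 32) = 1.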
4988 unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) {
4989 return getExtractVEXTRACTImmediate(N, 128);
4992 /// getExtractVEXTRACT256Immediate - Return the appropriate immediate
4993 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF64x4
4994 /// and VEXTRACTI64x4 instructions.
4995 unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) {
4996 return getExtractVEXTRACTImmediate(N, 256);
4999 /// getInsertVINSERT128Immediate - Return the appropriate immediate
5000 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
5001 /// and VINSERTI128 instructions.
5002 unsigned X86::getInsertVINSERT128Immediate(SDNode *N) {
5003 return getInsertVINSERTImmediate(N, 128);
5006 /// getInsertVINSERT256Immediate - Return the appropriate immediate
5007 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF64x4
5008 /// and VINSERTI64x4 instructions.
5009 unsigned X86::getInsertVINSERT256Immediate(SDNode *N) {
5010 return getInsertVINSERTImmediate(N, 256);
5013 /// isZero - Returns true if V is a constant integer zero.
5014 static bool isZero(SDValue V) {
5015 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
5016 return C && C->isNullValue();
5019 /// isZeroNode - Returns true if Elt is a constant zero or a floating point
5020 /// constant +0.0.
5021 bool X86::isZeroNode(SDValue Elt) {
5024 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Elt))
5025 return CFP->getValueAPF().isPosZero();
5029 /// ShouldXformToMOVHLPS - Return true if the node should be transformed to
5030 /// match movhlps. The lower half elements should come from the upper half of
5031 /// V1 (and in order), and the upper half elements should come from the upper
5032 /// half of V2 (and in order).
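/// For example (added note, not in the original comment), the v4f32 mask
/// <2, 3, 6, 7> satisfies this check.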
5033 static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, MVT VT) {
5034 if (!VT.is128BitVector())
5036 if (VT.getVectorNumElements() != 4)
5038 for (unsigned i = 0, e = 2; i != e; ++i)
5039 if (!isUndefOrEqual(Mask[i], i+2))
5041 for (unsigned i = 2; i != 4; ++i)
5042 if (!isUndefOrEqual(Mask[i], i+4))
5047 /// isScalarLoadToVector - Returns true if the node is a scalar load that
5048 /// is promoted to a vector. It also returns the LoadSDNode by reference if
5049 /// required.
5050 static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = nullptr) {
5051 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
5053 N = N->getOperand(0).getNode();
5054 if (!ISD::isNON_EXTLoad(N))
5057 *LD = cast<LoadSDNode>(N);
5061 // Test whether the given value is a vector value which will be legalized
5062 // into a load.
5063 static bool WillBeConstantPoolLoad(SDNode *N) {
5064 if (N->getOpcode() != ISD::BUILD_VECTOR)
5067 // Check for any non-constant elements.
5068 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
5069 switch (N->getOperand(i).getNode()->getOpcode()) {
5071 case ISD::ConstantFP:
5078 // Vectors of all-zeros and all-ones are materialized with special
5079 // instructions rather than being loaded.
5080 return !ISD::isBuildVectorAllZeros(N) &&
5081 !ISD::isBuildVectorAllOnes(N);
5084 /// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
5085 /// match movlp{s|d}. The lower half elements should come from the lower half of
5086 /// V1 (and in order), and the upper half elements should come from the upper
5087 /// half of V2 (and in order). And since V1 will become the source of the
5088 /// MOVLP, it must be either a vector load or a scalar load to vector.
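/// For example (added note, not in the original comment), the v4f32 mask
/// <0, 1, 6, 7> has the required shape.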
5089 static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
5090 ArrayRef<int> Mask, MVT VT) {
5091 if (!VT.is128BitVector())
5094 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
5096 // If V2 is a vector load, don't do this transformation. We will try to use
5097 // a load-folding shufps op instead.
5098 if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2))
5101 unsigned NumElems = VT.getVectorNumElements();
5103 if (NumElems != 2 && NumElems != 4)
5105 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
5106 if (!isUndefOrEqual(Mask[i], i))
5108 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
5109 if (!isUndefOrEqual(Mask[i], i+NumElems))
5114 /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
5115 /// to a zero vector.
5116 /// FIXME: move to dag combiner / method on ShuffleVectorSDNode
5117 static bool isZeroShuffle(ShuffleVectorSDNode *N) {
5118 SDValue V1 = N->getOperand(0);
5119 SDValue V2 = N->getOperand(1);
5120 unsigned NumElems = N->getValueType(0).getVectorNumElements();
5121 for (unsigned i = 0; i != NumElems; ++i) {
5122 int Idx = N->getMaskElt(i);
5123 if (Idx >= (int)NumElems) {
5124 unsigned Opc = V2.getOpcode();
5125 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
5127 if (Opc != ISD::BUILD_VECTOR ||
5128 !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
5130 } else if (Idx >= 0) {
5131 unsigned Opc = V1.getOpcode();
5132 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
5134 if (Opc != ISD::BUILD_VECTOR ||
5135 !X86::isZeroNode(V1.getOperand(Idx)))
5142 /// getZeroVector - Returns a vector of specified type with all zero elements.
5144 static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
5145 SelectionDAG &DAG, SDLoc dl) {
5146 assert(VT.isVector() && "Expected a vector type");
5148 // Always build SSE zero vectors as <4 x i32> bitcasted
5149 // to their dest type. This ensures they get CSE'd.
5151 if (VT.is128BitVector()) { // SSE
5152 if (Subtarget->hasSSE2()) { // SSE2
5153 SDValue Cst = DAG.getConstant(0, MVT::i32);
5154 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5156 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5157 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
5159 } else if (VT.is256BitVector()) { // AVX
5160 if (Subtarget->hasInt256()) { // AVX2
5161 SDValue Cst = DAG.getConstant(0, MVT::i32);
5162 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5163 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5165 // 256-bit logic and arithmetic instructions in AVX are all
5166 // floating-point, no support for integer ops. Emit fp zeroed vectors.
5167 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5168 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5169 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops);
5171 } else if (VT.is512BitVector()) { // AVX-512
5172 SDValue Cst = DAG.getConstant(0, MVT::i32);
5173 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
5174 Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5175 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
5176 } else if (VT.getScalarType() == MVT::i1) {
5177 assert(VT.getVectorNumElements() <= 16 && "Unexpected vector type");
5178 SDValue Cst = DAG.getConstant(0, MVT::i1);
5179 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
5180 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
5182 llvm_unreachable("Unexpected vector type");
5184 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5187 /// getOnesVector - Returns a vector of specified type with all bits set.
5188 /// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
5189 /// no AVX2 support, use two <4 x i32> inserted in an <8 x i32> appropriately.
5190 /// Then bitcast to their original type, ensuring they get CSE'd.
5191 static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG,
5193 assert(VT.isVector() && "Expected a vector type");
5195 SDValue Cst = DAG.getConstant(~0U, MVT::i32);
5197 if (VT.is256BitVector()) {
5198 if (HasInt256) { // AVX2
5199 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5200 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5202 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5203 Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
5205 } else if (VT.is128BitVector()) {
5206 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5208 llvm_unreachable("Unexpected vector type");
5210 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5213 /// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
5214 /// that point to V2 point to its first element.
5215 static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) {
5216 for (unsigned i = 0; i != NumElems; ++i) {
5217 if (Mask[i] > (int)NumElems) {
5223 /// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd
5224 /// operation of specified width.
5225 static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
5227 unsigned NumElems = VT.getVectorNumElements();
5228 SmallVector<int, 8> Mask;
5229 Mask.push_back(NumElems);
5230 for (unsigned i = 1; i != NumElems; ++i)
5232 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5235 /// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
5236 static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5238 unsigned NumElems = VT.getVectorNumElements();
5239 SmallVector<int, 8> Mask;
5240 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
5242 Mask.push_back(i + NumElems);
5244 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5247 /// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
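/// Illustrative example (added note, not in the original comment): for v4i32,
/// getUnpackl builds the mask <0, 4, 1, 5> and getUnpackh builds <2, 6, 3, 7>.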
5248 static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5250 unsigned NumElems = VT.getVectorNumElements();
5251 SmallVector<int, 8> Mask;
5252 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
5253 Mask.push_back(i + Half);
5254 Mask.push_back(i + NumElems + Half);
5256 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5259 // PromoteSplati8i16 - i16 and i8 vector types can't be used directly by
5260 // a generic shuffle instruction because the target has no such instructions.
5261 // Generate shuffles which repeat i16 and i8 several times until they can be
5262 // represented by v4f32 and then be manipulated by target supported shuffles.
5263 static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
5264 MVT VT = V.getSimpleValueType();
5265 int NumElems = VT.getVectorNumElements();
5268 while (NumElems > 4) {
5269 if (EltNo < NumElems/2) {
5270 V = getUnpackl(DAG, dl, VT, V, V);
5272 V = getUnpackh(DAG, dl, VT, V, V);
5273 EltNo -= NumElems/2;
5280 /// getLegalSplat - Generate a legal splat with supported x86 shuffles
5281 static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
5282 MVT VT = V.getSimpleValueType();
5285 if (VT.is128BitVector()) {
5286 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
5287 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
5288 V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32),
5290 } else if (VT.is256BitVector()) {
5291 // To use VPERMILPS to splat scalars, the second half of indices must
5292 // refer to the higher part, which is a duplication of the lower one,
5293 // because VPERMILPS can only handle in-lane permutations.
5294 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo,
5295 EltNo+4, EltNo+4, EltNo+4, EltNo+4 };
5297 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V);
5298 V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32),
5301 llvm_unreachable("Vector size not supported");
5303 return DAG.getNode(ISD::BITCAST, dl, VT, V);
5306 /// PromoteSplat - Splat is promoted to target supported vector shuffles.
5307 static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
5308 MVT SrcVT = SV->getSimpleValueType(0);
5309 SDValue V1 = SV->getOperand(0);
5312 int EltNo = SV->getSplatIndex();
5313 int NumElems = SrcVT.getVectorNumElements();
5314 bool Is256BitVec = SrcVT.is256BitVector();
5316 assert(((SrcVT.is128BitVector() && NumElems > 4) || Is256BitVec) &&
5317 "Unknown how to promote splat for type");
5319 // Extract the 128-bit part containing the splat element and update
5320 // the splat element index when it refers to the higher register.
5322 V1 = Extract128BitVector(V1, EltNo, DAG, dl);
5323 if (EltNo >= NumElems/2)
5324 EltNo -= NumElems/2;
5327 // All i16 and i8 vector types can't be used directly by a generic shuffle
5328 // instruction because the target has no such instruction. Generate shuffles
5329 // which repeat i16 and i8 several times until they fit in i32, and then can
5330 // be manipulated by target supported shuffles.
5331 MVT EltVT = SrcVT.getVectorElementType();
5332 if (EltVT == MVT::i8 || EltVT == MVT::i16)
5333 V1 = PromoteSplati8i16(V1, DAG, EltNo);
5335 // Recreate the 256-bit vector and place the same 128-bit vector
5336 // into the low and high part. This is necessary because we want
5337 // to use VPERM* to shuffle the vectors
5339 V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1);
5342 return getLegalSplat(DAG, V1, EltNo);
5345 /// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
5346 /// vector and a zero or undef vector. This produces a shuffle where the low
5347 /// element of V2 is swizzled into the zero/undef vector, landing at element
5348 /// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
5349 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
5351 const X86Subtarget *Subtarget,
5352 SelectionDAG &DAG) {
5353 MVT VT = V2.getSimpleValueType();
5355 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
5356 unsigned NumElems = VT.getVectorNumElements();
5357 SmallVector<int, 16> MaskVec;
5358 for (unsigned i = 0; i != NumElems; ++i)
5359 // If this is the insertion idx, put the low elt of V2 here.
5360 MaskVec.push_back(i == Idx ? NumElems : i);
5361 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]);
5364 /// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
5365 /// target specific opcode. Returns true if the Mask could be calculated. Sets
5366 /// IsUnary to true if it only uses one source. Note that this will set IsUnary for
5367 /// shuffles which use a single input multiple times, and in those cases it will
5368 /// adjust the mask to only have indices within that single input.
5369 static bool getTargetShuffleMask(SDNode *N, MVT VT,
5370 SmallVectorImpl<int> &Mask, bool &IsUnary) {
5371 unsigned NumElems = VT.getVectorNumElements();
5375 bool IsFakeUnary = false;
5376 switch(N->getOpcode()) {
5377 case X86ISD::BLENDI:
5378 ImmN = N->getOperand(N->getNumOperands()-1);
5379 DecodeBLENDMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5382 ImmN = N->getOperand(N->getNumOperands()-1);
5383 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5384 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5386 case X86ISD::UNPCKH:
5387 DecodeUNPCKHMask(VT, Mask);
5388 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5390 case X86ISD::UNPCKL:
5391 DecodeUNPCKLMask(VT, Mask);
5392 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5394 case X86ISD::MOVHLPS:
5395 DecodeMOVHLPSMask(NumElems, Mask);
5396 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5398 case X86ISD::MOVLHPS:
5399 DecodeMOVLHPSMask(NumElems, Mask);
5400 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5402 case X86ISD::PALIGNR:
5403 ImmN = N->getOperand(N->getNumOperands()-1);
5404 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5406 case X86ISD::PSHUFD:
5407 case X86ISD::VPERMILPI:
5408 ImmN = N->getOperand(N->getNumOperands()-1);
5409 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5412 case X86ISD::PSHUFHW:
5413 ImmN = N->getOperand(N->getNumOperands()-1);
5414 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5417 case X86ISD::PSHUFLW:
5418 ImmN = N->getOperand(N->getNumOperands()-1);
5419 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5422 case X86ISD::PSHUFB: {
5424 SDValue MaskNode = N->getOperand(1);
5425 while (MaskNode->getOpcode() == ISD::BITCAST)
5426 MaskNode = MaskNode->getOperand(0);
5428 if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
5429 // If we have a build-vector, then things are easy.
5430 EVT VT = MaskNode.getValueType();
5431 assert(VT.isVector() &&
5432 "Can't produce a non-vector with a build_vector!");
5433 if (!VT.isInteger())
5436 int NumBytesPerElement = VT.getVectorElementType().getSizeInBits() / 8;
5438 SmallVector<uint64_t, 32> RawMask;
5439 for (int i = 0, e = MaskNode->getNumOperands(); i < e; ++i) {
5440 SDValue Op = MaskNode->getOperand(i);
5441 if (Op->getOpcode() == ISD::UNDEF) {
5442 RawMask.push_back((uint64_t)SM_SentinelUndef);
5445 auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
5448 APInt MaskElement = CN->getAPIntValue();
5450 // We now have to decode the element which could be any integer size and
5451 // extract each byte of it.
5452 for (int j = 0; j < NumBytesPerElement; ++j) {
5453 // Note that this is x86 and so always little endian: the low byte is
5454 // the first byte of the mask.
5455 RawMask.push_back(MaskElement.getLoBits(8).getZExtValue());
5456 MaskElement = MaskElement.lshr(8);
5459 DecodePSHUFBMask(RawMask, Mask);
5463 auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
5467 SDValue Ptr = MaskLoad->getBasePtr();
5468 if (Ptr->getOpcode() == X86ISD::Wrapper)
5469 Ptr = Ptr->getOperand(0);
5471 auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
5472 if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
5475 if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
5476 DecodePSHUFBMask(C, Mask);
5484 case X86ISD::VPERMI:
5485 ImmN = N->getOperand(N->getNumOperands()-1);
5486 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5491 DecodeScalarMoveMask(VT, /* IsLoad */ false, Mask);
5493 case X86ISD::VPERM2X128:
5494 ImmN = N->getOperand(N->getNumOperands()-1);
5495 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5496 if (Mask.empty()) return false;
5498 case X86ISD::MOVSLDUP:
5499 DecodeMOVSLDUPMask(VT, Mask);
5502 case X86ISD::MOVSHDUP:
5503 DecodeMOVSHDUPMask(VT, Mask);
5506 case X86ISD::MOVDDUP:
5507 DecodeMOVDDUPMask(VT, Mask);
5510 case X86ISD::MOVLHPD:
5511 case X86ISD::MOVLPD:
5512 case X86ISD::MOVLPS:
5513 // Not yet implemented
5515 default: llvm_unreachable("unknown target shuffle node");
5518 // If we have a fake unary shuffle, the shuffle mask is spread across two
5519 // inputs that are actually the same node. Re-map the mask to always point
5520 // into the first input.
5523 if (M >= (int)Mask.size())
5529 /// getShuffleScalarElt - Returns the scalar element that will make up the ith
5530 /// element of the result of the vector shuffle.
5531 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
5534 return SDValue(); // Limit search depth.
5536 SDValue V = SDValue(N, 0);
5537 EVT VT = V.getValueType();
5538 unsigned Opcode = V.getOpcode();
5540 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
5541 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
5542 int Elt = SV->getMaskElt(Index);
5545 return DAG.getUNDEF(VT.getVectorElementType());
5547 unsigned NumElems = VT.getVectorNumElements();
5548 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
5549 : SV->getOperand(1);
5550 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
5553 // Recurse into target specific vector shuffles to find scalars.
5554 if (isTargetShuffle(Opcode)) {
5555 MVT ShufVT = V.getSimpleValueType();
5556 unsigned NumElems = ShufVT.getVectorNumElements();
5557 SmallVector<int, 16> ShuffleMask;
5560 if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
5563 int Elt = ShuffleMask[Index];
5565 return DAG.getUNDEF(ShufVT.getVectorElementType());
5567 SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
5569 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
5573 // Actual nodes that may contain scalar elements
5574 if (Opcode == ISD::BITCAST) {
5575 V = V.getOperand(0);
5576 EVT SrcVT = V.getValueType();
5577 unsigned NumElems = VT.getVectorNumElements();
5579 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
5583 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
5584 return (Index == 0) ? V.getOperand(0)
5585 : DAG.getUNDEF(VT.getVectorElementType());
5587 if (V.getOpcode() == ISD::BUILD_VECTOR)
5588 return V.getOperand(Index);
5593 /// getNumOfConsecutiveZeros - Return the number of consecutive elements of a
5594 /// vector shuffle operation which come from a zero. The
5595 /// search can start in two different directions, from left or right.
5596 /// We count undefs as zeros until PreferredNum is reached.
5597 static unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp,
5598 unsigned NumElems, bool ZerosFromLeft,
5600 unsigned PreferredNum = -1U) {
5601 unsigned NumZeros = 0;
5602 for (unsigned i = 0; i != NumElems; ++i) {
5603 unsigned Index = ZerosFromLeft ? i : NumElems - i - 1;
5604 SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
5608 if (X86::isZeroNode(Elt))
5610 else if (Elt.getOpcode() == ISD::UNDEF) // Undef as zero up to PreferredNum.
5611 NumZeros = std::min(NumZeros + 1, PreferredNum);
5619 /// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
5620 /// correspond consecutively to elements from one of the vector operands,
5621 /// starting from its index OpIdx. Also set OpNum to the source vector operand used.
5623 bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
5624 unsigned MaskI, unsigned MaskE, unsigned OpIdx,
5625 unsigned NumElems, unsigned &OpNum) {
5626 bool SeenV1 = false;
5627 bool SeenV2 = false;
5629 for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
5630 int Idx = SVOp->getMaskElt(i);
5631 // Ignore undef indices
5635 if (Idx < (int)NumElems)
5640 // Only accept consecutive elements from the same vector
5641 if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
5645 OpNum = SeenV1 ? 0 : 1;
5649 /// isVectorShiftRight - Returns true if the shuffle can be implemented as a
5650 /// logical right shift of a vector.
5651 static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5652 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5654 SVOp->getSimpleValueType(0).getVectorNumElements();
5655 unsigned NumZeros = getNumOfConsecutiveZeros(
5656 SVOp, NumElems, false /* check zeros from right */, DAG,
5657 SVOp->getMaskElt(0));
5663 // Considering the elements in the mask that are not consecutive zeros,
5664 // check if they consecutively come from only one of the source vectors.
5666 // V1 = {X, A, B, C} 0
5668 // vector_shuffle V1, V2 <1, 2, 3, X>
5670 if (!isShuffleMaskConsecutive(SVOp,
5671 0, // Mask Start Index
5672 NumElems-NumZeros, // Mask End Index(exclusive)
5673 NumZeros, // Where to start looking in the src vector
5674 NumElems, // Number of elements in vector
5675 OpSrc)) // Which source operand ?
5680 ShVal = SVOp->getOperand(OpSrc);
5684 /// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
5685 /// logical left shift of a vector.
5686 static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5687 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5689 SVOp->getSimpleValueType(0).getVectorNumElements();
5690 unsigned NumZeros = getNumOfConsecutiveZeros(
5691 SVOp, NumElems, true /* check zeros from left */, DAG,
5692 NumElems - SVOp->getMaskElt(NumElems - 1) - 1);
5698 // Considering the elements in the mask that are not consecutive zeros,
5699 // check if they consecutively come from only one of the source vectors.
5701 // 0 { A, B, X, X } = V2
5703 // vector_shuffle V1, V2 <X, X, 4, 5>
5705 if (!isShuffleMaskConsecutive(SVOp,
5706 NumZeros, // Mask Start Index
5707 NumElems, // Mask End Index(exclusive)
5708 0, // Where to start looking in the src vector
5709 NumElems, // Number of elements in vector
5710 OpSrc)) // Which source operand ?
5715 ShVal = SVOp->getOperand(OpSrc);
5719 /// isVectorShift - Returns true if the shuffle can be implemented as a
5720 /// logical left or right shift of a vector.
5721 static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5722 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5723 // Although the logic below supports any bit width, there are no
5724 // shift instructions which handle more than 128-bit vectors.
5725 if (!SVOp->getSimpleValueType(0).is128BitVector())
5728 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
5729 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt))
5735 /// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
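/// Added explanatory note (not in the original comment): non-zero bytes are
/// zero-extended to i16 and merged in pairs (the odd-indexed byte shifted left
/// by 8), so the result is built as a v8i16 and bitcast back to v16i8.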
5737 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
5738 unsigned NumNonZero, unsigned NumZero,
5740 const X86Subtarget* Subtarget,
5741 const TargetLowering &TLI) {
5748 for (unsigned i = 0; i < 16; ++i) {
5749 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
5750 if (ThisIsNonZero && First) {
5752 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5754 V = DAG.getUNDEF(MVT::v8i16);
5759 SDValue ThisElt, LastElt;
5760 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
5761 if (LastIsNonZero) {
5762 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
5763 MVT::i16, Op.getOperand(i-1));
5765 if (ThisIsNonZero) {
5766 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
5767 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
5768 ThisElt, DAG.getConstant(8, MVT::i8));
5770 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
5774 if (ThisElt.getNode())
5775 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
5776 DAG.getIntPtrConstant(i/2));
5780 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
5783 /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
5785 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
5786 unsigned NumNonZero, unsigned NumZero,
5788 const X86Subtarget* Subtarget,
5789 const TargetLowering &TLI) {
5796 for (unsigned i = 0; i < 8; ++i) {
5797 bool isNonZero = (NonZeros & (1 << i)) != 0;
5801 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5803 V = DAG.getUNDEF(MVT::v8i16);
5806 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
5807 MVT::v8i16, V, Op.getOperand(i),
5808 DAG.getIntPtrConstant(i));
5815 /// LowerBuildVectorv4x32 - Custom lower build_vector of v4i32 or v4f32.
5816 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
5817 const X86Subtarget *Subtarget,
5818 const TargetLowering &TLI) {
5819 // Find all zeroable elements.
5821 for (int i=0; i < 4; ++i) {
5822 SDValue Elt = Op->getOperand(i);
5823 Zeroable[i] = (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt));
5825 assert(std::count_if(&Zeroable[0], &Zeroable[4],
5826 [](bool M) { return !M; }) > 1 &&
5827 "We expect at least two non-zero elements!");
5829 // We only know how to deal with build_vector nodes where elements are either
5830 // zeroable or extract_vector_elt with constant index.
5831 SDValue FirstNonZero;
5832 unsigned FirstNonZeroIdx;
5833 for (unsigned i=0; i < 4; ++i) {
5836 SDValue Elt = Op->getOperand(i);
5837 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5838 !isa<ConstantSDNode>(Elt.getOperand(1)))
5840 // Make sure that this node is extracting from a 128-bit vector.
5841 MVT VT = Elt.getOperand(0).getSimpleValueType();
5842 if (!VT.is128BitVector())
5844 if (!FirstNonZero.getNode()) {
5846 FirstNonZeroIdx = i;
5850 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
5851 SDValue V1 = FirstNonZero.getOperand(0);
5852 MVT VT = V1.getSimpleValueType();
5854 // See if this build_vector can be lowered as a blend with zero.
5856 unsigned EltMaskIdx, EltIdx;
5858 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
5859 if (Zeroable[EltIdx]) {
5860 // The zero vector will be on the right hand side.
5861 Mask[EltIdx] = EltIdx+4;
5865 Elt = Op->getOperand(EltIdx);
5866 // By construction, Elt is an EXTRACT_VECTOR_ELT with constant index.
5867 EltMaskIdx = cast<ConstantSDNode>(Elt.getOperand(1))->getZExtValue();
5868 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
5870 Mask[EltIdx] = EltIdx;
5874 // Let the shuffle legalizer deal with blend operations.
5875 SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
5876 if (V1.getSimpleValueType() != VT)
5877 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), VT, V1);
5878 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, &Mask[0]);
5881 // See if we can lower this build_vector to a INSERTPS.
5882 if (!Subtarget->hasSSE41())
5885 SDValue V2 = Elt.getOperand(0);
5886 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
5889 bool CanFold = true;
5890 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
5894 SDValue Current = Op->getOperand(i);
5895 SDValue SrcVector = Current->getOperand(0);
5898 CanFold = SrcVector == V1 &&
5899 cast<ConstantSDNode>(Current.getOperand(1))->getZExtValue() == i;
5905 assert(V1.getNode() && "Expected at least two non-zero elements!");
5906 if (V1.getSimpleValueType() != MVT::v4f32)
5907 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), MVT::v4f32, V1);
5908 if (V2.getSimpleValueType() != MVT::v4f32)
5909 V2 = DAG.getNode(ISD::BITCAST, SDLoc(V2), MVT::v4f32, V2);
5911 // Ok, we can emit an INSERTPS instruction.
5913 for (int i = 0; i < 4; ++i)
5917 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
5918 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
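// Added note (not in the original source): the INSERTPS immediate packs the
// source element in bits [7:6], the destination slot in bits [5:4], and the
// zero mask in bits [3:0].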
5919 SDValue Result = DAG.getNode(X86ISD::INSERTPS, SDLoc(Op), MVT::v4f32, V1, V2,
5920 DAG.getIntPtrConstant(InsertPSMask));
5921 return DAG.getNode(ISD::BITCAST, SDLoc(Op), VT, Result);
5924 /// Return a vector logical shift node.
5925 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
5926 unsigned NumBits, SelectionDAG &DAG,
5927 const TargetLowering &TLI, SDLoc dl) {
5928 assert(VT.is128BitVector() && "Unknown type for VShift");
5929 MVT ShVT = MVT::v2i64;
5930 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
5931 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
5932 MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(SrcOp.getValueType());
5933 SDValue ShiftVal = DAG.getConstant(NumBits, ScalarShiftTy);
5934 return DAG.getNode(ISD::BITCAST, dl, VT,
5935 DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
5939 LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
5941 // Check if the scalar load can be widened into a vector load. And if
5942 // the address is "base + cst" see if the cst can be "absorbed" into
5943 // the shuffle mask.
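// For instance (added note, not in the original comment), an f32 load from a
// 16-byte aligned stack object at offset 8 can be widened to a v4f32 load of
// the whole object followed by a splat of element 2.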
5944 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
5945 SDValue Ptr = LD->getBasePtr();
5946 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
5948 EVT PVT = LD->getValueType(0);
5949 if (PVT != MVT::i32 && PVT != MVT::f32)
5954 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
5955 FI = FINode->getIndex();
5957 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
5958 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
5959 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5960 Offset = Ptr.getConstantOperandVal(1);
5961 Ptr = Ptr.getOperand(0);
5966 // FIXME: 256-bit vector instructions don't require a strict alignment,
5967 // improve this code to support it better.
5968 unsigned RequiredAlign = VT.getSizeInBits()/8;
5969 SDValue Chain = LD->getChain();
5970 // Make sure the stack object alignment is at least 16 or 32.
5971 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
5972 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
5973 if (MFI->isFixedObjectIndex(FI)) {
5974 // Can't change the alignment. FIXME: It's possible to compute
5975 // the exact stack offset and reference FI + adjusted offset instead;
5976 // that's the way to implement it if someone *really* cares.
5979 MFI->setObjectAlignment(FI, RequiredAlign);
5983 // (Offset % 16 or 32) must be a multiple of 4. The address is then
5984 // Ptr + (Offset & ~15).
5987 if ((Offset % RequiredAlign) & 3)
5989 int64_t StartOffset = Offset & ~(RequiredAlign-1);
5991 Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(),
5992 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType()));
5994 int EltNo = (Offset - StartOffset) >> 2;
5995 unsigned NumElems = VT.getVectorNumElements();
5997 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
5998 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
5999 LD->getPointerInfo().getWithOffset(StartOffset),
6000 false, false, false, 0);
6002 SmallVector<int, 8> Mask;
6003 for (unsigned i = 0; i != NumElems; ++i)
6004 Mask.push_back(EltNo);
6006 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
6012 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
6013 /// elements can be replaced by a single large load which has the same value as
6014 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
6016 /// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
6018 /// FIXME: we'd also like to handle the case where the last elements are zero
6019 /// rather than undef via VZEXT_LOAD, but we do not detect that case today.
6020 /// There's even a handy isZeroNode for that purpose.
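/// Another illustrative case (added note, not in the original comment):
/// <load i32 *a, load i32 *a+4, load i32 *a+8, load i32 *a+12> becomes a
/// single v4i32 load from a, provided the loads are consecutive.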
6021 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
6022 SDLoc &DL, SelectionDAG &DAG,
6023 bool isAfterLegalize) {
6024 unsigned NumElems = Elts.size();
6026 LoadSDNode *LDBase = nullptr;
6027 unsigned LastLoadedElt = -1U;
6029 // For each element in the initializer, see if we've found a load or an undef.
6030 // If we don't find an initial load element, or later load elements are
6031 // non-consecutive, bail out.
6032 for (unsigned i = 0; i < NumElems; ++i) {
6033 SDValue Elt = Elts[i];
6034 // Look through a bitcast.
6035 if (Elt.getNode() && Elt.getOpcode() == ISD::BITCAST)
6036 Elt = Elt.getOperand(0);
6037 if (!Elt.getNode() ||
6038 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
6041 if (Elt.getNode()->getOpcode() == ISD::UNDEF)
6043 LDBase = cast<LoadSDNode>(Elt.getNode());
6047 if (Elt.getOpcode() == ISD::UNDEF)
6050 LoadSDNode *LD = cast<LoadSDNode>(Elt);
6051 EVT LdVT = Elt.getValueType();
6052 // Each loaded element must be the correct fractional portion of the
6053 // requested vector load.
6054 if (LdVT.getSizeInBits() != VT.getSizeInBits() / NumElems)
6056 if (!DAG.isConsecutiveLoad(LD, LDBase, LdVT.getSizeInBits() / 8, i))
6061 // If we have found an entire vector of loads and undefs, then return a large
6062 // load of the entire vector width starting at the base pointer. If we found
6063 // consecutive loads for the low half, generate a vzext_load node.
6064 if (LastLoadedElt == NumElems - 1) {
6065 assert(LDBase && "Did not find base load for merging consecutive loads");
6066 EVT EltVT = LDBase->getValueType(0);
6067 // Ensure that the input vector size for the merged loads matches the
6068 // cumulative size of the input elements.
6069 if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems)
6072 if (isAfterLegalize &&
6073 !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
6076 SDValue NewLd = SDValue();
6078 NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
6079 LDBase->getPointerInfo(), LDBase->isVolatile(),
6080 LDBase->isNonTemporal(), LDBase->isInvariant(),
6081 LDBase->getAlignment());
6083 if (LDBase->hasAnyUseOfValue(1)) {
6084 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6086 SDValue(NewLd.getNode(), 1));
6087 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6088 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6089 SDValue(NewLd.getNode(), 1));
6095 // TODO: The code below fires only for loading the low v2i32 / v2f32
6096 // of a v4i32 / v4f32. It's probably worth generalizing.
6097 EVT EltVT = VT.getVectorElementType();
6098 if (NumElems == 4 && LastLoadedElt == 1 && (EltVT.getSizeInBits() == 32) &&
6099 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
6100 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
6101 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
6103 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::i64,
6104 LDBase->getPointerInfo(),
6105 LDBase->getAlignment(),
6106 false/*isVolatile*/, true/*ReadMem*/,
6109 // Make sure the newly-created LOAD is in the same position as LDBase in
6110 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and
6111 // update uses of LDBase's output chain to use the TokenFactor.
6112 if (LDBase->hasAnyUseOfValue(1)) {
6113 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6114 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1));
6115 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6116 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6117 SDValue(ResNode.getNode(), 1));
6120 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
6125 /// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
6126 /// to generate a splat value for the following cases:
6127 /// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
6128 /// 2. A splat shuffle which uses a scalar_to_vector node which comes from
6129 /// a scalar load, or a constant.
6130 /// The VBROADCAST node is returned when a pattern is found,
6131 /// or SDValue() otherwise.
6132 static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
6133 SelectionDAG &DAG) {
6134 // VBROADCAST requires AVX.
6135 // TODO: Splats could be generated for non-AVX CPUs using SSE
6136 // instructions, but there's less potential gain for only 128-bit vectors.
6137 if (!Subtarget->hasAVX())
6140 MVT VT = Op.getSimpleValueType();
6143 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
6144 "Unsupported vector type for broadcast.");
6149 switch (Op.getOpcode()) {
6151 // Unknown pattern found.
6154 case ISD::BUILD_VECTOR: {
6155 auto *BVOp = cast<BuildVectorSDNode>(Op.getNode());
6156 BitVector UndefElements;
6157 SDValue Splat = BVOp->getSplatValue(&UndefElements);
6159 // We need a splat of a single value to use broadcast, and it doesn't
6160 // make any sense if the value is only in one element of the vector.
6161 if (!Splat || (VT.getVectorNumElements() - UndefElements.count()) <= 1)
6165 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6166 Ld.getOpcode() == ISD::ConstantFP);
6168 // Make sure that all of the users of a non-constant load are from the
6169 // BUILD_VECTOR node.
6170 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
6175 case ISD::VECTOR_SHUFFLE: {
6176 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
6178 // Shuffles must have a splat mask where the first element is
6179 // splatted.
6180 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0)
6183 SDValue Sc = Op.getOperand(0);
6184 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
6185 Sc.getOpcode() != ISD::BUILD_VECTOR) {
6187 if (!Subtarget->hasInt256())
6190 // Use the register form of the broadcast instruction available on AVX2.
6191 if (VT.getSizeInBits() >= 256)
6192 Sc = Extract128BitVector(Sc, 0, DAG, dl);
6193 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
6196 Ld = Sc.getOperand(0);
6197 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6198 Ld.getOpcode() == ISD::ConstantFP);
6200 // The scalar_to_vector node and the suspected
6201 // load node must have exactly one user.
6202 // Constants may have multiple users.
6204 // AVX-512 has register version of the broadcast
6205 bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() &&
6206 Ld.getValueType().getSizeInBits() >= 32;
6207 if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
6214 unsigned ScalarSize = Ld.getValueType().getSizeInBits();
6215 bool IsGE256 = (VT.getSizeInBits() >= 256);
6217 // When optimizing for size, generate up to 5 extra bytes for a broadcast
6218 // instruction to save 8 or more bytes of constant pool data.
6219 // TODO: If multiple splats are generated to load the same constant,
6220 // it may be detrimental to overall size. There needs to be a way to detect
6221 // that condition to know if this is truly a size win.
6222 const Function *F = DAG.getMachineFunction().getFunction();
6223 bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize);
6225 // Handle broadcasting a single constant scalar from the constant pool
6226 // into a vector.
6227 // On Sandybridge (no AVX2), it is still better to load a constant vector
6228 // from the constant pool and not to broadcast it from a scalar.
6229 // But override that restriction when optimizing for size.
6230 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
6231 if (ConstSplatVal && (Subtarget->hasAVX2() || OptForSize)) {
6232 EVT CVT = Ld.getValueType();
6233 assert(!CVT.isVector() && "Must not broadcast a vector type");
6235 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
6236 // For size optimization, also splat v2f64 and v2i64, and for size opt
6237 // with AVX2, also splat i8 and i16.
6238 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
6239 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6240 (OptForSize && (ScalarSize == 64 || Subtarget->hasAVX2()))) {
6241 const Constant *C = nullptr;
6242 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
6243 C = CI->getConstantIntValue();
6244 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
6245 C = CF->getConstantFPValue();
6247 assert(C && "Invalid constant type");
6249 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6250 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
6251 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
6252 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP,
6253 MachinePointerInfo::getConstantPool(),
6254 false, false, false, Alignment);
6256 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6260 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
6262 // Handle AVX2 in-register broadcasts.
6263 if (!IsLoad && Subtarget->hasInt256() &&
6264 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
6265 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6267 // The scalar source must be a normal load.
6271 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6272 (Subtarget->hasVLX() && ScalarSize == 64))
6273 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6275 // The integer check is needed for the 64-bit into 128-bit case, so it doesn't
6276 // match double, since there is no vbroadcastsd xmm.
6277 if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) {
6278 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
6279 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6282 // Unsupported broadcast.
6286 /// \brief For an EXTRACT_VECTOR_ELT with a constant index return the real
6287 /// underlying vector and index.
6289 /// Modifies \p ExtractedFromVec to the real vector and returns the real
6291 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
6293 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
6294 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
6297 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
6299 // (extract_vector_elt (v8f32 %vreg1), Constant<6>)
6301 // (extract_vector_elt (vector_shuffle<2,u,u,u>
6302 // (extract_subvector (v8f32 %vreg0), Constant<4>),
6305 // In this case the vector is the extract_subvector expression and the index
6306 // is 2, as specified by the shuffle.
6307 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
6308 SDValue ShuffleVec = SVOp->getOperand(0);
6309 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
6310 assert(ShuffleVecVT.getVectorElementType() ==
6311 ExtractedFromVec.getSimpleValueType().getVectorElementType());
6313 int ShuffleIdx = SVOp->getMaskElt(Idx);
6314 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
6315 ExtractedFromVec = ShuffleVec;
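// buildFromShuffleMostly - (added descriptive note, not in the original
// source) Try to express a BUILD_VECTOR whose operands are mostly
// extract_vector_elt from at most two source vectors as a vector shuffle,
// with at most one remaining element inserted afterwards via
// INSERT_VECTOR_ELT.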
6321 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
6322 MVT VT = Op.getSimpleValueType();
6324 // Skip if insert_vec_elt is not supported.
6325 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6326 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
6330 unsigned NumElems = Op.getNumOperands();
6334 SmallVector<unsigned, 4> InsertIndices;
6335 SmallVector<int, 8> Mask(NumElems, -1);
6337 for (unsigned i = 0; i != NumElems; ++i) {
6338 unsigned Opc = Op.getOperand(i).getOpcode();
6340 if (Opc == ISD::UNDEF)
6343 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
6344 // Quit if more than 1 element needs inserting.
6345 if (InsertIndices.size() > 1)
6348 InsertIndices.push_back(i);
6352 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
6353 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
6354 // Quit if non-constant index.
6355 if (!isa<ConstantSDNode>(ExtIdx))
6357 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
6359 // Quit if extracted from vector of different type.
6360 if (ExtractedFromVec.getValueType() != VT)
6363 if (!VecIn1.getNode())
6364 VecIn1 = ExtractedFromVec;
6365 else if (VecIn1 != ExtractedFromVec) {
6366 if (!VecIn2.getNode())
6367 VecIn2 = ExtractedFromVec;
6368 else if (VecIn2 != ExtractedFromVec)
6369 // Quit if more than 2 vectors to shuffle
6373 if (ExtractedFromVec == VecIn1)
6375 else if (ExtractedFromVec == VecIn2)
6376 Mask[i] = Idx + NumElems;
6379 if (!VecIn1.getNode())
6382 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
6383 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]);
6384 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
6385 unsigned Idx = InsertIndices[i];
6386 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
6387 DAG.getIntPtrConstant(Idx));
6393 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
6395 X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
6397 MVT VT = Op.getSimpleValueType();
6398 assert((VT.getVectorElementType() == MVT::i1) && (VT.getSizeInBits() <= 16) &&
6399 "Unexpected type in LowerBUILD_VECTORvXi1!");
6402 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6403 SDValue Cst = DAG.getTargetConstant(0, MVT::i1);
6404 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6405 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6408 if (ISD::isBuildVectorAllOnes(Op.getNode())) {
6409 SDValue Cst = DAG.getTargetConstant(1, MVT::i1);
6410 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6411 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6414 bool AllContants = true;
6415 uint64_t Immediate = 0;
6416 int NonConstIdx = -1;
6417 bool IsSplat = true;
6418 unsigned NumNonConsts = 0;
6419 unsigned NumConsts = 0;
6420 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
6421 SDValue In = Op.getOperand(idx);
6422 if (In.getOpcode() == ISD::UNDEF)
6424 if (!isa<ConstantSDNode>(In)) {
6425 AllContants = false;
6430 if (cast<ConstantSDNode>(In)->getZExtValue())
6431 Immediate |= (1ULL << idx);
6433 if (In != Op.getOperand(0))
6438 SDValue FullMask = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1,
6439 DAG.getConstant(Immediate, MVT::i16));
6440 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, FullMask,
6441 DAG.getIntPtrConstant(0));
6444 if (NumNonConsts == 1 && NonConstIdx != 0) {
6447 SDValue VecAsImm = DAG.getConstant(Immediate,
6448 MVT::getIntegerVT(VT.getSizeInBits()));
6449 DstVec = DAG.getNode(ISD::BITCAST, dl, VT, VecAsImm);
6452 DstVec = DAG.getUNDEF(VT);
6453 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
6454 Op.getOperand(NonConstIdx),
6455 DAG.getIntPtrConstant(NonConstIdx));
6457 if (!IsSplat && (NonConstIdx != 0))
6458 llvm_unreachable("Unsupported BUILD_VECTOR operation");
6459 MVT SelectVT = (VT == MVT::v16i1)? MVT::i16 : MVT::i8;
6462 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6463 DAG.getConstant(-1, SelectVT),
6464 DAG.getConstant(0, SelectVT));
6466 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6467 DAG.getConstant((Immediate | 1), SelectVT),
6468 DAG.getConstant(Immediate, SelectVT));
6469 return DAG.getNode(ISD::BITCAST, dl, VT, Select);
6472 /// \brief Return true if \p N implements a horizontal binop, and place the
6473 /// operands for the horizontal binop into V0 and V1.
6475 /// This is a helper function of PerformBUILD_VECTORCombine.
6476 /// This function checks that the build_vector \p N in input implements a
6477 /// horizontal operation. Parameter \p Opcode defines the kind of horizontal
6478 /// operation to match.
6479 /// For example, if \p Opcode is equal to ISD::ADD, then this function
6480 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
6481 /// is equal to ISD::SUB, then this function checks if this is a horizontal
6484 /// This function only analyzes elements of \p N whose indices are
6485 /// in range [BaseIdx, LastIdx).
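///
/// For example, with \p Opcode equal to ISD::FADD, a v4f32 build_vector whose
/// operands are
///   (fadd (extract_vector_elt A, 0), (extract_vector_elt A, 1)),
///   (fadd (extract_vector_elt A, 2), (extract_vector_elt A, 3)),
///   (fadd (extract_vector_elt B, 0), (extract_vector_elt B, 1)),
///   (fadd (extract_vector_elt B, 2), (extract_vector_elt B, 3))
/// is recognized as a horizontal add, with V0 set to A and V1 set to B.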
6486 static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
6488 unsigned BaseIdx, unsigned LastIdx,
6489 SDValue &V0, SDValue &V1) {
6490 EVT VT = N->getValueType(0);
6492 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
6493 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
6494 "Invalid Vector in input!");
6496 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
6497 bool CanFold = true;
6498 unsigned ExpectedVExtractIdx = BaseIdx;
6499 unsigned NumElts = LastIdx - BaseIdx;
6500 V0 = DAG.getUNDEF(VT);
6501 V1 = DAG.getUNDEF(VT);
6503 // Check if N implements a horizontal binop.
6504 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
6505 SDValue Op = N->getOperand(i + BaseIdx);
6508 if (Op->getOpcode() == ISD::UNDEF) {
6509 // Update the expected vector extract index.
6510 if (i * 2 == NumElts)
6511 ExpectedVExtractIdx = BaseIdx;
6512 ExpectedVExtractIdx += 2;
6516 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
6521 SDValue Op0 = Op.getOperand(0);
6522 SDValue Op1 = Op.getOperand(1);
6524 // Try to match the following pattern:
6525 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
6526 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6527 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6528 Op0.getOperand(0) == Op1.getOperand(0) &&
6529 isa<ConstantSDNode>(Op0.getOperand(1)) &&
6530 isa<ConstantSDNode>(Op1.getOperand(1)));
6534 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6535 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
6537 if (i * 2 < NumElts) {
6538 if (V0.getOpcode() == ISD::UNDEF)
6539 V0 = Op0.getOperand(0);
6541 if (V1.getOpcode() == ISD::UNDEF)
6542 V1 = Op0.getOperand(0);
6543 if (i * 2 == NumElts)
6544 ExpectedVExtractIdx = BaseIdx;
6547 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
6548 if (I0 == ExpectedVExtractIdx)
6549 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
6550 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
6551 // Try to match the following dag sequence:
6552 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
6553 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
6557 ExpectedVExtractIdx += 2;
6563 /// \brief Emit a sequence of two 128-bit horizontal add/sub followed by
6564 /// a concat_vector.
6566 /// This is a helper function of PerformBUILD_VECTORCombine.
6567 /// This function expects two 256-bit vectors called V0 and V1.
6568 /// At first, each vector is split into two separate 128-bit vectors.
6569 /// Then, the resulting 128-bit vectors are used to implement two
6570 /// horizontal binary operations.
6572 /// The kind of horizontal binary operation is defined by \p X86Opcode.
6574 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed in input to
6575 /// the two new horizontal binops.
6576 /// When Mode is set, the first horizontal binop dag node would take as input
6577 /// the lower 128-bit of V0 and the upper 128-bit of V0. The second
6578 /// horizontal binop dag node would take as input the lower 128-bit of V1
6579 /// and the upper 128-bit of V1.
6581 /// HADD V0_LO, V0_HI
6582 /// HADD V1_LO, V1_HI
6584 /// Otherwise, the first horizontal binop dag node takes as input the lower
6585 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
6586 /// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
6588 /// HADD V0_LO, V1_LO
6589 /// HADD V0_HI, V1_HI
6591 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
6592 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
6593 /// the upper 128-bits of the result.
6594 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
6595 SDLoc DL, SelectionDAG &DAG,
6596 unsigned X86Opcode, bool Mode,
6597 bool isUndefLO, bool isUndefHI) {
6598 EVT VT = V0.getValueType();
6599 assert(VT.is256BitVector() && VT == V1.getValueType() &&
6600 "Invalid nodes in input!");
6602 unsigned NumElts = VT.getVectorNumElements();
6603 SDValue V0_LO = Extract128BitVector(V0, 0, DAG, DL);
6604 SDValue V0_HI = Extract128BitVector(V0, NumElts/2, DAG, DL);
6605 SDValue V1_LO = Extract128BitVector(V1, 0, DAG, DL);
6606 SDValue V1_HI = Extract128BitVector(V1, NumElts/2, DAG, DL);
6607 EVT NewVT = V0_LO.getValueType();
6609 SDValue LO = DAG.getUNDEF(NewVT);
6610 SDValue HI = DAG.getUNDEF(NewVT);
6613 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6614 if (!isUndefLO && V0->getOpcode() != ISD::UNDEF)
6615 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
6616 if (!isUndefHI && V1->getOpcode() != ISD::UNDEF)
6617 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
6619 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6620 if (!isUndefLO && (V0_LO->getOpcode() != ISD::UNDEF ||
6621 V1_LO->getOpcode() != ISD::UNDEF))
6622 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
6624 if (!isUndefHI && (V0_HI->getOpcode() != ISD::UNDEF ||
6625 V1_HI->getOpcode() != ISD::UNDEF))
6626 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
6629 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
6632 /// \brief Try to fold a build_vector that performs an 'addsub' into the
6633 /// sequence of 'vadd + vsub + blendi'.
6634 static SDValue matchAddSub(const BuildVectorSDNode *BV, SelectionDAG &DAG,
6635 const X86Subtarget *Subtarget) {
6637 EVT VT = BV->getValueType(0);
6638 unsigned NumElts = VT.getVectorNumElements();
6639 SDValue InVec0 = DAG.getUNDEF(VT);
6640 SDValue InVec1 = DAG.getUNDEF(VT);
6642 assert((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
6643 VT == MVT::v2f64) && "build_vector with an invalid type found!");
6645 // Odd-numbered elements in the input build vector are obtained from
6646 // adding two integer/float elements.
6647 // Even-numbered elements in the input build vector are obtained from
6648 // subtracting two integer/float elements.
6649 unsigned ExpectedOpcode = ISD::FSUB;
6650 unsigned NextExpectedOpcode = ISD::FADD;
6651 bool AddFound = false;
6652 bool SubFound = false;
6654 for (unsigned i = 0, e = NumElts; i != e; i++) {
6655 SDValue Op = BV->getOperand(i);
6657 // Skip 'undef' values.
6658 unsigned Opcode = Op.getOpcode();
6659 if (Opcode == ISD::UNDEF) {
6660 std::swap(ExpectedOpcode, NextExpectedOpcode);
6664 // Early exit if we found an unexpected opcode.
6665 if (Opcode != ExpectedOpcode)
6668 SDValue Op0 = Op.getOperand(0);
6669 SDValue Op1 = Op.getOperand(1);
6671 // Try to match the following pattern:
6672 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
6673 // Early exit if we cannot match that sequence.
6674 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6675 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6676 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
6677 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
6678 Op0.getOperand(1) != Op1.getOperand(1))
6681 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6685 // We found a valid add/sub node. Update the information accordingly.
6691 // Update InVec0 and InVec1.
6692 if (InVec0.getOpcode() == ISD::UNDEF)
6693 InVec0 = Op0.getOperand(0);
6694 if (InVec1.getOpcode() == ISD::UNDEF)
6695 InVec1 = Op1.getOperand(0);
6697 // Make sure that operands in input to each add/sub node always
6698 // come from the same pair of vectors.
6699 if (InVec0 != Op0.getOperand(0)) {
6700 if (ExpectedOpcode == ISD::FSUB)
6703 // FADD is commutable. Try to commute the operands
6704 // and then test again.
6705 std::swap(Op0, Op1);
6706 if (InVec0 != Op0.getOperand(0))
6710 if (InVec1 != Op1.getOperand(0))
6713 // Update the pair of expected opcodes.
6714 std::swap(ExpectedOpcode, NextExpectedOpcode);
6717 // Don't try to fold this build_vector into an ADDSUB if the inputs are undef.
6718 if (AddFound && SubFound && InVec0.getOpcode() != ISD::UNDEF &&
6719 InVec1.getOpcode() != ISD::UNDEF)
6720 return DAG.getNode(X86ISD::ADDSUB, DL, VT, InVec0, InVec1);
6725 static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
6726 const X86Subtarget *Subtarget) {
6728 EVT VT = N->getValueType(0);
6729 unsigned NumElts = VT.getVectorNumElements();
6730 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);
6731 SDValue InVec0, InVec1;
6733 // Try to match an ADDSUB.
6734 if ((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
6735 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
6736 SDValue Value = matchAddSub(BV, DAG, Subtarget);
6737 if (Value.getNode())
6741 // Try to match horizontal ADD/SUB.
6742 unsigned NumUndefsLO = 0;
6743 unsigned NumUndefsHI = 0;
6744 unsigned Half = NumElts/2;
6746 // Count the number of UNDEF operands in the input build_vector.
6747 for (unsigned i = 0, e = Half; i != e; ++i)
6748 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6751 for (unsigned i = Half, e = NumElts; i != e; ++i)
6752 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6755 // Early exit if this is either a build_vector of all UNDEFs or all the
6756 // operands but one are UNDEF.
6757 if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
6760 if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget->hasSSE3()) {
6761 // Try to match an SSE3 float HADD/HSUB.
6762 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6763 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6765 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6766 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6767 } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) {
6768 // Try to match an SSSE3 integer HADD/HSUB.
6769 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6770 return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
6772 if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6773 return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
6776 if (!Subtarget->hasAVX())
6779 if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
6780 // Try to match an AVX horizontal add/sub of packed single/double
6781 // precision floating point values from 256-bit vectors.
6782 SDValue InVec2, InVec3;
6783 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
6784 isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
6785 ((InVec0.getOpcode() == ISD::UNDEF ||
6786 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6787 ((InVec1.getOpcode() == ISD::UNDEF ||
6788 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6789 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6791 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
6792 isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
6793 ((InVec0.getOpcode() == ISD::UNDEF ||
6794 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6795 ((InVec1.getOpcode() == ISD::UNDEF ||
6796 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6797 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6798 } else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
6799 // Try to match an AVX2 horizontal add/sub of signed integers.
6800 SDValue InVec2, InVec3;
6802 bool CanFold = true;
6804 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
6805 isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
6806 ((InVec0.getOpcode() == ISD::UNDEF ||
6807 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6808 ((InVec1.getOpcode() == ISD::UNDEF ||
6809 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6810 X86Opcode = X86ISD::HADD;
6811 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
6812 isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
6813 ((InVec0.getOpcode() == ISD::UNDEF ||
6814 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6815 ((InVec1.getOpcode() == ISD::UNDEF ||
6816 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6817 X86Opcode = X86ISD::HSUB;
6822 // Fold this build_vector into a single horizontal add/sub.
6823 // Do this only if the target has AVX2.
6824 if (Subtarget->hasAVX2())
6825 return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
6827 // Do not try to expand this build_vector into a pair of horizontal
6828 // add/sub if we can emit a pair of scalar add/sub.
6829 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6832 // Convert this build_vector into a pair of horizontal binops followed by a concat vector.
6834 bool isUndefLO = NumUndefsLO == Half;
6835 bool isUndefHI = NumUndefsHI == Half;
6836 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
6837 isUndefLO, isUndefHI);
6841 if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
6842 VT == MVT::v16i16) && Subtarget->hasAVX()) {
6844 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6845 X86Opcode = X86ISD::HADD;
6846 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6847 X86Opcode = X86ISD::HSUB;
6848 else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6849 X86Opcode = X86ISD::FHADD;
6850 else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6851 X86Opcode = X86ISD::FHSUB;
6855 // Don't try to expand this build_vector into a pair of horizontal add/sub
6856 // if we can simply emit a pair of scalar add/sub.
6857 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6860 // Convert this build_vector into two horizontal add/sub nodes followed by a concat vector.
6862 bool isUndefLO = NumUndefsLO == Half;
6863 bool isUndefHI = NumUndefsHI == Half;
6864 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
6865 isUndefLO, isUndefHI);
6872 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
6875 MVT VT = Op.getSimpleValueType();
6876 MVT ExtVT = VT.getVectorElementType();
6877 unsigned NumElems = Op.getNumOperands();
6879 // Lower AVX-512 predicate (vXi1) vectors through a dedicated path.
6880 if (VT.getScalarType() == MVT::i1 && Subtarget->hasAVX512())
6881 return LowerBUILD_VECTORvXi1(Op, DAG);
6883 // Vectors containing all zeros can be matched by pxor and xorps later
6884 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6885 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
6886 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
6887 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
6890 return getZeroVector(VT, Subtarget, DAG, dl);
6893 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
6894 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
6895 // vpcmpeqd on 256-bit vectors.
6896 if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
6897 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256()))
6900 if (!VT.is512BitVector())
6901 return getOnesVector(VT, Subtarget->hasInt256(), DAG, dl);
6904 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
6905 if (Broadcast.getNode())
6908 unsigned EVTBits = ExtVT.getSizeInBits();
6910 unsigned NumZero = 0;
6911 unsigned NumNonZero = 0;
6912 unsigned NonZeros = 0;
6913 bool IsAllConstants = true;
6914 SmallSet<SDValue, 8> Values;
6915 for (unsigned i = 0; i < NumElems; ++i) {
6916 SDValue Elt = Op.getOperand(i);
6917 if (Elt.getOpcode() == ISD::UNDEF)
6920 if (Elt.getOpcode() != ISD::Constant &&
6921 Elt.getOpcode() != ISD::ConstantFP)
6922 IsAllConstants = false;
6923 if (X86::isZeroNode(Elt))
6926 NonZeros |= (1 << i);
6931 // All undef vector. Return an UNDEF. All zero vectors were handled above.
6932 if (NumNonZero == 0)
6933 return DAG.getUNDEF(VT);
6935 // Special case for single non-zero, non-undef, element.
6936 if (NumNonZero == 1) {
6937 unsigned Idx = countTrailingZeros(NonZeros);
6938 SDValue Item = Op.getOperand(Idx);
6940 // If this is an insertion of an i64 value on x86-32, and if the top bits of
6941 // the value are obviously zero, truncate the value to i32 and do the
6942 // insertion that way. Only do this if the value is non-constant or if the
6943 // value is a constant being inserted into element 0. It is cheaper to do
6944 // a constant pool load than it is to do a movd + shuffle.
6945 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
6946 (!IsAllConstants || Idx == 0)) {
6947 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
6949 assert(VT == MVT::v2i64 && "Expected an SSE value type!");
6950 EVT VecVT = MVT::v4i32;
6951 unsigned VecElts = 4;
6953 // Truncate the value (which may itself be a constant) to i32, and
6954 // convert it to a vector with movd (S2V+shuffle to zero extend).
6955 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
6956 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
6958 // If using the new shuffle lowering, just directly insert this.
6959 if (ExperimentalVectorShuffleLowering)
6961 ISD::BITCAST, dl, VT,
6962 getShuffleVectorZeroOrUndef(Item, Idx * 2, true, Subtarget, DAG));
6964 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6966 // Now we have our 32-bit value zero extended in the low element of
6967 // a vector. If Idx != 0, swizzle it into place.
6969 SmallVector<int, 4> Mask;
6970 Mask.push_back(Idx);
6971 for (unsigned i = 1; i != VecElts; ++i)
6973 Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT),
6976 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
6980 // If we have a constant or non-constant insertion into the low element of
6981 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
6982 // the rest of the elements. This will be matched as movd/movq/movss/movsd
6983 // depending on what the source datatype is.
6986 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6988 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
6989 (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
6990 if (VT.is256BitVector() || VT.is512BitVector()) {
6991 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
6992 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
6993 Item, DAG.getIntPtrConstant(0));
6995 assert(VT.is128BitVector() && "Expected an SSE value type!");
6996 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6997 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
6998 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7001 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
7002 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
7003 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
7004 if (VT.is256BitVector()) {
7005 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
7006 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
7008 assert(VT.is128BitVector() && "Expected an SSE value type!");
7009 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7011 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
7015 // Is it a vector logical left shift?
7016 if (NumElems == 2 && Idx == 1 &&
7017 X86::isZeroNode(Op.getOperand(0)) &&
7018 !X86::isZeroNode(Op.getOperand(1))) {
7019 unsigned NumBits = VT.getSizeInBits();
7020 return getVShift(true, VT,
7021 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
7022 VT, Op.getOperand(1)),
7023 NumBits/2, DAG, *this, dl);
7026 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
7029 // Otherwise, if this is a vector with i32 or f32 elements, and the element
7030 // is a non-constant being inserted into an element other than the low one,
7031 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
7032 // movd/movss) to move this into the low element, then shuffle it into place.
7034 if (EVTBits == 32) {
7035 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7037 // If using the new shuffle lowering, just directly insert this.
7038 if (ExperimentalVectorShuffleLowering)
7039 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
7041 // Turn it into a shuffle of zero and zero-extended scalar to vector.
7042 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG);
7043 SmallVector<int, 8> MaskVec;
7044 for (unsigned i = 0; i != NumElems; ++i)
7045 MaskVec.push_back(i == Idx ? 0 : 1);
7046 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
7050 // Splat is obviously ok. Let legalizer expand it to a shuffle.
7051 if (Values.size() == 1) {
7052 if (EVTBits == 32) {
7053 // Instead of a shuffle like this:
7054 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
7055 // Check if it's possible to issue this instead.
7056 // shuffle (vload ptr), undef, <1, 1, 1, 1>
7057 unsigned Idx = countTrailingZeros(NonZeros);
7058 SDValue Item = Op.getOperand(Idx);
7059 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
7060 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
7065 // A vector full of immediates; various special cases are already
7066 // handled, so this is best done with a single constant-pool load.
7070 // For AVX-length vectors, see if we can use a vector load to get all of the
7071 // elements, otherwise build the individual 128-bit pieces and use
7072 // shuffles to put them in place.
7073 if (VT.is256BitVector() || VT.is512BitVector()) {
7074 SmallVector<SDValue, 64> V;
7075 for (unsigned i = 0; i != NumElems; ++i)
7076 V.push_back(Op.getOperand(i));
7078 // Check for a build vector of consecutive loads.
7079 if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
7082 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
7084 // Build both the lower and upper subvectors.
7085 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7086 makeArrayRef(&V[0], NumElems/2));
7087 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7088 makeArrayRef(&V[NumElems / 2], NumElems/2));
7090 // Recreate the wider vector with the lower and upper part.
7091 if (VT.is256BitVector())
7092 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7093 return Concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7096 // Let legalizer expand 2-wide build_vectors.
7097 if (EVTBits == 64) {
7098 if (NumNonZero == 1) {
7099 // One half is zero or undef.
7100 unsigned Idx = countTrailingZeros(NonZeros);
7101 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
7102 Op.getOperand(Idx));
7103 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
7108 // If element VT is < 32 bits, convert it to inserts into a zero vector.
7109 if (EVTBits == 8 && NumElems == 16) {
7110 SDValue V = LowerBuildVectorv16i8(Op, NonZeros, NumNonZero, NumZero, DAG,
7112 if (V.getNode()) return V;
7115 if (EVTBits == 16 && NumElems == 8) {
7116 SDValue V = LowerBuildVectorv8i16(Op, NonZeros, NumNonZero, NumZero, DAG,
7118 if (V.getNode()) return V;
7121 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
7122 if (EVTBits == 32 && NumElems == 4) {
7123 SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget, *this);
7128 // If element VT is == 32 bits, turn it into a number of shuffles.
7129 SmallVector<SDValue, 8> V(NumElems);
7130 if (NumElems == 4 && NumZero > 0) {
7131 for (unsigned i = 0; i < 4; ++i) {
7132 bool isZero = !(NonZeros & (1 << i));
7134 V[i] = getZeroVector(VT, Subtarget, DAG, dl);
7136 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7139 for (unsigned i = 0; i < 2; ++i) {
7140 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
7143 V[i] = V[i*2]; // Must be a zero vector.
7146 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
7149 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
7152 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
7157 bool Reverse1 = (NonZeros & 0x3) == 2;
7158 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
7162 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
7163 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
7165 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
7168 if (Values.size() > 1 && VT.is128BitVector()) {
7169 // Check for a build vector of consecutive loads.
7170 for (unsigned i = 0; i < NumElems; ++i)
7171 V[i] = Op.getOperand(i);
7173 // Check for elements which are consecutive loads.
7174 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false);
7178 // Check for a build vector built mostly from a shuffle plus a few inserted elements.
7179 SDValue Sh = buildFromShuffleMostly(Op, DAG);
7183 // For SSE 4.1, use insertps to put the high elements into the low element.
7184 if (Subtarget->hasSSE41()) {
7186 if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
7187 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
7189 Result = DAG.getUNDEF(VT);
7191 for (unsigned i = 1; i < NumElems; ++i) {
7192 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
7193 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
7194 Op.getOperand(i), DAG.getIntPtrConstant(i));
7199 // Otherwise, expand into a number of unpckl*, start by extending each of
7200 // our (non-undef) elements to the full vector width with the element in the
7201 // bottom slot of the vector (which generates no code for SSE).
7202 for (unsigned i = 0; i < NumElems; ++i) {
7203 if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
7204 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7206 V[i] = DAG.getUNDEF(VT);
7209 // Next, we iteratively mix elements, e.g. for v4f32:
7210 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
7211 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
7212 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
7213 unsigned EltStride = NumElems >> 1;
7214 while (EltStride != 0) {
7215 for (unsigned i = 0; i < EltStride; ++i) {
7216 // If V[i+EltStride] is undef and this is the first round of mixing,
7217 // then it is safe to just drop this shuffle: V[i] is already in the
7218 // right place, the one element (since it's the first round) being
7219 // inserted as undef can be dropped. This isn't safe for successive
7220 // rounds because they will permute elements within both vectors.
7221 if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
7222 EltStride == NumElems/2)
7225 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
7234 // LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction
7235 // to create 256-bit vectors from two other 128-bit ones.
7236 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7238 MVT ResVT = Op.getSimpleValueType();
7240 assert((ResVT.is256BitVector() ||
7241 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
7243 SDValue V1 = Op.getOperand(0);
7244 SDValue V2 = Op.getOperand(1);
7245 unsigned NumElems = ResVT.getVectorNumElements();
7246 if (ResVT.is256BitVector())
7247 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7249 if (Op.getNumOperands() == 4) {
7250 MVT HalfVT = MVT::getVectorVT(ResVT.getScalarType(),
7251 ResVT.getVectorNumElements()/2);
7252 SDValue V3 = Op.getOperand(2);
7253 SDValue V4 = Op.getOperand(3);
7254 return Concat256BitVectors(Concat128BitVectors(V1, V2, HalfVT, NumElems/2, DAG, dl),
7255 Concat128BitVectors(V3, V4, HalfVT, NumElems/2, DAG, dl), ResVT, NumElems, DAG, dl);
7257 return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7260 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7261 MVT LLVM_ATTRIBUTE_UNUSED VT = Op.getSimpleValueType();
7262 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
7263 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
7264 Op.getNumOperands() == 4)));
7266 // AVX can use the vinsertf128 instruction to create 256-bit vectors
7267 // from two other 128-bit ones.
7269 // A 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors.
7270 return LowerAVXCONCAT_VECTORS(Op, DAG);
7274 //===----------------------------------------------------------------------===//
7275 // Vector shuffle lowering
7277 // This is an experimental code path for lowering vector shuffles on x86. It is
7278 // designed to handle arbitrary vector shuffles and blends, gracefully
7279 // degrading performance as necessary. It works hard to recognize idiomatic
7280 // shuffles and lower them to optimal instruction patterns without leaving
7281 // a framework that allows reasonably efficient handling of all vector shuffle patterns.
7283 //===----------------------------------------------------------------------===//
7285 /// \brief Tiny helper function to identify a no-op mask.
7287 /// This is a somewhat boring predicate function. It checks whether the mask
7288 /// array input, which is assumed to be a single-input shuffle mask of the kind
7289 /// used by the X86 shuffle instructions (not a fully general
7290 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
7291 /// in-place shuffle are 'no-op's.
7292 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
7293 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7294 if (Mask[i] != -1 && Mask[i] != i)
7299 /// \brief Helper function to classify a mask as a single-input mask.
7301 /// This isn't a generic single-input test because in the vector shuffle
7302 /// lowering we canonicalize single inputs to be the first input operand. This
7303 /// means we can more quickly test for a single input by only checking whether
7304 /// an input from the second operand exists. We also assume that the size of
7305 /// the mask corresponds to the size of the input vectors, which isn't true in the
7306 /// fully general case.
7307 static bool isSingleInputShuffleMask(ArrayRef<int> Mask) {
7309 if (M >= (int)Mask.size())
7314 /// \brief Test whether there are elements crossing 128-bit lanes in this
7317 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
7318 /// and we routinely test for these.
7319 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
7320 int LaneSize = 128 / VT.getScalarSizeInBits();
7321 int Size = Mask.size();
7322 for (int i = 0; i < Size; ++i)
7323 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
7328 /// \brief Test whether a shuffle mask is equivalent within each 128-bit lane.
7330 /// This checks a shuffle mask to see if it is performing the same
7331 /// 128-bit lane-relative shuffle in each 128-bit lane. This trivially implies
7332 /// that it is also not lane-crossing. It may however involve a blend from the
7333 /// same lane of a second vector.
7335 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
7336 /// non-trivial to compute in the face of undef lanes. The representation is
7337 /// *not* suitable for use with existing 128-bit shuffles as it will contain
7338 /// entries from both V1 and V2 inputs to the wider mask.
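///
/// For example, the v8f32 shuffle mask <0, 1, 8, 9, 4, 5, 12, 13> repeats the
/// same pattern in both 128-bit lanes, so \p RepeatedMask is populated with
/// <0, 1, 8, 9> (entries greater than or equal to 8 refer to the second input).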
7340 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
7341 SmallVectorImpl<int> &RepeatedMask) {
7342 int LaneSize = 128 / VT.getScalarSizeInBits();
7343 RepeatedMask.resize(LaneSize, -1);
7344 int Size = Mask.size();
7345 for (int i = 0; i < Size; ++i) {
7348 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
7349 // This entry crosses lanes, so there is no way to model this shuffle.
7352 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
7353 if (RepeatedMask[i % LaneSize] == -1)
7354 // This is the first non-undef entry in this slot of a 128-bit lane.
7355 RepeatedMask[i % LaneSize] =
7356 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + Size;
7357 else if (RepeatedMask[i % LaneSize] + (i / LaneSize) * LaneSize != Mask[i])
7358 // Found a mismatch with the repeated mask.
7364 // Hide this symbol with an anonymous namespace instead of 'static' so that MSVC
7365 // 2013 will allow us to use it as a non-type template parameter.
7368 /// \brief Implementation of the \c isShuffleEquivalent variadic functor.
7370 /// See its documentation for details.
7371 bool isShuffleEquivalentImpl(SDValue V1, SDValue V2, ArrayRef<int> Mask,
7372 ArrayRef<const int *> Args) {
7373 if (Mask.size() != Args.size())
7376 // If the values are build vectors, we can look through them to find
7377 // equivalent inputs that make the shuffles equivalent.
7378 auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
7379 auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
7381 for (int i = 0, e = Mask.size(); i < e; ++i) {
7382 assert(*Args[i] >= 0 && "Arguments must be positive integers!");
7383 if (Mask[i] != -1 && Mask[i] != *Args[i]) {
7384 auto *MaskBV = Mask[i] < e ? BV1 : BV2;
7385 auto *ArgsBV = *Args[i] < e ? BV1 : BV2;
7386 if (!MaskBV || !ArgsBV ||
7387 MaskBV->getOperand(Mask[i] % e) != ArgsBV->getOperand(*Args[i] % e))
7396 /// \brief Checks whether a shuffle mask is equivalent to an explicit list of
7399 /// This is a fast way to test a shuffle mask against a fixed pattern:
7401 /// if (isShuffleEquivalent(Mask, 3, 2, 1, 0)) { ... }
7403 /// It returns true if the mask is exactly as wide as the argument list, and
7404 /// each element of the mask is either -1 (signifying undef) or the value given
7405 /// in the argument.
7406 static const VariadicFunction3<bool, SDValue, SDValue, ArrayRef<int>, int,
7407 isShuffleEquivalentImpl> isShuffleEquivalent =
7410 /// \brief Get a 4-lane 8-bit shuffle immediate for a mask.
7412 /// This helper function produces an 8-bit shuffle immediate corresponding to
7413 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
7414 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
7417 /// NB: We rely heavily on "undef" masks preserving the input lane.
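///
/// For example, the mask <3, 1, 2, 0> is encoded as the immediate 0b00100111
/// (0x27): two bits per lane, with lane 0 occupying the lowest two bits.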
7418 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask,
7419 SelectionDAG &DAG) {
7420 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
7421 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
7422 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
7423 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
7424 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
7427 Imm |= (Mask[0] == -1 ? 0 : Mask[0]) << 0;
7428 Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2;
7429 Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4;
7430 Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6;
7431 return DAG.getConstant(Imm, MVT::i8);
7434 /// \brief Try to emit a blend instruction for a shuffle.
7436 /// This doesn't do any checks for the availability of instructions for blending
7437 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
7438 /// be matched in the backend with the type given. What it does check for is
7439 /// that the shuffle mask is in fact a blend.
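///
/// For example, the v4f32 mask <0, 5, 2, 7> is a blend that takes elements 1
/// and 3 from V2, so the blend immediate computed below is 0b1010.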
7440 static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
7441 SDValue V2, ArrayRef<int> Mask,
7442 const X86Subtarget *Subtarget,
7443 SelectionDAG &DAG) {
7445 unsigned BlendMask = 0;
7446 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7447 if (Mask[i] >= Size) {
7448 if (Mask[i] != i + Size)
7449 return SDValue(); // Shuffled V2 input!
7450 BlendMask |= 1u << i;
7453 if (Mask[i] >= 0 && Mask[i] != i)
7454 return SDValue(); // Shuffled V1 input!
7456 switch (VT.SimpleTy) {
7461 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
7462 DAG.getConstant(BlendMask, MVT::i8));
7466 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7470 // If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
7471 // that instruction.
7472 if (Subtarget->hasAVX2()) {
7473 // Scale the blend by the number of 32-bit dwords per element.
7474 int Scale = VT.getScalarSizeInBits() / 32;
7476 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7477 if (Mask[i] >= Size)
7478 for (int j = 0; j < Scale; ++j)
7479 BlendMask |= 1u << (i * Scale + j);
7481 MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
7482 V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
7483 V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
7484 return DAG.getNode(ISD::BITCAST, DL, VT,
7485 DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
7486 DAG.getConstant(BlendMask, MVT::i8)));
7490 // For integer shuffles we need to expand the mask and cast the inputs to
7491 // v8i16s prior to blending.
7492 int Scale = 8 / VT.getVectorNumElements();
7494 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7495 if (Mask[i] >= Size)
7496 for (int j = 0; j < Scale; ++j)
7497 BlendMask |= 1u << (i * Scale + j);
7499 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
7500 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
7501 return DAG.getNode(ISD::BITCAST, DL, VT,
7502 DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
7503 DAG.getConstant(BlendMask, MVT::i8)));
7507 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7508 SmallVector<int, 8> RepeatedMask;
7509 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
7510 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
7511 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
7513 for (int i = 0; i < 8; ++i)
7514 if (RepeatedMask[i] >= 16)
7515 BlendMask |= 1u << i;
7516 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
7517 DAG.getConstant(BlendMask, MVT::i8));
7522 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7523 // Scale the blend by the number of bytes per element.
7524 int Scale = VT.getScalarSizeInBits() / 8;
7525 assert(Mask.size() * Scale == 32 && "Not a 256-bit vector!");
7527 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
7528 // mix of LLVM's code generator and the x86 backend. We tell the code
7529 // generator that boolean values in the elements of an x86 vector register
7530 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
7531 // mapping a select to operand #1, and 'false' mapping to operand #2. The
7532 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
7533 // of the element (the remaining are ignored) and 0 in that high bit would
7534 // mean operand #1 while 1 in the high bit would mean operand #2. So while
7535 // the LLVM model for boolean values in vector elements gets the relevant
7536 // bit set, it is set backwards and over-constrained relative to x86's actual model.
7538 SDValue VSELECTMask[32];
7539 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7540 for (int j = 0; j < Scale; ++j)
7541 VSELECTMask[Scale * i + j] =
7542 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
7543 : DAG.getConstant(Mask[i] < Size ? -1 : 0, MVT::i8);
7545 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1);
7546 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V2);
7548 ISD::BITCAST, DL, VT,
7549 DAG.getNode(ISD::VSELECT, DL, MVT::v32i8,
7550 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, VSELECTMask),
7555 llvm_unreachable("Not a supported integer vector type!");
7559 /// \brief Try to lower as a blend of elements from two inputs followed by
7560 /// a single-input permutation.
7562 /// This matches the pattern where we can blend elements from two inputs and
7563 /// then reduce the shuffle to a single-input permutation.
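///
/// For example, the v4i32 mask <1, 4, 3, 6> can be handled by first forming
/// the blend shuffle <4, 1, 6, 3> of V1 and V2 and then permuting the blended
/// result with the single-input mask <1, 0, 3, 2>.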
7564 static SDValue lowerVectorShuffleAsBlendAndPermute(SDLoc DL, MVT VT, SDValue V1,
7567 SelectionDAG &DAG) {
7568 // We build up the blend mask while checking whether a blend is a viable way
7569 // to reduce the shuffle.
7570 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7571 SmallVector<int, 32> PermuteMask(Mask.size(), -1);
7573 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7577 assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
7579 if (BlendMask[Mask[i] % Size] == -1)
7580 BlendMask[Mask[i] % Size] = Mask[i];
7581 else if (BlendMask[Mask[i] % Size] != Mask[i])
7582 return SDValue(); // Can't blend in the needed input!
7584 PermuteMask[i] = Mask[i] % Size;
7587 SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7588 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
7591 /// \brief Generic routine to lower a shuffle and blend as a decomposed set of
7592 /// unblended shuffles followed by an unshuffled blend.
7594 /// This matches the extremely common pattern for handling combined
7595 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend instructions.
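///
/// For example, the v4i32 mask <2, 6, 1, 5> is lowered by shuffling V1 with
/// <2, u, 1, u>, shuffling V2 with <u, 2, u, 1>, and then blending the two
/// results with the mask <0, 5, 2, 7>.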
7597 static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
7601 SelectionDAG &DAG) {
7602 // Shuffle the input elements into the desired positions in V1 and V2 and
7603 // blend them together.
7604 SmallVector<int, 32> V1Mask(Mask.size(), -1);
7605 SmallVector<int, 32> V2Mask(Mask.size(), -1);
7606 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7607 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7608 if (Mask[i] >= 0 && Mask[i] < Size) {
7609 V1Mask[i] = Mask[i];
7611 } else if (Mask[i] >= Size) {
7612 V2Mask[i] = Mask[i] - Size;
7613 BlendMask[i] = i + Size;
7616 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
7617 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
7618 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7621 /// \brief Try to lower a vector shuffle as a byte rotation.
7623 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
7624 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
7625 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
7626 /// try to generically lower a vector shuffle through such a pattern. It
7627 /// does not check for the profitability of lowering either as PALIGNR or
7628 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
7629 /// This matches shuffle vectors that look like:
7631 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
7633 /// Essentially it concatenates V1 and V2, shifts right by some number of
7634 /// elements, and takes the low elements as the result. Note that while this is
7635 /// specified as a *right shift* because x86 is little-endian, it is a *left
7636 /// rotate* of the vector lanes.
7638 /// Note that this only handles 128-bit vector widths currently.
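///
/// For example, for the v8i16 mask [11, 12, 13, 14, 15, 0, 1, 2] shown above,
/// the detected rotation is 3 elements, which on SSSE3 becomes a PALIGNR with
/// a byte immediate of 6 (3 elements * 2 bytes per element).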
7639 static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
7642 const X86Subtarget *Subtarget,
7643 SelectionDAG &DAG) {
7644 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7646 // We need to detect various ways of spelling a rotation:
7647 // [11, 12, 13, 14, 15, 0, 1, 2]
7648 // [-1, 12, 13, 14, -1, -1, 1, -1]
7649 // [-1, -1, -1, -1, -1, -1, 1, 2]
7650 // [ 3, 4, 5, 6, 7, 8, 9, 10]
7651 // [-1, 4, 5, 6, -1, -1, 9, -1]
7652 // [-1, 4, 5, 6, -1, -1, -1, -1]
7655 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7658 assert(Mask[i] >= 0 && "Only -1 is a valid negative mask element!");
7660 // Based on the mod-Size value of this mask element determine where
7661 // a rotated vector would have started.
7662 int StartIdx = i - (Mask[i] % Size);
7664 // The identity rotation isn't interesting, stop.
7667 // If we found the tail of a vector the rotation must be the missing
7668 // front. If we found the head of a vector, it must be how much of the head.
7669 int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;
7672 Rotation = CandidateRotation;
7673 else if (Rotation != CandidateRotation)
7674 // The rotations don't match, so we can't match this mask.
7677 // Compute which value this mask is pointing at.
7678 SDValue MaskV = Mask[i] < Size ? V1 : V2;
7680 // Compute which of the two target values this index should be assigned to.
7681 // This reflects whether the high elements are remaining or the low elements are.
7683 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
7685 // Either set up this value if we've not encountered it before, or check
7686 // that it remains consistent.
7689 else if (TargetV != MaskV)
7690 // This may be a rotation, but it pulls from the inputs in some
7691 // unsupported interleaving.
7695 // Check that we successfully analyzed the mask, and normalize the results.
7696 assert(Rotation != 0 && "Failed to locate a viable rotation!");
7697 assert((Lo || Hi) && "Failed to find a rotated input vector!");
7703 assert(VT.getSizeInBits() == 128 &&
7704 "Rotate-based lowering only supports 128-bit lowering!");
7705 assert(Mask.size() <= 16 &&
7706 "Can shuffle at most 16 bytes in a 128-bit vector!");
7708 // The actual rotate instruction rotates bytes, so we need to scale the
7709 // rotation based on how many bytes are in the vector.
7710 int Scale = 16 / Mask.size();
7712 // SSSE3 targets can use the palignr instruction
7713 if (Subtarget->hasSSSE3()) {
7714 // Cast the inputs to v16i8 to match PALIGNR.
7715 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Lo);
7716 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Hi);
7718 return DAG.getNode(ISD::BITCAST, DL, VT,
7719 DAG.getNode(X86ISD::PALIGNR, DL, MVT::v16i8, Hi, Lo,
7720 DAG.getConstant(Rotation * Scale, MVT::i8)));
7723 // Default SSE2 implementation
7724 int LoByteShift = 16 - Rotation * Scale;
7725 int HiByteShift = Rotation * Scale;
7727 // Cast the inputs to v2i64 to match PSLLDQ/PSRLDQ.
7728 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Lo);
7729 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Hi);
7731 SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo,
7732 DAG.getConstant(8 * LoByteShift, MVT::i8));
7733 SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi,
7734 DAG.getConstant(8 * HiByteShift, MVT::i8));
7735 return DAG.getNode(ISD::BITCAST, DL, VT,
7736 DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift));
7739 /// \brief Compute whether each element of a shuffle is zeroable.
7741 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
7742 /// Either it is an undef element in the shuffle mask, the element of the input
7743 /// referenced is undef, or the element of the input referenced is known to be
7744 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
7745 /// as many lanes with this technique as possible to simplify the remaining shuffles.
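///
/// For example, with Mask = <0, 5, 2, 7> and a V2 whose build_vector operands
/// 1 and 3 are known to be zero, elements 1 and 3 of the result are reported
/// as zeroable.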
7747 static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
7748 SDValue V1, SDValue V2) {
7749 SmallBitVector Zeroable(Mask.size(), false);
7751 while (V1.getOpcode() == ISD::BITCAST)
7752 V1 = V1->getOperand(0);
7753 while (V2.getOpcode() == ISD::BITCAST)
7754 V2 = V2->getOperand(0);
7756 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
7757 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
7759 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7761 // Handle the easy cases.
7762 if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
7767 // If this is an index into a build_vector node (which has the same number
7768 // of elements), dig out the input value and use it.
7769 SDValue V = M < Size ? V1 : V2;
7770 if (V.getOpcode() != ISD::BUILD_VECTOR || Size != (int)V.getNumOperands())
7773 SDValue Input = V.getOperand(M % Size);
7774 // The UNDEF opcode check really should be dead code here, but not quite
7775 // worth asserting on (it isn't invalid, just unexpected).
7776 if (Input.getOpcode() == ISD::UNDEF || X86::isZeroNode(Input))
7783 /// \brief Try to emit a bitmask instruction for a shuffle.
7785 /// This handles cases where we can model a blend exactly as a bitmask due to
7786 /// one of the inputs being zeroable.
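///
/// For example, with an all-zeros V2 and Mask = <0, 5, 2, 7>, the shuffle can
/// be lowered as a bitwise AND of V1 with a constant vector that is all-ones
/// in elements 0 and 2 and zero in elements 1 and 3.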
7787 static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1,
7788 SDValue V2, ArrayRef<int> Mask,
7789 SelectionDAG &DAG) {
7790 MVT EltVT = VT.getScalarType();
7791 int NumEltBits = EltVT.getSizeInBits();
7792 MVT IntEltVT = MVT::getIntegerVT(NumEltBits);
7793 SDValue Zero = DAG.getConstant(0, IntEltVT);
7794 SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), IntEltVT);
7795 if (EltVT.isFloatingPoint()) {
7796 Zero = DAG.getNode(ISD::BITCAST, DL, EltVT, Zero);
7797 AllOnes = DAG.getNode(ISD::BITCAST, DL, EltVT, AllOnes);
7799 SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
7800 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7802 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7805 if (Mask[i] % Size != i)
7806 return SDValue(); // Not a blend.
7808 V = Mask[i] < Size ? V1 : V2;
7809 else if (V != (Mask[i] < Size ? V1 : V2))
7810 return SDValue(); // Can only let one input through the mask.
7812 VMaskOps[i] = AllOnes;
7815 return SDValue(); // No non-zeroable elements!
7817 SDValue VMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, VMaskOps);
7818 V = DAG.getNode(VT.isFloatingPoint()
7819 ? (unsigned) X86ISD::FAND : (unsigned) ISD::AND,
7824 /// \brief Try to lower a vector shuffle as a byte shift (shifts in zeros).
7826 /// Attempts to match a shuffle mask against the PSRLDQ and PSLLDQ SSE2
7827 /// byte-shift instructions. The mask must consist of a shifted sequential
7828 /// shuffle from one of the input vectors and zeroable elements for the
7829 /// remaining 'shifted in' elements.
7831 /// Note that this only handles 128-bit vector widths currently.
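///
/// For example, the v8i16 mask [2, 3, 4, 5, 6, 7, zz, zz] (where the last two
/// elements are zeroable) matches a PSRLDQ of V1 by 4 bytes.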
7832 static SDValue lowerVectorShuffleAsByteShift(SDLoc DL, MVT VT, SDValue V1,
7833 SDValue V2, ArrayRef<int> Mask,
7834 SelectionDAG &DAG) {
7835 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7837 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7839 int Size = Mask.size();
7840 int Scale = 16 / Size;
7842 for (int Shift = 1; Shift < Size; Shift++) {
7843 int ByteShift = Shift * Scale;
7845 // PSRLDQ : (little-endian) right byte shift
7846 // [ 5, 6, 7, zz, zz, zz, zz, zz]
7847 // [ -1, 5, 6, 7, zz, zz, zz, zz]
7848 // [ 1, 2, -1, -1, -1, -1, zz, zz]
7849 bool ZeroableRight = true;
7850 for (int i = Size - Shift; i < Size; i++) {
7851 ZeroableRight &= Zeroable[i];
7854 if (ZeroableRight) {
7855 bool ValidShiftRight1 =
7856 isSequentialOrUndefInRange(Mask, 0, Size - Shift, Shift);
7857 bool ValidShiftRight2 =
7858 isSequentialOrUndefInRange(Mask, 0, Size - Shift, Size + Shift);
7860 if (ValidShiftRight1 || ValidShiftRight2) {
7861 // Cast the inputs to v2i64 to match PSRLDQ.
7862 SDValue &TargetV = ValidShiftRight1 ? V1 : V2;
7863 SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TargetV);
7864 SDValue Shifted = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, V,
7865 DAG.getConstant(ByteShift * 8, MVT::i8));
7866 return DAG.getNode(ISD::BITCAST, DL, VT, Shifted);
7870 // PSLLDQ : (little-endian) left byte shift
7871 // [ zz, 0, 1, 2, 3, 4, 5, 6]
7872 // [ zz, zz, -1, -1, 2, 3, 4, -1]
7873 // [ zz, zz, zz, zz, zz, zz, -1, 1]
7874 bool ZeroableLeft = true;
7875 for (int i = 0; i < Shift; i++) {
7876 ZeroableLeft &= Zeroable[i];
7880 bool ValidShiftLeft1 =
7881 isSequentialOrUndefInRange(Mask, Shift, Size - Shift, 0);
7882 bool ValidShiftLeft2 =
7883 isSequentialOrUndefInRange(Mask, Shift, Size - Shift, Size);
7885 if (ValidShiftLeft1 || ValidShiftLeft2) {
7886 // Cast the inputs to v2i64 to match PSLLDQ.
7887 SDValue &TargetV = ValidShiftLeft1 ? V1 : V2;
7888 SDValue V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, TargetV);
7889 SDValue Shifted = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, V,
7890 DAG.getConstant(ByteShift * 8, MVT::i8));
7891 return DAG.getNode(ISD::BITCAST, DL, VT, Shifted);
7899 /// \brief Try to lower a vector shuffle as a bit shift (shifts in zeros).
7901 /// Attempts to match a shuffle mask against the PSRL(W/D/Q) and PSLL(W/D/Q)
7902 /// SSE2 and AVX2 logical bit-shift instructions. The function matches
7903 /// elements from one of the input vectors shuffled to the left or right
7904 /// with zeroable elements 'shifted in'.
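///
/// For example, the v8i16 mask [zz, 0, zz, 2, zz, 4, zz, 6] matches a logical
/// left shift of each 32-bit element by 16 bits (VSHLI on a v4i32 bitcast).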
7905 static SDValue lowerVectorShuffleAsBitShift(SDLoc DL, MVT VT, SDValue V1,
7906 SDValue V2, ArrayRef<int> Mask,
7907 SelectionDAG &DAG) {
7908 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7910 int Size = Mask.size();
7911 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
7913 // PSRL : (little-endian) right bit shift.
7916 // PSLL : (little-endian) left bit shift.
7918 // [ -1, 4, zz, -1 ]
7919 auto MatchBitShift = [&](int Shift, int Scale) -> SDValue {
7920 MVT ShiftSVT = MVT::getIntegerVT(VT.getScalarSizeInBits() * Scale);
7921 MVT ShiftVT = MVT::getVectorVT(ShiftSVT, Size / Scale);
7922 assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
7923 "Illegal integer vector type");
7925 bool MatchLeft = true, MatchRight = true;
7926 for (int i = 0; i != Size; i += Scale) {
7927 for (int j = 0; j != Shift; j++) {
7928 MatchLeft &= Zeroable[i + j];
7930 for (int j = Scale - Shift; j != Scale; j++) {
7931 MatchRight &= Zeroable[i + j];
7934 if (!(MatchLeft || MatchRight))
7937 bool MatchV1 = true, MatchV2 = true;
7938 for (int i = 0; i != Size; i += Scale) {
7939 unsigned Pos = MatchLeft ? i + Shift : i;
7940 unsigned Low = MatchLeft ? i : i + Shift;
7941 unsigned Len = Scale - Shift;
7942 MatchV1 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low);
7943 MatchV2 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low + Size);
7945 if (!(MatchV1 || MatchV2))
7948 // Cast the inputs to ShiftVT to match VSRLI/VSHLI and back again.
7949 unsigned OpCode = MatchLeft ? X86ISD::VSHLI : X86ISD::VSRLI;
7950 int ShiftAmt = Shift * VT.getScalarSizeInBits();
7951 SDValue V = MatchV1 ? V1 : V2;
7952 V = DAG.getNode(ISD::BITCAST, DL, ShiftVT, V);
7953 V = DAG.getNode(OpCode, DL, ShiftVT, V, DAG.getConstant(ShiftAmt, MVT::i8));
7954 return DAG.getNode(ISD::BITCAST, DL, VT, V);
7957 // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
7958 // keep doubling the size of the integer elements up to that. We can
7959 // then shift the elements of the integer vector by whole multiples of
7960 // their width within the elements of the larger integer vector. Test each
7961 // multiple to see if we can find a match with the moved element indices
7962 // and that the shifted in elements are all zeroable.
7963 for (int Scale = 2; Scale * VT.getScalarSizeInBits() <= 64; Scale *= 2)
7964 for (int Shift = 1; Shift != Scale; Shift++)
7965 if (SDValue BitShift = MatchBitShift(Shift, Scale))
7966 return BitShift;
7972 /// \brief Lower a vector shuffle as a zero or any extension.
7974 /// Given a specific number of elements, element bit width, and extension
7975 /// stride, produce either a zero or any extension based on the available
7976 /// features of the subtarget.
7977 static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
7978 SDLoc DL, MVT VT, int Scale, bool AnyExt, SDValue InputV,
7979 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
7980 assert(Scale > 1 && "Need a scale to extend.");
7981 int NumElements = VT.getVectorNumElements();
7982 int EltBits = VT.getScalarSizeInBits();
7983 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
7984 "Only 8, 16, and 32 bit elements can be extended.");
7985 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
7987 // Found a valid zext mask! Try various lowering strategies based on the
7988 // input type and available ISA extensions.
7989 if (Subtarget->hasSSE41()) {
7990 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
7991 NumElements / Scale);
7992 return DAG.getNode(ISD::BITCAST, DL, VT,
7993 DAG.getNode(X86ISD::VZEXT, DL, ExtVT, InputV));
7996 // For any extends we can cheat for larger element sizes and use shuffle
7997 // instructions that can fold with a load and/or copy.
7998 if (AnyExt && EltBits == 32) {
7999 int PSHUFDMask[4] = {0, -1, 1, -1};
8000 return DAG.getNode(
8001 ISD::BITCAST, DL, VT,
8002 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
8003 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
8004 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
8006 if (AnyExt && EltBits == 16 && Scale > 2) {
8007 int PSHUFDMask[4] = {0, -1, 0, -1};
8008 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
8009 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
8010 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG));
8011 int PSHUFHWMask[4] = {1, -1, -1, -1};
8012 return DAG.getNode(
8013 ISD::BITCAST, DL, VT,
8014 DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16,
8015 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, InputV),
8016 getV4X86ShuffleImm8ForMask(PSHUFHWMask, DAG)));
8019 // If this would require more than 2 unpack instructions to expand, use
8020 // pshufb when available. We can only use more than 2 unpack instructions
8021 // when zero extending i8 elements which also makes it easier to use pshufb.
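// For example, zero extending i8 elements all the way to i64 (Scale == 8)
// builds the PSHUFB control vector { 0, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
// 0x80, 1, 0x80, ... }: result byte i is taken from source byte i / 8 when
// i % 8 == 0 and is forced to zero (0x80) otherwise.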
8022 if (Scale > 4 && EltBits == 8 && Subtarget->hasSSSE3()) {
8023 assert(NumElements == 16 && "Unexpected byte vector width!");
8024 SDValue PSHUFBMask[16];
8025 for (int i = 0; i < 16; ++i)
8026 PSHUFBMask[i] =
8027 DAG.getConstant((i % Scale == 0) ? i / Scale : 0x80, MVT::i8);
8028 InputV = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, InputV);
8029 return DAG.getNode(ISD::BITCAST, DL, VT,
8030 DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
8031 DAG.getNode(ISD::BUILD_VECTOR, DL,
8032 MVT::v16i8, PSHUFBMask)));
8035 // Otherwise emit a sequence of unpacks.
8036 do {
8037 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
8038 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
8039 : getZeroVector(InputVT, Subtarget, DAG, DL);
8040 InputV = DAG.getNode(ISD::BITCAST, DL, InputVT, InputV);
8041 InputV = DAG.getNode(X86ISD::UNPCKL, DL, InputVT, InputV, Ext);
8042 Scale /= 2;
8043 EltBits *= 2;
8044 NumElements /= 2;
8045 } while (Scale > 1);
8046 return DAG.getNode(ISD::BITCAST, DL, VT, InputV);
8049 /// \brief Try to lower a vector shuffle as a zero extension on any microarch.
8051 /// This routine will try to do everything in its power to cleverly lower
8052 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
8053 /// check for the profitability of this lowering, it tries to aggressively
8054 /// match this pattern. It will use all of the micro-architectural details it
8055 /// can to emit an efficient lowering. It handles both blends with all-zero
8056 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
8057 /// masking out later).
8059 /// The reason we have dedicated lowering for zext-style shuffles is that they
8060 /// are both incredibly common and often quite performance sensitive.
8061 static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
8062 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8063 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8064 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8066 int Bits = VT.getSizeInBits();
8067 int NumElements = VT.getVectorNumElements();
8068 assert(VT.getScalarSizeInBits() <= 32 &&
8069 "Exceeds 32-bit integer zero extension limit");
8070 assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
8072 // Define a helper function to check a particular ext-scale and lower to it if
8074 auto Lower = [&](int Scale) -> SDValue {
8077 for (int i = 0; i < NumElements; ++i) {
8078 if (Mask[i] == -1)
8079 continue; // Valid anywhere but doesn't tell us anything.
8080 if (i % Scale != 0) {
8081 // Each of the extended elements needs to be zeroable.
8085 // We no longer are in the anyext case.
8090 // Each of the base elements needs to be consecutive indices into the
8091 // same input vector.
8092 SDValue V = Mask[i] < NumElements ? V1 : V2;
8093 if (!InputV)
8094 InputV = V;
8095 else if (InputV != V)
8096 return SDValue(); // Flip-flopping inputs.
8098 if (Mask[i] % NumElements != i / Scale)
8099 return SDValue(); // Non-consecutive strided elements.
8102 // If we fail to find an input, we have a zero-shuffle which should always
8103 // have already been handled.
8104 // FIXME: Maybe handle this here in case during blending we end up with one?
8108 return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
8109 DL, VT, Scale, AnyExt, InputV, Subtarget, DAG);
8112 // The widest scale possible for extending is to a 64-bit integer.
8113 assert(Bits % 64 == 0 &&
8114 "The number of bits in a vector must be divisible by 64 on x86!");
8115 int NumExtElements = Bits / 64;
8117 // Each iteration, try extending the elements half as much, but into twice as
8118 // many elements.
8119 for (; NumExtElements < NumElements; NumExtElements *= 2) {
8120 assert(NumElements % NumExtElements == 0 &&
8121 "The input vector size must be divisible by the extended size.");
8122 if (SDValue V = Lower(NumElements / NumExtElements))
8123 return V;
8126 // General extends failed, but 128-bit vectors may be able to use MOVQ.
8130 // Returns one of the source operands if the shuffle can be reduced to a
8131 // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
8132 auto CanZExtLowHalf = [&]() {
8133 for (int i = NumElements / 2; i != NumElements; i++)
8136 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
8137 return V1;
8138 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
8139 return V2;
8143 if (SDValue V = CanZExtLowHalf()) {
8144 V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V);
8145 V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
8146 return DAG.getNode(ISD::BITCAST, DL, VT, V);
8149 // No viable ext lowering found.
8153 /// \brief Try to get a scalar value for a specific element of a vector.
8155 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
8156 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
8157 SelectionDAG &DAG) {
8158 MVT VT = V.getSimpleValueType();
8159 MVT EltVT = VT.getVectorElementType();
8160 while (V.getOpcode() == ISD::BITCAST)
8161 V = V.getOperand(0);
8162 // If the bitcasts shift the element size, we can't extract an equivalent
8164 MVT NewVT = V.getSimpleValueType();
8165 if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
8166 return SDValue();
8168 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8169 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR))
8170 return DAG.getNode(ISD::BITCAST, SDLoc(V), EltVT, V.getOperand(Idx));
8175 /// \brief Helper to test for a load that can be folded with x86 shuffles.
8177 /// This is particularly important because the set of instructions varies
8178 /// significantly based on whether the operand is a load or not.
8179 static bool isShuffleFoldableLoad(SDValue V) {
8180 while (V.getOpcode() == ISD::BITCAST)
8181 V = V.getOperand(0);
8183 return ISD::isNON_EXTLoad(V.getNode());
8186 /// \brief Try to lower insertion of a single element into a zero vector.
8188 /// This is a common pattern for which we have especially efficient patterns to lower
8189 /// across all subtarget feature sets.
8190 static SDValue lowerVectorShuffleAsElementInsertion(
8191 MVT VT, SDLoc DL, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8192 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8193 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8195 MVT EltVT = VT.getVectorElementType();
8197 int V2Index = std::find_if(Mask.begin(), Mask.end(),
8198 [&Mask](int M) { return M >= (int)Mask.size(); }) -
8199 Mask.begin();
8200 bool IsV1Zeroable = true;
8201 for (int i = 0, Size = Mask.size(); i < Size; ++i)
8202 if (i != V2Index && !Zeroable[i]) {
8203 IsV1Zeroable = false;
8207 // Check for a single input from a SCALAR_TO_VECTOR node.
8208 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
8209 // all the smarts here sunk into that routine. However, the current
8210 // lowering of BUILD_VECTOR makes that nearly impossible until the old
8211 // vector shuffle lowering is dead.
8212 if (SDValue V2S = getScalarValueForVectorElement(
8213 V2, Mask[V2Index] - Mask.size(), DAG)) {
8214 // We need to zext the scalar if it is smaller than an i32.
8215 V2S = DAG.getNode(ISD::BITCAST, DL, EltVT, V2S);
8216 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
8217 // Using zext to expand a narrow element won't work for non-zero
8222 // Zero-extend directly to i32.
8224 V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
8226 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
8227 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
8228 EltVT == MVT::i16) {
8229 // Either not inserting from the low element of the input or the input
8230 // element size is too small to use VZEXT_MOVL to clear the high bits.
8234 if (!IsV1Zeroable) {
8235 // If V1 can't be treated as a zero vector we have fewer options to lower
8236 // this. We can't support integer vectors or non-zero targets cheaply, and
8237 // the V1 elements can't be permuted in any way.
8238 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
8239 if (!VT.isFloatingPoint() || V2Index != 0)
8240 return SDValue();
8241 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
8242 V1Mask[V2Index] = -1;
8243 if (!isNoopShuffleMask(V1Mask))
8244 return SDValue();
8245 // This is essentially a special case blend operation, but if we have
8246 // general purpose blend operations, they are always faster. Bail and let
8247 // the rest of the lowering handle these as blends.
8248 if (Subtarget->hasSSE41())
8249 return SDValue();
8251 // Otherwise, use MOVSD or MOVSS.
8252 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
8253 "Only two types of floating point element types to handle!");
8254 return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
8258 // This lowering only works for the low element with floating point vectors.
8259 if (VT.isFloatingPoint() && V2Index != 0)
8260 return SDValue();
8262 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
8264 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8267 // If we have 4 or fewer lanes we can cheaply shuffle the element into
8268 // the desired position. Otherwise it is more efficient to do a vector
8269 // shift left. We know that we can do a vector shift left because all
8270 // the inputs are zero.
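// For example, for v8i16 with the V2 element landing in lane 3, the shift
// path below moves the v2i64-cast vector left by V2Index * 16 = 48 bits
// (6 bytes of VSHLDQ), which carries the zero-extended scalar from lane 0
// up into lane 3 while shifting in zeros below it.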
8271 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
8272 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
8273 V2Shuffle[V2Index] = 0;
8274 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
8275 } else {
8276 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V2);
8277 V2 = DAG.getNode(
8278 X86ISD::VSHLDQ, DL, MVT::v2i64, V2,
8279 DAG.getConstant(
8280 V2Index * EltVT.getSizeInBits(),
8281 DAG.getTargetLoweringInfo().getScalarShiftAmountTy(MVT::v2i64)));
8282 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8288 /// \brief Try to lower broadcast of a single element.
8290 /// For convenience, this code also bundles all of the subtarget feature set
8291 /// filtering. While a little annoying to re-dispatch on type here, there isn't
8292 /// a convenient way to factor it out.
8293 static SDValue lowerVectorShuffleAsBroadcast(MVT VT, SDLoc DL, SDValue V,
8294 ArrayRef<int> Mask,
8295 const X86Subtarget *Subtarget,
8296 SelectionDAG &DAG) {
8297 if (!Subtarget->hasAVX())
8298 return SDValue();
8299 if (VT.isInteger() && !Subtarget->hasAVX2())
8300 return SDValue();
8302 // Check that the mask is a broadcast.
8303 int BroadcastIdx = -1;
8304 for (int M : Mask)
8305 if (M >= 0 && BroadcastIdx == -1)
8306 BroadcastIdx = M;
8307 else if (M >= 0 && M != BroadcastIdx)
8308 return SDValue();
8310 assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
8311 "a sorted mask where the broadcast "
8314 // Go up the chain of (vector) values to try and find a scalar load that
8315 // we can combine with the broadcast.
8317 switch (V.getOpcode()) {
8318 case ISD::CONCAT_VECTORS: {
8319 int OperandSize = Mask.size() / V.getNumOperands();
8320 V = V.getOperand(BroadcastIdx / OperandSize);
8321 BroadcastIdx %= OperandSize;
8325 case ISD::INSERT_SUBVECTOR: {
8326 SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
8327 auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
8331 int BeginIdx = (int)ConstantIdx->getZExtValue();
8332 int EndIdx =
8333 BeginIdx + (int)VInner.getValueType().getVectorNumElements();
8334 if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
8335 BroadcastIdx -= BeginIdx;
8346 // Check if this is a broadcast of a scalar. We special case lowering
8347 // for scalars so that we can more effectively fold with loads.
8348 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8349 (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
8350 V = V.getOperand(BroadcastIdx);
8352 // If the scalar isn't a load we can't broadcast from it in AVX1, only with
8354 if (!Subtarget->hasAVX2() && !isShuffleFoldableLoad(V))
8355 return SDValue();
8356 } else if (BroadcastIdx != 0 || !Subtarget->hasAVX2()) {
8357 // We can't broadcast from a vector register w/o AVX2, and we can only
8358 // broadcast from the zero-element of a vector register.
8362 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, V);
8365 // Check for whether we can use INSERTPS to perform the shuffle. We only use
8366 // INSERTPS when the V1 elements are already in the correct locations
8367 // because otherwise we can just always use two SHUFPS instructions which
8368 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
8369 // perform INSERTPS if a single V1 element is out of place and all V2
8370 // elements are zeroable.
8371 static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
8372 ArrayRef<int> Mask,
8373 SelectionDAG &DAG) {
8374 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8375 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8376 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8377 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8379 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8382 int V1DstIndex = -1;
8383 int V2DstIndex = -1;
8384 bool V1UsedInPlace = false;
8386 for (int i = 0; i < 4; i++) {
8387 // Synthesize a zero mask from the zeroable elements (includes undefs).
8393 // Flag if we use any V1 inputs in place.
8395 V1UsedInPlace = true;
8399 // We can only insert a single non-zeroable element.
8400 if (V1DstIndex != -1 || V2DstIndex != -1)
8404 // V1 input out of place for insertion.
8407 // V2 input for insertion.
8412 // Don't bother if we have no (non-zeroable) element for insertion.
8413 if (V1DstIndex == -1 && V2DstIndex == -1)
8414 return SDValue();
8416 // Determine element insertion src/dst indices. The src index is from the
8417 // start of the inserted vector, not the start of the concatenated vector.
8418 unsigned V2SrcIndex = 0;
8419 if (V1DstIndex != -1) {
8420 // If we have a V1 input out of place, we use V1 as the V2 element insertion
8421 // and don't use the original V2 at all.
8422 V2SrcIndex = Mask[V1DstIndex];
8423 V2DstIndex = V1DstIndex;
8424 } else {
8426 V2SrcIndex = Mask[V2DstIndex] - 4;
8429 // If no V1 inputs are used in place, then the result is created only from
8430 // the zero mask and the V2 insertion - so remove V1 dependency.
8431 if (!V1UsedInPlace)
8432 V1 = DAG.getUNDEF(MVT::v4f32);
8434 unsigned InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
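// Worked example: inserting V2 source element 2 into destination lane 1
// while zeroing lanes 0 and 3 gives (2 << 6) | (1 << 4) | 0b1001 = 0x99,
// matching the INSERTPS immediate layout of [7:6] CountS, [5:4] CountD and
// [3:0] ZMask.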
8435 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
8437 // Insert the V2 element into the desired position.
8439 return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
8440 DAG.getConstant(InsertPSMask, MVT::i8));
8443 /// \brief Handle lowering of 2-lane 64-bit floating point shuffles.
8445 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
8446 /// support for floating point shuffles but not integer shuffles. These
8447 /// instructions will incur a domain crossing penalty on some chips though so
8448 /// it is better to avoid lowering through this for integer vectors where
8450 static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8451 const X86Subtarget *Subtarget,
8452 SelectionDAG &DAG) {
8453 SDLoc DL(Op);
8454 assert(Op.getSimpleValueType() == MVT::v2f64 && "Bad shuffle type!");
8455 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8456 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8457 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8458 ArrayRef<int> Mask = SVOp->getMask();
8459 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8461 if (isSingleInputShuffleMask(Mask)) {
8462 // Use low duplicate instructions for masks that match their pattern.
8463 if (Subtarget->hasSSE3())
8464 if (isShuffleEquivalent(V1, V2, Mask, 0, 0))
8465 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, V1);
8467 // Straight shuffle of a single input vector. Simulate this by using the
8468 // single input as both of the "inputs" to this instruction.
8469 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
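// For example, the single-input mask <1, 0> yields SHUFPDMask = 1 | (0 << 1)
// = 0b01: lane 0 takes element 1 and lane 1 takes element 0, with the same
// vector feeding both operands of the SHUFPD-style node below.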
8471 if (Subtarget->hasAVX()) {
8472 // If we have AVX, we can use VPERMILPS which will allow folding a load
8473 // into the shuffle.
8474 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
8475 DAG.getConstant(SHUFPDMask, MVT::i8));
8478 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1,
8479 DAG.getConstant(SHUFPDMask, MVT::i8));
8481 assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
8482 assert(Mask[1] >= 2 && "Non-canonicalized blend!");
8484 // If we have a single input, insert that into V1 if we can do so cheaply.
8485 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8486 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8487 MVT::v2f64, DL, V1, V2, Mask, Subtarget, DAG))
8488 return Insertion;
8489 // Try inverting the insertion since for v2 masks it is easy to do and we
8490 // can't reliably sort the mask one way or the other.
8491 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8492 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8493 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8494 MVT::v2f64, DL, V2, V1, InverseMask, Subtarget, DAG))
8495 return Insertion;
8496 }
8498 // Try to use one of the special instruction patterns to handle two common
8499 // blend patterns if a zero-blend above didn't work.
8500 if (isShuffleEquivalent(V1, V2, Mask, 0, 3) || isShuffleEquivalent(V1, V2, Mask, 1, 3))
8501 if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
8502 // We can either use a special instruction to load over the low double or
8503 // to move just the low double.
8504 return DAG.getNode(
8505 isShuffleFoldableLoad(V1S) ? X86ISD::MOVLPD : X86ISD::MOVSD,
8507 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
8509 if (Subtarget->hasSSE41())
8510 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
8511 Subtarget, DAG))
8512 return Blend;
8514 // Use dedicated unpack instructions for masks that match their pattern.
8515 if (isShuffleEquivalent(V1, V2, Mask, 0, 2))
8516 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2f64, V1, V2);
8517 if (isShuffleEquivalent(V1, V2, Mask, 1, 3))
8518 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2f64, V1, V2);
8520 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
8521 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V2,
8522 DAG.getConstant(SHUFPDMask, MVT::i8));
8525 /// \brief Handle lowering of 2-lane 64-bit integer shuffles.
8527 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
8528 /// the integer unit to minimize domain crossing penalties. However, for blends
8529 /// it falls back to the floating point shuffle operation with appropriate bit
8531 static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8532 const X86Subtarget *Subtarget,
8533 SelectionDAG &DAG) {
8534 SDLoc DL(Op);
8535 assert(Op.getSimpleValueType() == MVT::v2i64 && "Bad shuffle type!");
8536 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8537 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8538 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8539 ArrayRef<int> Mask = SVOp->getMask();
8540 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8542 if (isSingleInputShuffleMask(Mask)) {
8543 // Check for being able to broadcast a single element.
8544 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v2i64, DL, V1,
8545 Mask, Subtarget, DAG))
8546 return Broadcast;
8548 // Straight shuffle of a single input vector. For everything from SSE2
8549 // onward this has a single fast instruction with no scary immediates.
8550 // We have to map the mask as it is actually a v4i32 shuffle instruction.
8551 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V1);
8552 int WidenedMask[4] = {
8553 std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
8554 std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
8555 return DAG.getNode(
8556 ISD::BITCAST, DL, MVT::v2i64,
8557 DAG.getNode(X86ISD::PSHUFD, SDLoc(Op), MVT::v4i32, V1,
8558 getV4X86ShuffleImm8ForMask(WidenedMask, DAG)));
8561 // Try to use byte shift instructions.
8562 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8563 DL, MVT::v2i64, V1, V2, Mask, DAG))
8564 return Shift;
8566 // If we have a single input from V2 insert that into V1 if we can do so
8568 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8569 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8570 MVT::v2i64, DL, V1, V2, Mask, Subtarget, DAG))
8571 return Insertion;
8572 // Try inverting the insertion since for v2 masks it is easy to do and we
8573 // can't reliably sort the mask one way or the other.
8574 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8575 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8576 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8577 MVT::v2i64, DL, V2, V1, InverseMask, Subtarget, DAG))
8578 return Insertion;
8579 }
8581 if (Subtarget->hasSSE41())
8582 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
8583 Subtarget, DAG))
8584 return Blend;
8586 // Use dedicated unpack instructions for masks that match their pattern.
8587 if (isShuffleEquivalent(V1, V2, Mask, 0, 2))
8588 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, V1, V2);
8589 if (isShuffleEquivalent(V1, V2, Mask, 1, 3))
8590 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2i64, V1, V2);
8592 // Try to use byte rotation instructions.
8593 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
8594 if (Subtarget->hasSSSE3())
8595 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8596 DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
8597 return Rotate;
8599 // We implement this with SHUFPD which is pretty lame because it will likely
8600 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
8601 // However, all the alternatives are still more cycles and newer chips don't
8602 // have this problem. It would be really nice if x86 had better shuffles here.
8603 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V1);
8604 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V2);
8605 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
8606 DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
8609 /// \brief Test whether this can be lowered with a single SHUFPS instruction.
8611 /// This is used to disable more specialized lowerings when the shufps lowering
8612 /// will happen to be efficient.
8613 static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
8614 // This routine only handles 128-bit shufps.
8615 assert(Mask.size() == 4 && "Unsupported mask size!");
8617 // To lower with a single SHUFPS we need to have the low half and high half
8618 // each requiring a single input.
8619 if (Mask[0] != -1 && Mask[1] != -1 && (Mask[0] < 4) != (Mask[1] < 4))
8620 return false;
8621 if (Mask[2] != -1 && Mask[3] != -1 && (Mask[2] < 4) != (Mask[3] < 4))
8627 /// \brief Lower a vector shuffle using the SHUFPS instruction.
8629 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
8630 /// It makes no assumptions about whether this is the *best* lowering; it simply
8631 /// uses it.
8632 static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
8633 ArrayRef<int> Mask, SDValue V1,
8634 SDValue V2, SelectionDAG &DAG) {
8635 SDValue LowV = V1, HighV = V2;
8636 int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
8638 int NumV2Elements =
8639 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8641 if (NumV2Elements == 1) {
8642 int V2Index =
8643 std::find_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; }) -
8644 Mask.begin();
8646 // Compute the index adjacent to V2Index and in the same half by toggling
8648 int V2AdjIndex = V2Index ^ 1;
8650 if (Mask[V2AdjIndex] == -1) {
8651 // Handles all the cases where we have a single V2 element and an undef.
8652 // This will only ever happen in the high lanes because we commute the
8653 // vector otherwise.
8655 std::swap(LowV, HighV);
8656 NewMask[V2Index] -= 4;
8657 } else {
8658 // Handle the case where the V2 element ends up adjacent to a V1 element.
8659 // To make this work, blend them together as the first step.
8660 int V1Index = V2AdjIndex;
8661 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
8662 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
8663 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8665 // Now proceed to reconstruct the final blend as we have the necessary
8666 // high or low half formed.
8673 NewMask[V1Index] = 2; // We put the V1 element in V2[2].
8674 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
8676 } else if (NumV2Elements == 2) {
8677 if (Mask[0] < 4 && Mask[1] < 4) {
8678 // Handle the easy case where we have V1 in the low lanes and V2 in the
8682 } else if (Mask[2] < 4 && Mask[3] < 4) {
8683 // We also handle the reversed case because this utility may get called
8684 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
8685 // arrange things in the right direction.
8691 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
8692 // trying to place elements directly, just blend them and set up the final
8693 // shuffle to place them.
8695 // The first two blend mask elements are for V1, the second two are for
8697 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
8698 Mask[2] < 4 ? Mask[2] : Mask[3],
8699 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
8700 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
8701 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
8702 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8704 // Now we do a normal shuffle of V1 by giving V1 as both operands to
8705 // the shuffle.
8706 LowV = HighV = V1;
8707 NewMask[0] = Mask[0] < 4 ? 0 : 2;
8708 NewMask[1] = Mask[0] < 4 ? 2 : 0;
8709 NewMask[2] = Mask[2] < 4 ? 1 : 3;
8710 NewMask[3] = Mask[2] < 4 ? 3 : 1;
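// Worked example: for Mask <0, 4, 2, 6>, BlendMask is {0, 2, 0, 2}, so the
// SHUFP above produces <V1[0], V1[2], V2[0], V2[2]>, and NewMask becomes
// <0, 2, 1, 3>, so the final SHUFP below reorders that blend into the
// requested <0, 4, 2, 6> result.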
8713 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
8714 getV4X86ShuffleImm8ForMask(NewMask, DAG));
8717 /// \brief Lower 4-lane 32-bit floating point shuffles.
8719 /// Uses instructions exclusively from the floating point unit to minimize
8720 /// domain crossing penalties, as these are sufficient to implement all v4f32
8721 /// shuffles.
8722 static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8723 const X86Subtarget *Subtarget,
8724 SelectionDAG &DAG) {
8725 SDLoc DL(Op);
8726 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8727 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8728 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8729 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8730 ArrayRef<int> Mask = SVOp->getMask();
8731 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8733 int NumV2Elements =
8734 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8736 if (NumV2Elements == 0) {
8737 // Check for being able to broadcast a single element.
8738 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f32, DL, V1,
8739 Mask, Subtarget, DAG))
8740 return Broadcast;
8742 // Use even/odd duplicate instructions for masks that match their pattern.
8743 if (Subtarget->hasSSE3()) {
8744 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2))
8745 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
8746 if (isShuffleEquivalent(V1, V2, Mask, 1, 1, 3, 3))
8747 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
8750 if (Subtarget->hasAVX()) {
8751 // If we have AVX, we can use VPERMILPS which will allow folding a load
8752 // into the shuffle.
8753 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
8754 getV4X86ShuffleImm8ForMask(Mask, DAG));
8757 // Otherwise, use a straight shuffle of a single input vector. We pass the
8758 // input vector to both operands to simulate this with a SHUFPS.
8759 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
8760 getV4X86ShuffleImm8ForMask(Mask, DAG));
8763 // There are special ways we can lower some single-element blends. However, we
8764 // have custom ways we can lower more complex single-element blends below that
8765 // we defer to if both this and BLENDPS fail to match, so restrict this to
8766 // when the V2 input is targeting element 0 of the mask -- that is the fast
8768 if (NumV2Elements == 1 && Mask[0] >= 4)
8769 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4f32, DL, V1, V2,
8770 Mask, Subtarget, DAG))
8771 return V;
8773 if (Subtarget->hasSSE41()) {
8774 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
8775 Subtarget, DAG))
8776 return Blend;
8778 // Use INSERTPS if we can complete the shuffle efficiently.
8779 if (SDValue V = lowerVectorShuffleAsInsertPS(Op, V1, V2, Mask, DAG))
8780 return V;
8782 if (!isSingleSHUFPSMask(Mask))
8783 if (SDValue BlendPerm = lowerVectorShuffleAsBlendAndPermute(
8784 DL, MVT::v4f32, V1, V2, Mask, DAG))
8785 return BlendPerm;
8786 }
8788 // Use dedicated unpack instructions for masks that match their pattern.
8789 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 1, 5))
8790 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V1, V2);
8791 if (isShuffleEquivalent(V1, V2, Mask, 2, 6, 3, 7))
8792 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V1, V2);
8794 // Otherwise fall back to a SHUFPS lowering strategy.
8795 return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
8798 /// \brief Lower 4-lane i32 vector shuffles.
8800 /// We try to handle these with integer-domain shuffles where we can, but for
8801 /// blends we use the floating point domain blend instructions.
8802 static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8803 const X86Subtarget *Subtarget,
8804 SelectionDAG &DAG) {
8805 SDLoc DL(Op);
8806 assert(Op.getSimpleValueType() == MVT::v4i32 && "Bad shuffle type!");
8807 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8808 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8809 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8810 ArrayRef<int> Mask = SVOp->getMask();
8811 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8813 // Whenever we can lower this as a zext, that instruction is strictly faster
8814 // than any alternative. It also allows us to fold memory operands into the
8815 // shuffle in many cases.
8816 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2,
8817 Mask, Subtarget, DAG))
8818 return ZExt;
8820 int NumV2Elements =
8821 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8823 if (NumV2Elements == 0) {
8824 // Check for being able to broadcast a single element.
8825 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i32, DL, V1,
8826 Mask, Subtarget, DAG))
8827 return Broadcast;
8829 // Straight shuffle of a single input vector. For everything from SSE2
8830 // onward this has a single fast instruction with no scary immediates.
8831 // We coerce the shuffle pattern to be compatible with UNPCK instructions
8832 // but we aren't actually going to use the UNPCK instruction because doing
8833 // so prevents folding a load into this instruction or making a copy.
8834 const int UnpackLoMask[] = {0, 0, 1, 1};
8835 const int UnpackHiMask[] = {2, 2, 3, 3};
8836 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 1, 1))
8837 Mask = UnpackLoMask;
8838 else if (isShuffleEquivalent(V1, V2, Mask, 2, 2, 3, 3))
8839 Mask = UnpackHiMask;
8841 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
8842 getV4X86ShuffleImm8ForMask(Mask, DAG));
8845 // Try to use bit shift instructions.
8846 if (SDValue Shift = lowerVectorShuffleAsBitShift(
8847 DL, MVT::v4i32, V1, V2, Mask, DAG))
8848 return Shift;
8850 // Try to use byte shift instructions.
8851 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8852 DL, MVT::v4i32, V1, V2, Mask, DAG))
8853 return Shift;
8855 // There are special ways we can lower some single-element blends.
8856 if (NumV2Elements == 1)
8857 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4i32, DL, V1, V2,
8858 Mask, Subtarget, DAG))
8859 return V;
8861 if (Subtarget->hasSSE41())
8862 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
8863 Subtarget, DAG))
8864 return Blend;
8866 if (SDValue Masked =
8867 lowerVectorShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask, DAG))
8868 return Masked;
8870 // Use dedicated unpack instructions for masks that match their pattern.
8871 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 1, 5))
8872 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V1, V2);
8873 if (isShuffleEquivalent(V1, V2, Mask, 2, 6, 3, 7))
8874 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V1, V2);
8876 // Try to use byte rotation instructions.
8877 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
8878 if (Subtarget->hasSSSE3())
8879 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8880 DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
8881 return Rotate;
8883 // We implement this with SHUFPS because it can blend from two vectors.
8884 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
8885 // up the inputs, bypassing domain shift penalties that we would incur if we
8886 // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
8888 return DAG.getNode(ISD::BITCAST, DL, MVT::v4i32,
8889 DAG.getVectorShuffle(
8890 MVT::v4f32, DL,
8891 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V1),
8892 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V2), Mask));
8895 /// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
8896 /// shuffle lowering, and the most complex part.
8898 /// The lowering strategy is to try to form pairs of input lanes which are
8899 /// targeted at the same half of the final vector, and then use a dword shuffle
8900 /// to place them onto the right half, and finally unpack the paired lanes into
8901 /// their final position.
8903 /// The exact breakdown of how to form these dword pairs and align them on the
8904 /// correct sides is really tricky. See the comments within the function for
8905 /// more of the details.
8906 static SDValue lowerV8I16SingleInputVectorShuffle(
8907 SDLoc DL, SDValue V, MutableArrayRef<int> Mask,
8908 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8909 assert(V.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
8910 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
8911 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
8913 SmallVector<int, 4> LoInputs;
8914 std::copy_if(LoMask.begin(), LoMask.end(), std::back_inserter(LoInputs),
8915 [](int M) { return M >= 0; });
8916 std::sort(LoInputs.begin(), LoInputs.end());
8917 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
8918 SmallVector<int, 4> HiInputs;
8919 std::copy_if(HiMask.begin(), HiMask.end(), std::back_inserter(HiInputs),
8920 [](int M) { return M >= 0; });
8921 std::sort(HiInputs.begin(), HiInputs.end());
8922 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
8923 int NumLToL =
8924 std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
8925 int NumHToL = LoInputs.size() - NumLToL;
8926 int NumLToH =
8927 std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
8928 int NumHToH = HiInputs.size() - NumLToH;
8929 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
8930 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
8931 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
8932 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
8934 // Check for being able to broadcast a single element.
8935 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i16, DL, V,
8936 Mask, Subtarget, DAG))
8937 return Broadcast;
8939 // Try to use bit shift instructions.
8940 if (SDValue Shift = lowerVectorShuffleAsBitShift(
8941 DL, MVT::v8i16, V, V, Mask, DAG))
8942 return Shift;
8944 // Try to use byte shift instructions.
8945 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8946 DL, MVT::v8i16, V, V, Mask, DAG))
8947 return Shift;
8949 // Use dedicated unpack instructions for masks that match their pattern.
8950 if (isShuffleEquivalent(V, V, Mask, 0, 0, 1, 1, 2, 2, 3, 3))
8951 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V, V);
8952 if (isShuffleEquivalent(V, V, Mask, 4, 4, 5, 5, 6, 6, 7, 7))
8953 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V, V);
8955 // Try to use byte rotation instructions.
8956 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8957 DL, MVT::v8i16, V, V, Mask, Subtarget, DAG))
8958 return Rotate;
8960 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
8961 // such inputs we can swap two of the dwords across the half mark and end up
8962 // with <=2 inputs to each half in each half. Once there, we can fall through
8963 // to the generic code below. For example:
8965 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
8966 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
8968 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
8969 // and an existing 2-into-2 on the other half. In this case we may have to
8970 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
8971 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
8972 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
8973 // because any other situation (including a 3-into-1 or 1-into-3 in the other
8974 // half than the one we target for fixing) will be fixed when we re-enter this
8975 // path. We will also combine away any sequence of PSHUFD instructions that
8976 // result into a single instruction. Here is an example of the tricky case:
8978 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
8979 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
8981 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
8983 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
8984 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
8986 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
8987 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
8989 // The result is fine to be handled by the generic logic.
8990 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
8991 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
8992 int AOffset, int BOffset) {
8993 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
8994 "Must call this with A having 3 or 1 inputs from the A half.");
8995 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
8996 "Must call this with B having 1 or 3 inputs from the B half.");
8997 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
8998 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
9000 // Compute the index of dword with only one word among the three inputs in
9001 // a half by taking the sum of the half with three inputs and subtracting
9002 // the sum of the actual three inputs. The difference is the remaining
9005 int &TripleDWord = AToAInputs.size() == 3 ? ADWord : BDWord;
9006 int &OneInputDWord = AToAInputs.size() == 3 ? BDWord : ADWord;
9007 int TripleInputOffset = AToAInputs.size() == 3 ? AOffset : BOffset;
9008 ArrayRef<int> TripleInputs = AToAInputs.size() == 3 ? AToAInputs : BToAInputs;
9009 int OneInput = AToAInputs.size() == 3 ? BToAInputs[0] : AToAInputs[0];
9010 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
9011 int TripleNonInputIdx =
9012 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
9013 TripleDWord = TripleNonInputIdx / 2;
9015 // We use xor with one to compute the adjacent DWord to whichever one the
9017 OneInputDWord = (OneInput / 2) ^ 1;
9019 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
9020 // and BToA inputs. If there is also such a problem with the BToB and AToB
9021 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
9022 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
9023 // is essential that we don't *create* a 3<-1 as then we might oscillate.
9024 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
9025 // Compute how many inputs will be flipped by swapping these DWords. We need
9027 // to balance this to ensure we don't form a 3-1 shuffle in the other
9028 // half.
9029 int NumFlippedAToBInputs =
9030 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
9031 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
9032 int NumFlippedBToBInputs =
9033 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
9034 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
9035 if ((NumFlippedAToBInputs == 1 &&
9036 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
9037 (NumFlippedBToBInputs == 1 &&
9038 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
9039 // We choose whether to fix the A half or B half based on whether that
9040 // half has zero flipped inputs. At zero, we may not be able to fix it
9041 // with that half. We also bias towards fixing the B half because that
9042 // will more commonly be the high half, and we have to bias one way.
9043 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
9044 ArrayRef<int> Inputs) {
9045 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
9046 bool IsFixIdxInput = std::find(Inputs.begin(), Inputs.end(),
9047 PinnedIdx ^ 1) != Inputs.end();
9048 // Determine whether the free index is in the flipped dword or the
9049 // unflipped dword based on where the pinned index is. We use this bit
9050 // in an xor to conditionally select the adjacent dword.
9051 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
9052 bool IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9053 FixFreeIdx) != Inputs.end();
9054 if (IsFixIdxInput == IsFixFreeIdxInput)
9056 IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9057 FixFreeIdx) != Inputs.end();
9058 assert(IsFixIdxInput != IsFixFreeIdxInput &&
9059 "We need to be changing the number of flipped inputs!");
9060 int PSHUFHalfMask[] = {0, 1, 2, 3};
9061 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
9062 V = DAG.getNode(FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
9063 MVT::v8i16, V,
9064 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DAG));
9066 for (int &M : Mask)
9067 if (M != -1 && M == FixIdx)
9069 else if (M != -1 && M == FixFreeIdx)
9072 if (NumFlippedBToBInputs != 0) {
9073 int BPinnedIdx =
9074 BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
9075 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
9076 } else {
9077 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
9078 int APinnedIdx =
9079 AToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
9080 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
9085 int PSHUFDMask[] = {0, 1, 2, 3};
9086 PSHUFDMask[ADWord] = BDWord;
9087 PSHUFDMask[BDWord] = ADWord;
9088 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9089 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9090 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9091 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9093 // Adjust the mask to match the new locations of A and B.
9094 for (int &M : Mask)
9095 if (M != -1 && M/2 == ADWord)
9096 M = 2 * BDWord + M % 2;
9097 else if (M != -1 && M/2 == BDWord)
9098 M = 2 * ADWord + M % 2;
9100 // Recurse back into this routine to re-compute state now that this isn't
9101 // a 3 and 1 problem.
9102 return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
9105 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
9106 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
9107 else if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
9108 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
9110 // At this point there are at most two inputs to the low and high halves from
9111 // each half. That means the inputs can always be grouped into dwords and
9112 // those dwords can then be moved to the correct half with a dword shuffle.
9113 // We use at most one low and one high word shuffle to collect these paired
9114 // inputs into dwords, and finally a dword shuffle to place them.
9115 int PSHUFLMask[4] = {-1, -1, -1, -1};
9116 int PSHUFHMask[4] = {-1, -1, -1, -1};
9117 int PSHUFDMask[4] = {-1, -1, -1, -1};
9119 // First fix the masks for all the inputs that are staying in their
9120 // original halves. This will then dictate the targets of the cross-half
9122 auto fixInPlaceInputs =
9123 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
9124 MutableArrayRef<int> SourceHalfMask,
9125 MutableArrayRef<int> HalfMask, int HalfOffset) {
9126 if (InPlaceInputs.empty())
9128 if (InPlaceInputs.size() == 1) {
9129 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9130 InPlaceInputs[0] - HalfOffset;
9131 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
9134 if (IncomingInputs.empty()) {
9135 // Just fix all of the in place inputs.
9136 for (int Input : InPlaceInputs) {
9137 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
9138 PSHUFDMask[Input / 2] = Input / 2;
9143 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
9144 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9145 InPlaceInputs[0] - HalfOffset;
9146 // Put the second input next to the first so that they are packed into
9147 // a dword. We find the adjacent index by toggling the low bit.
9148 int AdjIndex = InPlaceInputs[0] ^ 1;
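// For example, if InPlaceInputs[0] is word 2 then AdjIndex is word 3, and if
// it is word 3 then AdjIndex is word 2; either way both words share dword
// AdjIndex / 2, which is what the PSHUFDMask update below relies on.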
9149 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
9150 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
9151 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
9153 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
9154 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
9156 // Now gather the cross-half inputs and place them into a free dword of
9157 // their target half.
9158 // FIXME: This operation could almost certainly be simplified dramatically to
9159 // look more like the 3-1 fixing operation.
9160 auto moveInputsToRightHalf = [&PSHUFDMask](
9161 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
9162 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
9163 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
9164 int DestOffset) {
9165 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
9166 return SourceHalfMask[Word] != -1 && SourceHalfMask[Word] != Word;
9168 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
9170 int LowWord = Word & ~1;
9171 int HighWord = Word | 1;
9172 return isWordClobbered(SourceHalfMask, LowWord) ||
9173 isWordClobbered(SourceHalfMask, HighWord);
9176 if (IncomingInputs.empty())
9177 return;
9179 if (ExistingInputs.empty()) {
9180 // Map any dwords with inputs from them into the right half.
9181 for (int Input : IncomingInputs) {
9182 // If the source half mask maps over the inputs, turn those into
9183 // swaps and use the swapped lane.
9184 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
9185 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] == -1) {
9186 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
9187 Input - SourceOffset;
9188 // We have to swap the uses in our half mask in one sweep.
9189 for (int &M : HalfMask)
9190 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
9191 M = Input;
9192 else if (M == Input)
9193 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9194 } else {
9195 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
9196 Input - SourceOffset &&
9197 "Previous placement doesn't match!");
9199 // Note that this correctly re-maps both when we do a swap and when
9200 // we observe the other side of the swap above. We rely on that to
9201 // avoid swapping the members of the input list directly.
9202 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9205 // Map the input's dword into the correct half.
9206 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] == -1)
9207 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
9208 else
9209 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
9210 Input / 2 &&
9211 "Previous placement doesn't match!");
9214 // And just directly shift any other-half mask elements to be same-half
9215 // as we will have mirrored the dword containing the element into the
9216 // same position within that half.
9217 for (int &M : HalfMask)
9218 if (M >= SourceOffset && M < SourceOffset + 4) {
9219 M = M - SourceOffset + DestOffset;
9220 assert(M >= 0 && "This should never wrap below zero!");
9225 // Ensure we have the input in a viable dword of its current half. This
9226 // is particularly tricky because the original position may be clobbered
9227 // by inputs being moved and *staying* in that half.
9228 if (IncomingInputs.size() == 1) {
9229 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9230 int InputFixed = std::find(std::begin(SourceHalfMask),
9231 std::end(SourceHalfMask), -1) -
9232 std::begin(SourceHalfMask) + SourceOffset;
9233 SourceHalfMask[InputFixed - SourceOffset] =
9234 IncomingInputs[0] - SourceOffset;
9235 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
9236 InputFixed);
9237 IncomingInputs[0] = InputFixed;
9239 } else if (IncomingInputs.size() == 2) {
9240 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
9241 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9242 // We have two non-adjacent or clobbered inputs we need to extract from
9243 // the source half. To do this, we need to map them into some adjacent
9244 // dword slot in the source mask.
9245 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
9246 IncomingInputs[1] - SourceOffset};
9248 // If there is a free slot in the source half mask adjacent to one of
9249 // the inputs, place the other input in it. We use (Index XOR 1) to
9250 // compute an adjacent index.
9251 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
9252 SourceHalfMask[InputsFixed[0] ^ 1] == -1) {
9253 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
9254 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9255 InputsFixed[1] = InputsFixed[0] ^ 1;
9256 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
9257 SourceHalfMask[InputsFixed[1] ^ 1] == -1) {
9258 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
9259 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
9260 InputsFixed[0] = InputsFixed[1] ^ 1;
9261 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] == -1 &&
9262 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] == -1) {
9263 // The two inputs are in the same DWord but it is clobbered and the
9264 // adjacent DWord isn't used at all. Move both inputs to the free
9266 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
9267 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
9268 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
9269 InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
9270 } else {
9271 // The only way we hit this point is if there is no clobbering
9272 // (because there are no off-half inputs to this half) and there is no
9273 // free slot adjacent to one of the inputs. In this case, we have to
9274 // swap an input with a non-input.
9275 for (int i = 0; i < 4; ++i)
9276 assert((SourceHalfMask[i] == -1 || SourceHalfMask[i] == i) &&
9277 "We can't handle any clobbers here!");
9278 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
9279 "Cannot have adjacent inputs here!");
9281 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9282 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
9284 // We also have to update the final source mask in this case because
9285 // it may need to undo the above swap.
9286 for (int &M : FinalSourceHalfMask)
9287 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
9288 M = InputsFixed[1] + SourceOffset;
9289 else if (M == InputsFixed[1] + SourceOffset)
9290 M = (InputsFixed[0] ^ 1) + SourceOffset;
9292 InputsFixed[1] = InputsFixed[0] ^ 1;
9295 // Point everything at the fixed inputs.
9296 for (int &M : HalfMask)
9297 if (M == IncomingInputs[0])
9298 M = InputsFixed[0] + SourceOffset;
9299 else if (M == IncomingInputs[1])
9300 M = InputsFixed[1] + SourceOffset;
9302 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
9303 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
9304 }
9305 } else {
9306 llvm_unreachable("Unhandled input size!");
9309 // Now hoist the DWord down to the right half.
9310 int FreeDWord = (PSHUFDMask[DestOffset / 2] == -1 ? 0 : 1) + DestOffset / 2;
9311 assert(PSHUFDMask[FreeDWord] == -1 && "DWord not free");
9312 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
9313 for (int &M : HalfMask)
9314 for (int Input : IncomingInputs)
9315 if (M == Input)
9316 M = FreeDWord * 2 + Input % 2;
9318 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
9319 /*SourceOffset*/ 4, /*DestOffset*/ 0);
9320 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
9321 /*SourceOffset*/ 0, /*DestOffset*/ 4);
9323 // Now enact all the shuffles we've computed to move the inputs into their
9325 if (!isNoopShuffleMask(PSHUFLMask))
9326 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9327 getV4X86ShuffleImm8ForMask(PSHUFLMask, DAG));
9328 if (!isNoopShuffleMask(PSHUFHMask))
9329 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9330 getV4X86ShuffleImm8ForMask(PSHUFHMask, DAG));
9331 if (!isNoopShuffleMask(PSHUFDMask))
9332 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9333 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9334 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9335 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9337 // At this point, each half should contain all its inputs, and we can then
9338 // just shuffle them into their final position.
9339 assert(std::count_if(LoMask.begin(), LoMask.end(),
9340 [](int M) { return M >= 4; }) == 0 &&
9341 "Failed to lift all the high half inputs to the low mask!");
9342 assert(std::count_if(HiMask.begin(), HiMask.end(),
9343 [](int M) { return M >= 0 && M < 4; }) == 0 &&
9344 "Failed to lift all the low half inputs to the high mask!");
9346 // Do a half shuffle for the low mask.
9347 if (!isNoopShuffleMask(LoMask))
9348 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9349 getV4X86ShuffleImm8ForMask(LoMask, DAG));
9351 // Do a half shuffle with the high mask after shifting its values down.
9352 for (int &M : HiMask)
9353 if (M >= 0)
9354 M -= 4;
9355 if (!isNoopShuffleMask(HiMask))
9356 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9357 getV4X86ShuffleImm8ForMask(HiMask, DAG));
9362 /// \brief Detect whether the mask pattern should be lowered through
9365 /// This essentially tests whether viewing the mask as an interleaving of two
9366 /// sub-sequences reduces the cross-input traffic of a blend operation. If so,
9367 /// lowering it through interleaving is a significantly better strategy.
9368 static bool shouldLowerAsInterleaving(ArrayRef<int> Mask) {
9369 int NumEvenInputs[2] = {0, 0};
9370 int NumOddInputs[2] = {0, 0};
9371 int NumLoInputs[2] = {0, 0};
9372 int NumHiInputs[2] = {0, 0};
9373 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
9374 if (Mask[i] == -1)
9375 continue;
9377 int InputIdx = Mask[i] >= Size;
9379 if (Mask[i] % Size < Size / 2)
9380 ++NumLoInputs[InputIdx];
9381 else
9382 ++NumHiInputs[InputIdx];
9384 if (i % 2 == 0)
9385 ++NumEvenInputs[InputIdx];
9386 else
9387 ++NumOddInputs[InputIdx];
9390 // The minimum number of cross-input results for both the interleaved and
9391 // split cases. If interleaving results in fewer cross-input results, return
9393 int InterleavedCrosses = std::min(NumEvenInputs[1] + NumOddInputs[0],
9394 NumEvenInputs[0] + NumOddInputs[1]);
9395 int SplitCrosses = std::min(NumLoInputs[1] + NumHiInputs[0],
9396 NumLoInputs[0] + NumHiInputs[1]);
9397 return InterleavedCrosses < SplitCrosses;
9400 /// \brief Blend two v8i16 vectors using a naive unpack strategy.
9402 /// This strategy only works when the inputs from each vector fit into a single
9403 /// half of that vector, and generally there are not so many inputs as to leave
9404 /// the in-place shuffles required highly constrained (and thus expensive). It
9405 /// shifts all the inputs into a single side of both input vectors and then
9406 /// uses an unpack to interleave these inputs in a single vector. At that
9407 /// point, we will fall back on the generic single input shuffle lowering.
9408 static SDValue lowerV8I16BasicBlendVectorShuffle(SDLoc DL, SDValue V1,
9410 MutableArrayRef<int> Mask,
9411 const X86Subtarget *Subtarget,
9412 SelectionDAG &DAG) {
9413 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9414 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9415 SmallVector<int, 3> LoV1Inputs, HiV1Inputs, LoV2Inputs, HiV2Inputs;
9416 for (int i = 0; i < 8; ++i)
9417 if (Mask[i] >= 0 && Mask[i] < 4)
9418 LoV1Inputs.push_back(i);
9419 else if (Mask[i] >= 4 && Mask[i] < 8)
9420 HiV1Inputs.push_back(i);
9421 else if (Mask[i] >= 8 && Mask[i] < 12)
9422 LoV2Inputs.push_back(i);
9423 else if (Mask[i] >= 12)
9424 HiV2Inputs.push_back(i);
9426 int NumV1Inputs = LoV1Inputs.size() + HiV1Inputs.size();
9427 int NumV2Inputs = LoV2Inputs.size() + HiV2Inputs.size();
9430 assert(NumV1Inputs > 0 && NumV1Inputs <= 3 && "At most 3 inputs supported");
9431 assert(NumV2Inputs > 0 && NumV2Inputs <= 3 && "At most 3 inputs supported");
9432 assert(NumV1Inputs + NumV2Inputs <= 4 && "At most 4 combined inputs");
9434 bool MergeFromLo = LoV1Inputs.size() + LoV2Inputs.size() >=
9435 HiV1Inputs.size() + HiV2Inputs.size();
9437 auto moveInputsToHalf = [&](SDValue V, ArrayRef<int> LoInputs,
9438 ArrayRef<int> HiInputs, bool MoveToLo,
9439 int MaskOffset) {
9440 ArrayRef<int> GoodInputs = MoveToLo ? LoInputs : HiInputs;
9441 ArrayRef<int> BadInputs = MoveToLo ? HiInputs : LoInputs;
9442 if (BadInputs.empty())
9443 return V;
9445 int MoveMask[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9446 int MoveOffset = MoveToLo ? 0 : 4;
9448 if (GoodInputs.empty()) {
9449 for (int BadInput : BadInputs) {
9450 MoveMask[Mask[BadInput] % 4 + MoveOffset] = Mask[BadInput] - MaskOffset;
9451 Mask[BadInput] = Mask[BadInput] % 4 + MoveOffset + MaskOffset;
9454 if (GoodInputs.size() == 2) {
9455 // If the low inputs are spread across two dwords, pack them into
9456 // a single dword.
9457 MoveMask[MoveOffset] = Mask[GoodInputs[0]] - MaskOffset;
9458 MoveMask[MoveOffset + 1] = Mask[GoodInputs[1]] - MaskOffset;
9459 Mask[GoodInputs[0]] = MoveOffset + MaskOffset;
9460 Mask[GoodInputs[1]] = MoveOffset + 1 + MaskOffset;
9462 // Otherwise pin the good inputs.
9463 for (int GoodInput : GoodInputs)
9464 MoveMask[Mask[GoodInput] - MaskOffset] = Mask[GoodInput] - MaskOffset;
9467 if (BadInputs.size() == 2) {
9468 // If we have two bad inputs then there may be either one or two good
9469 // inputs fixed in place. Find a fixed input, and then find the *other*
9470 // two adjacent indices by using modular arithmetic.
9471 int GoodMaskIdx =
9472 std::find_if(std::begin(MoveMask) + MoveOffset, std::end(MoveMask),
9473 [](int M) { return M >= 0; }) -
9474 std::begin(MoveMask);
9475 int MoveMaskIdx =
9476 ((((GoodMaskIdx - MoveOffset) & ~1) + 2) % 4) + MoveOffset;
9477 assert(MoveMask[MoveMaskIdx] == -1 && "Expected empty slot");
9478 assert(MoveMask[MoveMaskIdx + 1] == -1 && "Expected empty slot");
9479 MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
9480 MoveMask[MoveMaskIdx + 1] = Mask[BadInputs[1]] - MaskOffset;
9481 Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
9482 Mask[BadInputs[1]] = MoveMaskIdx + 1 + MaskOffset;
9483 } else {
9484 assert(BadInputs.size() == 1 && "All sizes handled");
9485 int MoveMaskIdx = std::find(std::begin(MoveMask) + MoveOffset,
9486 std::end(MoveMask), -1) -
9487 std::begin(MoveMask);
9488 MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
9489 Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
9493 return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
9494 MoveMask);
9495 };
9496 V1 = moveInputsToHalf(V1, LoV1Inputs, HiV1Inputs, MergeFromLo,
9497 /*MaskOffset*/ 0);
9498 V2 = moveInputsToHalf(V2, LoV2Inputs, HiV2Inputs, MergeFromLo,
9499 /*MaskOffset*/ 8);
9501 // FIXME: Select an interleaving of the merge of V1 and V2 that minimizes
9502 // cross-half traffic in the final shuffle.
9504 // Munge the mask to be a single-input mask after the unpack merges the
9505 // results.
9506 for (int &M : Mask)
9507 if (M != -1)
9508 M = 2 * (M % 4) + (M / 8);
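// Worked example: after the UNPCK{L,H} merge, an input sitting at position p
// within the merged half of V1 lands at element 2*p, and the same position of
// V2 at 2*p + 1. Since p == M % 4 and M / 8 distinguishes V1 (0) from V2 (1),
// the remapping above places every mask entry exactly.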
9510 return DAG.getVectorShuffle(
9511 MVT::v8i16, DL, DAG.getNode(MergeFromLo ? X86ISD::UNPCKL : X86ISD::UNPCKH,
9512 DL, MVT::v8i16, V1, V2),
9513 DAG.getUNDEF(MVT::v8i16), Mask);
9516 /// \brief Generic lowering of 8-lane i16 shuffles.
9518 /// This handles both single-input shuffles and combined shuffle/blends with
9519 /// two inputs. The single input shuffles are immediately delegated to
9520 /// a dedicated lowering routine.
9522 /// The blends are lowered in one of three fundamental ways. If there are few
9523 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
9524 /// of the input is significantly cheaper when lowered as an interleaving of
9525 /// the two inputs, try to interleave them. Otherwise, blend the low and high
9526 /// halves of the inputs separately (making them have relatively few inputs)
9527 /// and then concatenate them.
9528 static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9529 const X86Subtarget *Subtarget,
9530 SelectionDAG &DAG) {
9531 SDLoc DL(Op);
9532 assert(Op.getSimpleValueType() == MVT::v8i16 && "Bad shuffle type!");
9533 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9534 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9535 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9536 ArrayRef<int> OrigMask = SVOp->getMask();
9537 int MaskStorage[8] = {OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9538 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7]};
9539 MutableArrayRef<int> Mask(MaskStorage);
9541 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
9543 // Whenever we can lower this as a zext, that instruction is strictly faster
9544 // than any alternative.
9545 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9546 DL, MVT::v8i16, V1, V2, OrigMask, Subtarget, DAG))
9549 auto isV1 = [](int M) { return M >= 0 && M < 8; };
9550 auto isV2 = [](int M) { return M >= 8; };
9552 int NumV1Inputs = std::count_if(Mask.begin(), Mask.end(), isV1);
9553 int NumV2Inputs = std::count_if(Mask.begin(), Mask.end(), isV2);
9555 if (NumV2Inputs == 0)
9556 return lowerV8I16SingleInputVectorShuffle(DL, V1, Mask, Subtarget, DAG);
9558 assert(NumV1Inputs > 0 && "All single-input shuffles should be canonicalized "
9559 "to be V1-input shuffles.");
9561 // Try to use bit shift instructions.
9562 if (SDValue Shift = lowerVectorShuffleAsBitShift(
9563 DL, MVT::v8i16, V1, V2, Mask, DAG))
9566 // Try to use byte shift instructions.
9567 if (SDValue Shift = lowerVectorShuffleAsByteShift(
9568 DL, MVT::v8i16, V1, V2, Mask, DAG))
9571 // There are special ways we can lower some single-element blends.
9572 if (NumV2Inputs == 1)
9573 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v8i16, DL, V1, V2,
9574 Mask, Subtarget, DAG))
9577 if (Subtarget->hasSSE41())
9578 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
9582 if (SDValue Masked =
9583 lowerVectorShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask, DAG))
9586 // Use dedicated unpack instructions for masks that match their pattern.
9587 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 2, 10, 3, 11))
9588 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V1, V2);
9589 if (isShuffleEquivalent(V1, V2, Mask, 4, 12, 5, 13, 6, 14, 7, 15))
9590 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V1, V2);
9592 // Try to use byte rotation instructions.
9593 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9594 DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
9597 if (NumV1Inputs + NumV2Inputs <= 4)
9598 return lowerV8I16BasicBlendVectorShuffle(DL, V1, V2, Mask, Subtarget, DAG);
9600 // Check whether an interleaving lowering is likely to be more efficient.
9601 // This isn't perfect but it is a strong heuristic that tends to work well on
9602 // the kinds of shuffles that show up in practice.
9604 // FIXME: Handle 1x, 2x, and 4x interleaving.
9605 if (shouldLowerAsInterleaving(Mask)) {
9606 // FIXME: Figure out whether we should pack these into the low or high
9607 // halves.
9609 int EMask[8], OMask[8];
9610 for (int i = 0; i < 4; ++i) {
9611 EMask[i] = Mask[2*i];
9612 OMask[i] = Mask[2*i + 1];
9613 EMask[i + 4] = -1;
9614 OMask[i + 4] = -1;
9615 }
9617 SDValue Evens = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, EMask);
9618 SDValue Odds = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, OMask);
9620 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, Evens, Odds);
9623 int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9624 int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9626 for (int i = 0; i < 4; ++i) {
9627 LoBlendMask[i] = Mask[i];
9628 HiBlendMask[i] = Mask[i + 4];
9631 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
9632 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
9633 LoV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, LoV);
9634 HiV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, HiV);
9636 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9637 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, LoV, HiV));
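// Note: LoBlendMask and HiBlendMask only populate elements 0-3, so after the
// bitcasts the low 64 bits of LoV hold the four low results and the low 64
// bits of HiV hold the four high results; the v2i64 UNPCKL above is then just
// a concatenation of those two halves.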
9640 /// \brief Check whether a compaction lowering can be done by dropping even
9641 /// elements and compute how many times even elements must be dropped.
9643 /// This handles shuffles which take every Nth element where N is a power of
9644 /// two. Example shuffle masks:
9646 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
9647 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
9648 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
9649 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
9650 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
9651 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
9653 /// Any of these lanes can of course be undef.
9655 /// This routine only supports N <= 3.
9656 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
9659 /// \returns N above, or the number of times even elements must be dropped if
9660 /// there is such a number. Otherwise returns zero.
9661 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
9662 // Figure out whether we're looping over two inputs or just one.
9663 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9665 // The modulus for the shuffle vector entries is based on whether this is
9666 // a single input or not.
9667 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
9668 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
9669 "We should only be called with masks with a power-of-2 size!");
9671 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
9673 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
9674 // and 2^3 simultaneously. This is because we may have ambiguity with
9675 // partially undef inputs.
9676 bool ViableForN[3] = {true, true, true};
9678 for (int i = 0, e = Mask.size(); i < e; ++i) {
9679 // Ignore undef lanes, we'll optimistically collapse them to the pattern we
9680 // need.
9681 if (Mask[i] == -1)
9682 continue;
9684 bool IsAnyViable = false;
9685 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9686 if (ViableForN[j]) {
9687 uint64_t N = j + 1;
9689 // The shuffle mask must be equal to (i * 2^N) % M.
9690 if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
9691 IsAnyViable = true;
9692 else
9693 ViableForN[j] = false;
9694 }
9695 // Early exit if we exhaust the possible powers of two.
9696 if (!IsAnyViable)
9697 break;
9698 }
9700 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9701 if (ViableForN[j])
9702 return j + 1;
9704 // Return 0 as there is no viable power of two.
9705 return 0;
9706 }
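// Note: ShuffleModulus is a power of two, so ((uint64_t)i << N) & ModMask is
// (i * 2^N) % ShuffleModulus computed without a division. For example, the
// single-input v16i8 mask <0, 2, 4, ..., 14, 0, 2, ..., 14> is viable only for
// N = 1 and this routine returns 1.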
9708 /// \brief Generic lowering of v16i8 shuffles.
9710 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
9711 /// detect any complexity reducing interleaving. If that doesn't help, it uses
9712 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
9713 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them
9714 /// back together.
9715 static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9716 const X86Subtarget *Subtarget,
9717 SelectionDAG &DAG) {
9718 SDLoc DL(Op);
9719 assert(Op.getSimpleValueType() == MVT::v16i8 && "Bad shuffle type!");
9720 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9721 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9722 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9723 ArrayRef<int> OrigMask = SVOp->getMask();
9724 assert(OrigMask.size() == 16 && "Unexpected mask size for v16 shuffle!");
9726 // Try to use bit shift instructions.
9727 if (SDValue Shift = lowerVectorShuffleAsBitShift(
9728 DL, MVT::v16i8, V1, V2, OrigMask, DAG))
9731 // Try to use byte shift instructions.
9732 if (SDValue Shift = lowerVectorShuffleAsByteShift(
9733 DL, MVT::v16i8, V1, V2, OrigMask, DAG))
9736 // Try to use byte rotation instructions.
9737 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9738 DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
9741 // Try to use a zext lowering.
9742 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9743 DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
9746 int MaskStorage[16] = {
9747 OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9748 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7],
9749 OrigMask[8], OrigMask[9], OrigMask[10], OrigMask[11],
9750 OrigMask[12], OrigMask[13], OrigMask[14], OrigMask[15]};
9751 MutableArrayRef<int> Mask(MaskStorage);
9752 MutableArrayRef<int> LoMask = Mask.slice(0, 8);
9753 MutableArrayRef<int> HiMask = Mask.slice(8, 8);
9755 int NumV2Elements =
9756 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 16; });
9758 // For single-input shuffles, there are some nicer lowering tricks we can use.
9759 if (NumV2Elements == 0) {
9760 // Check for being able to broadcast a single element.
9761 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i8, DL, V1,
9762 Mask, Subtarget, DAG))
9765 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
9766 // Notably, this handles splat and partial-splat shuffles more efficiently.
9767 // However, it only makes sense if the pre-duplication shuffle simplifies
9768 // things significantly. Currently, this means we need to be able to
9769 // express the pre-duplication shuffle as an i16 shuffle.
9771 // FIXME: We should check for other patterns which can be widened into an
9772 // i16 shuffle as well.
9773 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
9774 for (int i = 0; i < 16; i += 2)
9775 if (Mask[i] != -1 && Mask[i + 1] != -1 && Mask[i] != Mask[i + 1])
9776 return false;
9778 return true;
9779 };
9780 auto tryToWidenViaDuplication = [&]() -> SDValue {
9781 if (!canWidenViaDuplication(Mask))
9782 return SDValue();
9783 SmallVector<int, 4> LoInputs;
9784 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(LoInputs),
9785 [](int M) { return M >= 0 && M < 8; });
9786 std::sort(LoInputs.begin(), LoInputs.end());
9787 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
9788 LoInputs.end());
9789 SmallVector<int, 4> HiInputs;
9790 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(HiInputs),
9791 [](int M) { return M >= 8; });
9792 std::sort(HiInputs.begin(), HiInputs.end());
9793 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
9794 HiInputs.end());
9796 bool TargetLo = LoInputs.size() >= HiInputs.size();
9797 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
9798 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
9800 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9801 SmallDenseMap<int, int, 8> LaneMap;
9802 for (int I : InPlaceInputs) {
9803 PreDupI16Shuffle[I/2] = I/2;
9804 LaneMap[I] = I;
9805 }
9806 int j = TargetLo ? 0 : 4, je = j + 4;
9807 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
9808 // Check if j is already a shuffle of this input. This happens when
9809 // there are two adjacent bytes after we move the low one.
9810 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
9811 // If we haven't yet mapped the input, search for a slot into which
9812 // we can map it.
9813 while (j < je && PreDupI16Shuffle[j] != -1)
9814 ++j;
9816 if (j == je)
9817 // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
9818 return SDValue();
9820 // Map this input with the i16 shuffle.
9821 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
9824 // Update the lane map based on the mapping we ended up with.
9825 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
9826 }
9827 V1 = DAG.getNode(
9828 ISD::BITCAST, DL, MVT::v16i8,
9829 DAG.getVectorShuffle(MVT::v8i16, DL,
9830 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9831 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
9833 // Unpack the bytes to form the i16s that will be shuffled into place.
9834 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9835 MVT::v16i8, V1, V1);
9837 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9838 for (int i = 0; i < 16; ++i)
9839 if (Mask[i] != -1) {
9840 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
9841 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
9842 if (PostDupI16Shuffle[i / 2] == -1)
9843 PostDupI16Shuffle[i / 2] = MappedMask;
9844 else
9845 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
9846 "Conflicting entries in the original shuffle!");
9848 return DAG.getNode(
9849 ISD::BITCAST, DL, MVT::v16i8,
9850 DAG.getVectorShuffle(MVT::v8i16, DL,
9851 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9852 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
9854 if (SDValue V = tryToWidenViaDuplication())
9855 return V;
9856 }
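// Worked example: a v16i8 splat of byte 5 (all mask entries 5) can take this
// path when a broadcast isn't available. The pre-duplication i16 shuffle keeps
// word 2 (bytes 4-5) in place, the UNPCKL doubles byte 5 into word 5, and the
// post-duplication i16 shuffle is then a plain splat of word 5, reproducing
// the byte splat.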
9858 // Check whether an interleaving lowering is likely to be more efficient.
9859 // This isn't perfect but it is a strong heuristic that tends to work well on
9860 // the kinds of shuffles that show up in practice.
9862 // FIXME: We need to handle other interleaving widths (i16, i32, ...).
9863 if (shouldLowerAsInterleaving(Mask)) {
9864 int NumLoHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
9865 return (M >= 0 && M < 8) || (M >= 16 && M < 24);
9866 });
9867 int NumHiHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
9868 return (M >= 8 && M < 16) || M >= 24;
9869 });
9870 int EMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
9871 -1, -1, -1, -1, -1, -1, -1, -1};
9872 int OMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
9873 -1, -1, -1, -1, -1, -1, -1, -1};
9874 bool UnpackLo = NumLoHalf >= NumHiHalf;
9875 MutableArrayRef<int> TargetEMask(UnpackLo ? EMask : EMask + 8, 8);
9876 MutableArrayRef<int> TargetOMask(UnpackLo ? OMask : OMask + 8, 8);
9877 for (int i = 0; i < 8; ++i) {
9878 TargetEMask[i] = Mask[2 * i];
9879 TargetOMask[i] = Mask[2 * i + 1];
9882 SDValue Evens = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, EMask);
9883 SDValue Odds = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, OMask);
9885 return DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9886 MVT::v16i8, Evens, Odds);
9889 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
9890 // with PSHUFB. It is important to do this before we attempt to generate any
9891 // blends but after all of the single-input lowerings. If the single input
9892 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
9893 // want to preserve that and we can DAG combine any longer sequences into
9894 // a PSHUFB in the end. But once we start blending from multiple inputs,
9895 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
9896 // and there are *very* few patterns that would actually be faster than the
9897 // PSHUFB approach because of its ability to zero lanes.
9899 // FIXME: The only exceptions to the above are blends which are exact
9900 // interleavings with direct instructions supporting them. We currently don't
9901 // handle those well here.
9902 if (Subtarget->hasSSSE3()) {
9903 SDValue V1Mask[16];
9904 SDValue V2Mask[16];
9905 bool V1InUse = false;
9906 bool V2InUse = false;
9907 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
9909 for (int i = 0; i < 16; ++i) {
9910 if (Mask[i] == -1) {
9911 V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
9912 } else {
9913 const int ZeroMask = 0x80;
9914 int V1Idx = (Mask[i] < 16 ? Mask[i] : ZeroMask);
9915 int V2Idx = (Mask[i] < 16 ? ZeroMask : Mask[i] - 16);
9916 if (Zeroable[i])
9917 V1Idx = V2Idx = ZeroMask;
9918 V1Mask[i] = DAG.getConstant(V1Idx, MVT::i8);
9919 V2Mask[i] = DAG.getConstant(V2Idx, MVT::i8);
9920 V1InUse |= (ZeroMask != V1Idx);
9921 V2InUse |= (ZeroMask != V2Idx);
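// Note: 0x80 in a PSHUFB selector byte zeroes the destination byte, so each
// input is shuffled with the other input's positions (and any zeroable
// positions) forced to zero; when both inputs are used, the two results can
// then simply be OR'ed together below.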
9925 if (V1InUse)
9926 V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V1,
9927 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
9928 if (V2InUse)
9929 V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V2,
9930 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));
9932 // If we need shuffled inputs from both, blend the two.
9933 if (V1InUse && V2InUse)
9934 return DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
9935 if (V1InUse)
9936 return V1; // Single inputs are easy.
9937 if (V2InUse)
9938 return V2; // Single inputs are easy.
9939 // Shuffling to a zeroable vector.
9940 return getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
9943 // There are special ways we can lower some single-element blends.
9944 if (NumV2Elements == 1)
9945 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v16i8, DL, V1, V2,
9946 Mask, Subtarget, DAG))
9949 // Check whether a compaction lowering can be done. This handles shuffles
9950 // which take every Nth element for some even N. See the helper function for
9951 // details.
9953 // We special case these as they can be particularly efficiently handled with
9954 // the PACKUSWB instruction on x86 and they show up in common patterns of
9955 // rearranging bytes to truncate wide elements.
9956 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask)) {
9957 // NumEvenDrops is the power of two stride of the elements. Another way of
9958 // thinking about it is that we need to drop the even elements this many
9959 // times to get the original input.
9960 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9962 // First we need to zero all the dropped bytes.
9963 assert(NumEvenDrops <= 3 &&
9964 "No support for dropping even elements more than 3 times.");
9965 // We use the mask type to pick which bytes are preserved based on how many
9966 // elements are dropped.
9967 MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
9968 SDValue ByteClearMask =
9969 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8,
9970 DAG.getConstant(0xFF, MaskVTs[NumEvenDrops - 1]));
9971 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
9972 if (!IsSingleInput)
9973 V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
9975 // Now pack things back together.
9976 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
9977 V2 = IsSingleInput ? V1 : DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
9978 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
9979 for (int i = 1; i < NumEvenDrops; ++i) {
9980 Result = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, Result);
9981 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
9982 }
9984 return Result;
9985 }
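// Worked example: with NumEvenDrops == 1 and two inputs, the AND keeps bytes
// 0, 2, ..., 14 of each vector as zero-extended i16 values, and a single
// PACKUS then yields V1[0], V1[2], ..., V1[14], V2[0], ..., V2[14] -- the
// two-input "N = 1" pattern listed above canLowerByDroppingEvenElements.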
9987 int V1LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9988 int V1HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9989 int V2LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9990 int V2HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9992 auto buildBlendMasks = [](MutableArrayRef<int> HalfMask,
9993 MutableArrayRef<int> V1HalfBlendMask,
9994 MutableArrayRef<int> V2HalfBlendMask) {
9995 for (int i = 0; i < 8; ++i)
9996 if (HalfMask[i] >= 0 && HalfMask[i] < 16) {
9997 V1HalfBlendMask[i] = HalfMask[i];
9998 HalfMask[i] = i;
9999 } else if (HalfMask[i] >= 16) {
10000 V2HalfBlendMask[i] = HalfMask[i] - 16;
10001 HalfMask[i] = i + 8;
10004 buildBlendMasks(LoMask, V1LoBlendMask, V2LoBlendMask);
10005 buildBlendMasks(HiMask, V1HiBlendMask, V2HiBlendMask);
10007 SDValue Zero = getZeroVector(MVT::v8i16, Subtarget, DAG, DL);
10009 auto buildLoAndHiV8s = [&](SDValue V, MutableArrayRef<int> LoBlendMask,
10010 MutableArrayRef<int> HiBlendMask) {
10011 SDValue V1, V2;
10012 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
10013 // them out and avoid using UNPCK{L,H} to extract the elements of V as
10015 if (std::none_of(LoBlendMask.begin(), LoBlendMask.end(),
10016 [](int M) { return M >= 0 && M % 2 == 1; }) &&
10017 std::none_of(HiBlendMask.begin(), HiBlendMask.end(),
10018 [](int M) { return M >= 0 && M % 2 == 1; })) {
10019 // Use a mask to drop the high bytes.
10020 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
10021 V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, V1,
10022 DAG.getConstant(0x00FF, MVT::v8i16));
10024 // This will be a single vector shuffle instead of a blend so nuke V2.
10025 V2 = DAG.getUNDEF(MVT::v8i16);
10027 // Squash the masks to point directly into V1.
10028 for (int &M : LoBlendMask)
10029 if (M >= 0)
10030 M /= 2;
10031 for (int &M : HiBlendMask)
10032 if (M >= 0)
10033 M /= 2;
10034 } else {
10035 // Otherwise just unpack the low half of V into V1 and the high half into
10036 // V2 so that we can blend them as i16s.
10037 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
10038 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
10039 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
10040 DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
10043 SDValue BlendedLo = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
10044 SDValue BlendedHi = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
10045 return std::make_pair(BlendedLo, BlendedHi);
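// Note: unpacking V against a zero vector zero-extends each byte into an i16
// lane (bytes 0-7 into V1 here, bytes 8-15 into V2), so the v8i16 blends below
// can move the original byte values around and the final PACKUS truncates them
// back to bytes without loss.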
10047 SDValue V1Lo, V1Hi, V2Lo, V2Hi;
10048 std::tie(V1Lo, V1Hi) = buildLoAndHiV8s(V1, V1LoBlendMask, V1HiBlendMask);
10049 std::tie(V2Lo, V2Hi) = buildLoAndHiV8s(V2, V2LoBlendMask, V2HiBlendMask);
10051 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Lo, V2Lo, LoMask);
10052 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Hi, V2Hi, HiMask);
10054 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
10057 /// \brief Dispatching routine to lower various 128-bit x86 vector shuffles.
10059 /// This routine breaks down the specific type of 128-bit shuffle and
10060 /// dispatches to the lowering routines accordingly.
10061 static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10062 MVT VT, const X86Subtarget *Subtarget,
10063 SelectionDAG &DAG) {
10064 switch (VT.SimpleTy) {
10065 case MVT::v2i64:
10066 return lowerV2I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10067 case MVT::v2f64:
10068 return lowerV2F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10069 case MVT::v4i32:
10070 return lowerV4I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10071 case MVT::v4f32:
10072 return lowerV4F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10073 case MVT::v8i16:
10074 return lowerV8I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
10075 case MVT::v16i8:
10076 return lowerV16I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
10078 default:
10079 llvm_unreachable("Unimplemented!");
10083 /// \brief Helper function to test whether a shuffle mask could be
10084 /// simplified by widening the elements being shuffled.
10086 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
10087 /// leaves it in an unspecified state.
10089 /// NOTE: This must handle normal vector shuffle masks and *target* vector
10090 /// shuffle masks. The latter have the special property of a '-2' representing
10091 /// a zero-ed lane of a vector.
10092 static bool canWidenShuffleElements(ArrayRef<int> Mask,
10093 SmallVectorImpl<int> &WidenedMask) {
10094 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
10095 // If both elements are undef, it's trivial.
10096 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] == SM_SentinelUndef) {
10097 WidenedMask.push_back(SM_SentinelUndef);
10098 continue;
10099 }
10101 // Check for an undef mask and a mask value properly aligned to fit with
10102 // a pair of values. If we find such a case, use the non-undef mask's value.
10103 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] >= 0 && Mask[i + 1] % 2 == 1) {
10104 WidenedMask.push_back(Mask[i + 1] / 2);
10105 continue;
10106 }
10107 if (Mask[i + 1] == SM_SentinelUndef && Mask[i] >= 0 && Mask[i] % 2 == 0) {
10108 WidenedMask.push_back(Mask[i] / 2);
10109 continue;
10110 }
10112 // When zeroing, we need to spread the zeroing across both lanes to widen.
10113 if (Mask[i] == SM_SentinelZero || Mask[i + 1] == SM_SentinelZero) {
10114 if ((Mask[i] == SM_SentinelZero || Mask[i] == SM_SentinelUndef) &&
10115 (Mask[i + 1] == SM_SentinelZero || Mask[i + 1] == SM_SentinelUndef)) {
10116 WidenedMask.push_back(SM_SentinelZero);
10117 continue;
10118 }
10120 return false;
10121 }
10122 // Finally check if the two mask values are adjacent and aligned with
10123 // a pair.
10124 if (Mask[i] != SM_SentinelUndef && Mask[i] % 2 == 0 && Mask[i] + 1 == Mask[i + 1]) {
10125 WidenedMask.push_back(Mask[i] / 2);
10126 continue;
10127 }
10129 // Otherwise we can't safely widen the elements used in this shuffle.
10130 return false;
10131 }
10132 assert(WidenedMask.size() == Mask.size() / 2 &&
10133 "Incorrect size of mask after widening the elements!");
10138 /// \brief Generic routine to split vector shuffle into half-sized shuffles.
10140 /// This routine just extracts two subvectors, shuffles them independently, and
10141 /// then concatenates them back together. This should work effectively with all
10142 /// AVX vector shuffle types.
10143 static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10144 SDValue V2, ArrayRef<int> Mask,
10145 SelectionDAG &DAG) {
10146 assert(VT.getSizeInBits() >= 256 &&
10147 "Only for 256-bit or wider vector shuffles!");
10148 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
10149 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
10151 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
10152 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
10154 int NumElements = VT.getVectorNumElements();
10155 int SplitNumElements = NumElements / 2;
10156 MVT ScalarVT = VT.getScalarType();
10157 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
10159 // Rather than splitting build-vectors, just build two narrower build
10160 // vectors. This helps shuffling with splats and zeros.
10161 auto SplitVector = [&](SDValue V) {
10162 while (V.getOpcode() == ISD::BITCAST)
10163 V = V->getOperand(0);
10165 MVT OrigVT = V.getSimpleValueType();
10166 int OrigNumElements = OrigVT.getVectorNumElements();
10167 int OrigSplitNumElements = OrigNumElements / 2;
10168 MVT OrigScalarVT = OrigVT.getScalarType();
10169 MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
10171 SDValue LoV, HiV;
10173 auto *BV = dyn_cast<BuildVectorSDNode>(V);
10174 if (!BV) {
10175 LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
10176 DAG.getIntPtrConstant(0));
10177 HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
10178 DAG.getIntPtrConstant(OrigSplitNumElements));
10179 } else {
10181 SmallVector<SDValue, 16> LoOps, HiOps;
10182 for (int i = 0; i < OrigSplitNumElements; ++i) {
10183 LoOps.push_back(BV->getOperand(i));
10184 HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
10186 LoV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, LoOps);
10187 HiV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, HiOps);
10189 return std::make_pair(DAG.getNode(ISD::BITCAST, DL, SplitVT, LoV),
10190 DAG.getNode(ISD::BITCAST, DL, SplitVT, HiV));
10193 SDValue LoV1, HiV1, LoV2, HiV2;
10194 std::tie(LoV1, HiV1) = SplitVector(V1);
10195 std::tie(LoV2, HiV2) = SplitVector(V2);
10197 // Now create two 4-way blends of these half-width vectors.
10198 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
10199 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
10200 SmallVector<int, 32> V1BlendMask, V2BlendMask, BlendMask;
10201 for (int i = 0; i < SplitNumElements; ++i) {
10202 int M = HalfMask[i];
10203 if (M >= NumElements) {
10204 if (M >= NumElements + SplitNumElements)
10205 UseHiV2 = true;
10206 else
10207 UseLoV2 = true;
10208 V2BlendMask.push_back(M - NumElements);
10209 V1BlendMask.push_back(-1);
10210 BlendMask.push_back(SplitNumElements + i);
10211 } else if (M >= 0) {
10212 if (M >= SplitNumElements)
10213 UseHiV1 = true;
10214 else
10215 UseLoV1 = true;
10216 V2BlendMask.push_back(-1);
10217 V1BlendMask.push_back(M);
10218 BlendMask.push_back(i);
10219 } else {
10220 V2BlendMask.push_back(-1);
10221 V1BlendMask.push_back(-1);
10222 BlendMask.push_back(-1);
10226 // Because the lowering happens after all combining takes place, we need to
10227 // manually combine these blend masks as much as possible so that we create
10228 // a minimal number of high-level vector shuffle nodes.
10230 // First try just blending the halves of V1 or V2.
10231 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
10232 return DAG.getUNDEF(SplitVT);
10233 if (!UseLoV2 && !UseHiV2)
10234 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10235 if (!UseLoV1 && !UseHiV1)
10236 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10238 SDValue V1Blend, V2Blend;
10239 if (UseLoV1 && UseHiV1) {
10240 V1Blend =
10241 DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10242 } else {
10243 // We only use half of V1 so map the usage down into the final blend mask.
10244 V1Blend = UseLoV1 ? LoV1 : HiV1;
10245 for (int i = 0; i < SplitNumElements; ++i)
10246 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
10247 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
10249 if (UseLoV2 && UseHiV2) {
10250 V2Blend =
10251 DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10252 } else {
10253 // We only use half of V2 so map the usage down into the final blend mask.
10254 V2Blend = UseLoV2 ? LoV2 : HiV2;
10255 for (int i = 0; i < SplitNumElements; ++i)
10256 if (BlendMask[i] >= SplitNumElements)
10257 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
10259 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
10261 SDValue Lo = HalfBlend(LoMask);
10262 SDValue Hi = HalfBlend(HiMask);
10263 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
10266 /// \brief Either split a vector in halves or decompose the shuffles and the
10267 /// blends.
10268 ///
10269 /// This is provided as a good fallback for many lowerings of non-single-input
10270 /// shuffles with more than one 128-bit lane. In those cases, we want to select
10271 /// between splitting the shuffle into 128-bit components and stitching those
10272 /// back together vs. extracting the single-input shuffles and blending those
10273 /// results.
10274 static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
10275 SDValue V2, ArrayRef<int> Mask,
10276 SelectionDAG &DAG) {
10277 assert(!isSingleInputShuffleMask(Mask) && "This routine must not be used to "
10278 "lower single-input shuffles as it "
10279 "could then recurse on itself.");
10280 int Size = Mask.size();
10282 // If this can be modeled as a broadcast of two elements followed by a blend,
10283 // prefer that lowering. This is especially important because broadcasts can
10284 // often fold with memory operands.
10285 auto DoBothBroadcast = [&] {
10286 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
10287 for (int M : Mask)
10288 if (M >= Size) {
10289 if (V2BroadcastIdx == -1)
10290 V2BroadcastIdx = M - Size;
10291 else if (M - Size != V2BroadcastIdx)
10292 return false;
10293 } else if (M >= 0) {
10294 if (V1BroadcastIdx == -1)
10295 V1BroadcastIdx = M;
10296 else if (M != V1BroadcastIdx)
10297 return false;
10298 }
10299 return true;
10300 };
10301 if (DoBothBroadcast())
10302 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
10303 DAG);
10305 // If the inputs all stem from a single 128-bit lane of each input, then we
10306 // split them rather than blending because the split will decompose to
10307 // unusually few instructions.
10308 int LaneCount = VT.getSizeInBits() / 128;
10309 int LaneSize = Size / LaneCount;
10310 SmallBitVector LaneInputs[2];
10311 LaneInputs[0].resize(LaneCount, false);
10312 LaneInputs[1].resize(LaneCount, false);
10313 for (int i = 0; i < Size; ++i)
10315 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
10316 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
10317 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10319 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
10320 // that the decomposed single-input shuffles don't end up here.
10321 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10324 /// \brief Lower a vector shuffle crossing multiple 128-bit lanes as
10325 /// a permutation and blend of those lanes.
10327 /// This essentially blends the out-of-lane inputs to each lane into the lane
10328 /// from a permuted copy of the vector. This lowering strategy results in four
10329 /// instructions in the worst case for a single-input cross lane shuffle which
10330 /// is lower than any other fully general cross-lane shuffle strategy I'm aware
10331 /// of. Special cases for each particular shuffle pattern should be handled
10332 /// prior to trying this lowering.
10333 static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT,
10334 SDValue V1, SDValue V2,
10335 ArrayRef<int> Mask,
10336 SelectionDAG &DAG) {
10337 // FIXME: This should probably be generalized for 512-bit vectors as well.
10338 assert(VT.getSizeInBits() == 256 && "Only for 256-bit vector shuffles!");
10339 int LaneSize = Mask.size() / 2;
10341 // If there are only inputs from one 128-bit lane, splitting will in fact be
10342 // less expensive. The flags track whether the given lane contains an element
10343 // that crosses to another lane.
10344 bool LaneCrossing[2] = {false, false};
10345 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10346 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10347 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
10348 if (!LaneCrossing[0] || !LaneCrossing[1])
10349 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10351 if (isSingleInputShuffleMask(Mask)) {
10352 SmallVector<int, 32> FlippedBlendMask;
10353 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10354 FlippedBlendMask.push_back(
10355 Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
10357 : Mask[i] % LaneSize +
10358 (i / LaneSize) * LaneSize + Size));
10360 // Flip the vector, and blend the results which should now be in-lane. The
10361 // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
10362 // 5 for the high source. The value 3 selects the high half of source 2 and
10363 // the value 2 selects the low half of source 2. We only use source 2 to
10364 // allow folding it into a memory operand.
10365 unsigned PERMMask = 3 | 2 << 4;
10366 SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
10367 V1, DAG.getConstant(PERMMask, MVT::i8));
10368 return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
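// Worked example: a v4f64 single-input mask such as <2, 1, 0, 3> (reachable
// here without AVX2) produces the VPERM2X128 above, which swaps V1's two
// 128-bit halves into Flipped, followed by the in-lane blend mask
// <4, 1, 6, 3> that picks each element from V1 or Flipped with no further
// lane crossing.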
10371 // This now reduces to two single-input shuffles of V1 and V2 which at worst
10372 // will be handled by the above logic and a blend of the results, much like
10373 // other patterns in AVX.
10374 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10377 /// \brief Handle lowering 2-lane 128-bit shuffles.
10378 static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10379 SDValue V2, ArrayRef<int> Mask,
10380 const X86Subtarget *Subtarget,
10381 SelectionDAG &DAG) {
10382 // Blends are faster and handle all the non-lane-crossing cases.
10383 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask,
10387 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
10388 VT.getVectorNumElements() / 2);
10389 // Check for patterns which can be matched with a single insert of a 128-bit
10390 // subvector.
10391 if (isShuffleEquivalent(V1, V2, Mask, 0, 1, 0, 1) ||
10392 isShuffleEquivalent(V1, V2, Mask, 0, 1, 4, 5)) {
10393 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10394 DAG.getIntPtrConstant(0));
10395 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
10396 Mask[2] < 4 ? V1 : V2, DAG.getIntPtrConstant(0));
10397 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10399 if (isShuffleEquivalent(V1, V2, Mask, 0, 1, 6, 7)) {
10400 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10401 DAG.getIntPtrConstant(0));
10402 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
10403 DAG.getIntPtrConstant(2));
10404 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10407 // Otherwise form a 128-bit permutation.
10408 // FIXME: Detect zero-vector inputs and use the VPERM2X128 to zero that half.
10409 unsigned PermMask = Mask[0] / 2 | (Mask[2] / 2) << 4;
10410 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
10411 DAG.getConstant(PermMask, MVT::i8));
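// Note: each nibble of the VPERM2X128 immediate selects one 128-bit half of
// the concatenated (V1, V2) pair, so Mask[0] / 2 chooses the source of the
// result's low half and (Mask[2] / 2) << 4 the source of its high half (the
// mask values here count 64-bit elements, two per 128-bit lane).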
10414 /// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
10415 /// shuffling each lane.
10417 /// This will only succeed when the result of fixing the 128-bit lanes results
10418 /// in a single-input non-lane-crossing shuffle with a repeating shuffle mask in
10419 /// each 128-bit lane. This handles many cases where we can quickly blend away
10420 /// the lane crosses early and then use simpler shuffles within each lane.
10422 /// FIXME: It might be worthwhile at some point to support this without
10423 /// requiring the 128-bit lane-relative shuffles to be repeating, but currently
10424 /// in x86 only floating point has interesting non-repeating shuffles, and even
10425 /// those are still *marginally* more expensive.
10426 static SDValue lowerVectorShuffleByMerging128BitLanes(
10427 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
10428 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
10429 assert(!isSingleInputShuffleMask(Mask) &&
10430 "This is only useful with multiple inputs.");
10432 int Size = Mask.size();
10433 int LaneSize = 128 / VT.getScalarSizeInBits();
10434 int NumLanes = Size / LaneSize;
10435 assert(NumLanes > 1 && "Only handles 256-bit and wider shuffles.");
10437 // See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
10438 // check whether the in-128-bit lane shuffles share a repeating pattern.
10439 SmallVector<int, 4> Lanes;
10440 Lanes.resize(NumLanes, -1);
10441 SmallVector<int, 4> InLaneMask;
10442 InLaneMask.resize(LaneSize, -1);
10443 for (int i = 0; i < Size; ++i) {
10444 if (Mask[i] < 0)
10445 continue;
10447 int j = i / LaneSize;
10449 if (Lanes[j] < 0) {
10450 // First entry we've seen for this lane.
10451 Lanes[j] = Mask[i] / LaneSize;
10452 } else if (Lanes[j] != Mask[i] / LaneSize) {
10453 // This doesn't match the lane selected previously!
10454 return SDValue();
10455 }
10457 // Check that within each lane we have a consistent shuffle mask.
10458 int k = i % LaneSize;
10459 if (InLaneMask[k] < 0) {
10460 InLaneMask[k] = Mask[i] % LaneSize;
10461 } else if (InLaneMask[k] != Mask[i] % LaneSize) {
10462 // This doesn't fit a repeating in-lane mask.
10463 return SDValue();
10464 }
10465 }
10467 // First shuffle the lanes into place.
10468 MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
10469 VT.getSizeInBits() / 64);
10470 SmallVector<int, 8> LaneMask;
10471 LaneMask.resize(NumLanes * 2, -1);
10472 for (int i = 0; i < NumLanes; ++i)
10473 if (Lanes[i] >= 0) {
10474 LaneMask[2 * i + 0] = 2*Lanes[i] + 0;
10475 LaneMask[2 * i + 1] = 2*Lanes[i] + 1;
10478 V1 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V1);
10479 V2 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V2);
10480 SDValue LaneShuffle = DAG.getVectorShuffle(LaneVT, DL, V1, V2, LaneMask);
10482 // Cast it back to the type we actually want.
10483 LaneShuffle = DAG.getNode(ISD::BITCAST, DL, VT, LaneShuffle);
10485 // Now do a simple shuffle that isn't lane crossing.
10486 SmallVector<int, 8> NewMask;
10487 NewMask.resize(Size, -1);
10488 for (int i = 0; i < Size; ++i)
10489 if (Mask[i] >= 0)
10490 NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
10491 assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) &&
10492 "Must not introduce lane crosses at this point!");
10494 return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask);
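// Worked example: the v8f32 mask <9, 8, 11, 10, 5, 4, 7, 6> first gets the
// 64-bit lane-fixing shuffle <4, 5, 2, 3> (V2's low lane, then V1's high
// lane), after which the mask reduces to the repeated, non-crossing in-lane
// pattern <1, 0, 3, 2> in both lanes.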
10497 /// \brief Test whether the specified input (0 or 1) is in-place blended by the
10498 /// given mask.
10499 ///
10500 /// This returns true if the elements from a particular input are already in the
10501 /// slot required by the given mask and require no permutation.
10502 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
10503 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
10504 int Size = Mask.size();
10505 for (int i = 0; i < Size; ++i)
10506 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
10507 return false;
10509 return true;
10510 }
10512 /// \brief Handle lowering of 4-lane 64-bit floating point shuffles.
10514 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
10515 /// isn't available.
10516 static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10517 const X86Subtarget *Subtarget,
10518 SelectionDAG &DAG) {
10519 SDLoc DL(Op);
10520 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10521 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10522 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10523 ArrayRef<int> Mask = SVOp->getMask();
10524 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10526 SmallVector<int, 4> WidenedMask;
10527 if (canWidenShuffleElements(Mask, WidenedMask))
10528 return lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask, Subtarget,
10531 if (isSingleInputShuffleMask(Mask)) {
10532 // Check for being able to broadcast a single element.
10533 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f64, DL, V1,
10534 Mask, Subtarget, DAG))
10537 // Use low duplicate instructions for masks that match their pattern.
10538 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2))
10539 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
10541 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
10542 // Non-half-crossing single input shuffles can be lowered with an
10543 // interleaved permutation.
10544 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
10545 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
10546 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
10547 DAG.getConstant(VPERMILPMask, MVT::i8));
10550 // With AVX2 we have direct support for this permutation.
10551 if (Subtarget->hasAVX2())
10552 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
10553 getV4X86ShuffleImm8ForMask(Mask, DAG));
10555 // Otherwise, fall back.
10556 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
10560 // X86 has dedicated unpack instructions that can handle specific blend
10561 // operations: UNPCKH and UNPCKL.
10562 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 2, 6))
10563 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V1, V2);
10564 if (isShuffleEquivalent(V1, V2, Mask, 1, 5, 3, 7))
10565 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V1, V2);
10567 // If we have a single input to the zero element, insert that into V1 if we
10568 // can do so cheaply.
10569 int NumV2Elements =
10570 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
10571 if (NumV2Elements == 1 && Mask[0] >= 4)
10572 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
10573 MVT::v4f64, DL, V1, V2, Mask, Subtarget, DAG))
10576 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
10580 // Check if the blend happens to exactly fit that of SHUFPD.
10581 if ((Mask[0] == -1 || Mask[0] < 2) &&
10582 (Mask[1] == -1 || (Mask[1] >= 4 && Mask[1] < 6)) &&
10583 (Mask[2] == -1 || (Mask[2] >= 2 && Mask[2] < 4)) &&
10584 (Mask[3] == -1 || Mask[3] >= 6)) {
10585 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 5) << 1) |
10586 ((Mask[2] == 3) << 2) | ((Mask[3] == 7) << 3);
10587 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V1, V2,
10588 DAG.getConstant(SHUFPDMask, MVT::i8));
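// Note: each SHUFPD immediate bit selects the high (1) or low (0) element
// within one 128-bit lane of the corresponding source, which is why the
// pattern checks here only accept mask values that stay in their own lane and
// alternate between V1 and V2.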
10590 if ((Mask[0] == -1 || (Mask[0] >= 4 && Mask[0] < 6)) &&
10591 (Mask[1] == -1 || Mask[1] < 2) &&
10592 (Mask[2] == -1 || Mask[2] >= 6) &&
10593 (Mask[3] == -1 || (Mask[3] >= 2 && Mask[3] < 4))) {
10594 unsigned SHUFPDMask = (Mask[0] == 5) | ((Mask[1] == 1) << 1) |
10595 ((Mask[2] == 7) << 2) | ((Mask[3] == 3) << 3);
10596 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V2, V1,
10597 DAG.getConstant(SHUFPDMask, MVT::i8));
10600 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10601 // shuffle. However, if we have AVX2 and either inputs are already in place,
10602 // we will be able to shuffle even across lanes the other input in a single
10603 // instruction so skip this pattern.
10604 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10605 isShuffleMaskInputInPlace(1, Mask))))
10606 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10607 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
10610 // If we have AVX2 then we always want to lower with a blend because at v4 we
10611 // can fully permute the elements.
10612 if (Subtarget->hasAVX2())
10613 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
10616 // Otherwise fall back on generic lowering.
10617 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, DAG);
10620 /// \brief Handle lowering of 4-lane 64-bit integer shuffles.
10622 /// This routine is only called when we have AVX2 and thus a reasonable
10623 /// instruction set for v4i64 shuffling..
10624 static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10625 const X86Subtarget *Subtarget,
10626 SelectionDAG &DAG) {
10627 SDLoc DL(Op);
10628 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10629 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10630 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10631 ArrayRef<int> Mask = SVOp->getMask();
10632 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10633 assert(Subtarget->hasAVX2() && "We can only lower v4i64 with AVX2!");
10635 SmallVector<int, 4> WidenedMask;
10636 if (canWidenShuffleElements(Mask, WidenedMask))
10637 return lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask, Subtarget,
10640 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
10644 // Check for being able to broadcast a single element.
10645 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i64, DL, V1,
10646 Mask, Subtarget, DAG))
10649 // When the shuffle is mirrored between the 128-bit lanes of the unit, we can
10650 // use lower latency instructions that will operate on both 128-bit lanes.
10651 SmallVector<int, 2> RepeatedMask;
10652 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
10653 if (isSingleInputShuffleMask(Mask)) {
10654 int PSHUFDMask[] = {-1, -1, -1, -1};
10655 for (int i = 0; i < 2; ++i)
10656 if (RepeatedMask[i] >= 0) {
10657 PSHUFDMask[2 * i] = 2 * RepeatedMask[i];
10658 PSHUFDMask[2 * i + 1] = 2 * RepeatedMask[i] + 1;
10660 return DAG.getNode(
10661 ISD::BITCAST, DL, MVT::v4i64,
10662 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
10663 DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, V1),
10664 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
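// For example, the repeated v4i64 mask <1, 0, 3, 2> has RepeatedMask <1, 0>
// and becomes the v8i32 PSHUFD mask <2, 3, 0, 1>: each 64-bit entry m expands
// to the dword pair 2*m, 2*m+1.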
10667 // Use dedicated unpack instructions for masks that match their pattern.
10668 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 2, 6))
10669 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V1, V2);
10670 if (isShuffleEquivalent(V1, V2, Mask, 1, 5, 3, 7))
10671 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V1, V2);
10674 // AVX2 provides a direct instruction for permuting a single input across
10676 if (isSingleInputShuffleMask(Mask))
10677 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
10678 getV4X86ShuffleImm8ForMask(Mask, DAG));
10680 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10681 // shuffle. However, if we have AVX2 and either inputs are already in place,
10682 // we will be able to shuffle even across lanes the other input in a single
10683 // instruction so skip this pattern.
10684 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10685 isShuffleMaskInputInPlace(1, Mask))))
10686 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10687 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
10690 // Otherwise fall back on generic blend lowering.
10691 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
10695 /// \brief Handle lowering of 8-lane 32-bit floating point shuffles.
10697 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
10698 /// isn't available.
10699 static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10700 const X86Subtarget *Subtarget,
10701 SelectionDAG &DAG) {
10702 SDLoc DL(Op);
10703 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10704 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10705 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10706 ArrayRef<int> Mask = SVOp->getMask();
10707 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10709 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
10713 // Check for being able to broadcast a single element.
10714 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8f32, DL, V1,
10715 Mask, Subtarget, DAG))
10718 // If the shuffle mask is repeated in each 128-bit lane, we have many more
10719 // options to efficiently lower the shuffle.
10720 SmallVector<int, 4> RepeatedMask;
10721 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
10722 assert(RepeatedMask.size() == 4 &&
10723 "Repeated masks must be half the mask width!");
10725 // Use even/odd duplicate instructions for masks that match their pattern.
10726 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2, 4, 4, 6, 6))
10727 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
10728 if (isShuffleEquivalent(V1, V2, Mask, 1, 1, 3, 3, 5, 5, 7, 7))
10729 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
10731 if (isSingleInputShuffleMask(Mask))
10732 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
10733 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10735 // Use dedicated unpack instructions for masks that match their pattern.
10736 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10737 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V1, V2);
10738 if (isShuffleEquivalent(V1, V2, Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10739 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V1, V2);
10741 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
10742 // have already handled any direct blends. We also need to squash the
10743 // repeated mask into a simulated v4f32 mask.
10744 for (int i = 0; i < 4; ++i)
10745 if (RepeatedMask[i] >= 8)
10746 RepeatedMask[i] -= 4;
10747 return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
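// Note: in the repeated mask, V2 elements show up as indices 8-11; subtracting
// 4 maps them to 4-7, the second-operand range that a v4f32 two-input shuffle
// mask (and lowerVectorShuffleWithSHUFPS) expects.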
10750 // If we have a single input shuffle with different shuffle patterns in the
10751 // two 128-bit lanes use the variable mask to VPERMILPS.
10752 if (isSingleInputShuffleMask(Mask)) {
10753 SDValue VPermMask[8];
10754 for (int i = 0; i < 8; ++i)
10755 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10756 : DAG.getConstant(Mask[i], MVT::i32);
10757 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
10758 return DAG.getNode(
10759 X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
10760 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask));
10762 if (Subtarget->hasAVX2())
10763 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32,
10764 DAG.getNode(ISD::BITCAST, DL, MVT::v8f32,
10765 DAG.getNode(ISD::BUILD_VECTOR, DL,
10766 MVT::v8i32, VPermMask)),
10767 V1);
10769 // Otherwise, fall back.
10770 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
10774 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10776 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10777 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
10780 // If we have AVX2 then we always want to lower with a blend because at v8 we
10781 // can fully permute the elements.
10782 if (Subtarget->hasAVX2())
10783 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
10786 // Otherwise fall back on generic lowering.
10787 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, DAG);
10790 /// \brief Handle lowering of 8-lane 32-bit integer shuffles.
10792 /// This routine is only called when we have AVX2 and thus a reasonable
10793 /// instruction set for v8i32 shuffling..
10794 static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10795 const X86Subtarget *Subtarget,
10796 SelectionDAG &DAG) {
10797 SDLoc DL(Op);
10798 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10799 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10800 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10801 ArrayRef<int> Mask = SVOp->getMask();
10802 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10803 assert(Subtarget->hasAVX2() && "We can only lower v8i32 with AVX2!");
10805 // Whenever we can lower this as a zext, that instruction is strictly faster
10806 // than any alternative. It also allows us to fold memory operands into the
10807 // shuffle in many cases.
10808 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2,
10809 Mask, Subtarget, DAG))
10812 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
10816 // Check for being able to broadcast a single element.
10817 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i32, DL, V1,
10818 Mask, Subtarget, DAG))
10821 // If the shuffle mask is repeated in each 128-bit lane we can use more
10822 // efficient instructions that mirror the shuffles across the two 128-bit
10824 SmallVector<int, 4> RepeatedMask;
10825 if (is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask)) {
10826 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
10827 if (isSingleInputShuffleMask(Mask))
10828 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
10829 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10831 // Use dedicated unpack instructions for masks that match their pattern.
10832 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10833 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V1, V2);
10834 if (isShuffleEquivalent(V1, V2, Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10835 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V1, V2);
10838 // If the shuffle patterns aren't repeated but it is a single input, directly
10839 // generate a cross-lane VPERMD instruction.
10840 if (isSingleInputShuffleMask(Mask)) {
10841 SDValue VPermMask[8];
10842 for (int i = 0; i < 8; ++i)
10843 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10844 : DAG.getConstant(Mask[i], MVT::i32);
10845 return DAG.getNode(
10846 X86ISD::VPERMV, DL, MVT::v8i32,
10847 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
10850 // Try to use bit shift instructions.
10851 if (SDValue Shift = lowerVectorShuffleAsBitShift(
10852 DL, MVT::v8i32, V1, V2, Mask, DAG))
10855 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
10857 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10858 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
10861 // Otherwise fall back on generic blend lowering.
10862 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
10866 /// \brief Handle lowering of 16-lane 16-bit integer shuffles.
10868 /// This routine is only called when we have AVX2 and thus a reasonable
10869 /// instruction set for v16i16 shuffling.
10870 static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10871 const X86Subtarget *Subtarget,
10872 SelectionDAG &DAG) {
10874 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10875 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10876 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10877 ArrayRef<int> Mask = SVOp->getMask();
10878 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
10879 assert(Subtarget->hasAVX2() && "We can only lower v16i16 with AVX2!");
10881 // Whenever we can lower this as a zext, that instruction is strictly faster
10882 // than any alternative. It also allows us to fold memory operands into the
10883 // shuffle in many cases.
10884 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v16i16, V1, V2,
10885 Mask, Subtarget, DAG))
10888 // Check for being able to broadcast a single element.
10889 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i16, DL, V1,
10890 Mask, Subtarget, DAG))
10893 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
10897 // Use dedicated unpack instructions for masks that match their pattern.
10898 if (isShuffleEquivalent(V1, V2, Mask,
10899 // First 128-bit lane:
10900 0, 16, 1, 17, 2, 18, 3, 19,
10901 // Second 128-bit lane:
10902 8, 24, 9, 25, 10, 26, 11, 27))
10903 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i16, V1, V2);
10904 if (isShuffleEquivalent(V1, V2, Mask,
10905 // First 128-bit lane:
10906 4, 20, 5, 21, 6, 22, 7, 23,
10907 // Second 128-bit lane:
10908 12, 28, 13, 29, 14, 30, 15, 31))
10909 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i16, V1, V2);
10911 if (isSingleInputShuffleMask(Mask)) {
10912 // There are no generalized cross-lane shuffle operations available on i16 element types.
10914 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
10915 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
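// Each 16-bit lane index M below expands into the byte pair (2*M, 2*M+1) of
// the PSHUFB control, so, for example, a within-lane index of 3 selects
// bytes 6 and 7 of its 128-bit lane.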
10918 SDValue PSHUFBMask[32];
10919 for (int i = 0; i < 16; ++i) {
10920 if (Mask[i] == -1) {
10921 PSHUFBMask[2 * i] = PSHUFBMask[2 * i + 1] = DAG.getUNDEF(MVT::i8);
10925 int M = i < 8 ? Mask[i] : Mask[i] - 8;
10926 assert(M >= 0 && M < 8 && "Invalid single-input mask!");
10927 PSHUFBMask[2 * i] = DAG.getConstant(2 * M, MVT::i8);
10928 PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, MVT::i8);
10930 return DAG.getNode(
10931 ISD::BITCAST, DL, MVT::v16i16,
10933 X86ISD::PSHUFB, DL, MVT::v32i8,
10934 DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1),
10935 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask)));
10938 // Try to use bit shift instructions.
10939 if (SDValue Shift = lowerVectorShuffleAsBitShift(
10940 DL, MVT::v16i16, V1, V2, Mask, DAG))
10943 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
10945 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10946 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
10949 // Otherwise fall back on generic lowering.
10950 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG);
10953 /// \brief Handle lowering of 32-lane 8-bit integer shuffles.
10955 /// This routine is only called when we have AVX2 and thus a reasonable
10956 /// instruction set for v32i8 shuffling.
10957 static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10958 const X86Subtarget *Subtarget,
10959 SelectionDAG &DAG) {
10961 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
10962 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
10963 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10964 ArrayRef<int> Mask = SVOp->getMask();
10965 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
10966 assert(Subtarget->hasAVX2() && "We can only lower v32i8 with AVX2!");
10968 // Whenever we can lower this as a zext, that instruction is strictly faster
10969 // than any alternative. It also allows us to fold memory operands into the
10970 // shuffle in many cases.
10971 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2,
10972 Mask, Subtarget, DAG))
10975 // Check for being able to broadcast a single element.
10976 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v32i8, DL, V1,
10977 Mask, Subtarget, DAG))
10980 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
10984 // Use dedicated unpack instructions for masks that match their pattern.
10985 // Note that these are repeated 128-bit lane unpacks, not unpacks across all 256 bits.
10987 if (isShuffleEquivalent(
10989 // First 128-bit lane:
10990 0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39,
10991 // Second 128-bit lane:
10992 16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55))
10993 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v32i8, V1, V2);
10994 if (isShuffleEquivalent(
10996 // First 128-bit lane:
10997 8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
10998 // Second 128-bit lane:
10999 24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63))
11000 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v32i8, V1, V2);
11002 if (isSingleInputShuffleMask(Mask)) {
11003 // There are no generalized cross-lane shuffle operations available on i8 element types.
11005 if (is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
11006 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2,
11009 SDValue PSHUFBMask[32];
11010 for (int i = 0; i < 32; ++i)
11013 ? DAG.getUNDEF(MVT::i8)
11014 : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16, MVT::i8);
11016 return DAG.getNode(
11017 X86ISD::PSHUFB, DL, MVT::v32i8, V1,
11018 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask));
11021 // Try to use bit shift instructions.
11022 if (SDValue Shift = lowerVectorShuffleAsBitShift(
11023 DL, MVT::v32i8, V1, V2, Mask, DAG))
11026 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
11028 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11029 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
11032 // Otherwise fall back on generic lowering.
11033 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG);
11036 /// \brief High-level routine to lower various 256-bit x86 vector shuffles.
11038 /// This routine either breaks down the specific type of a 256-bit x86 vector
11039 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
11040 /// together based on the available instructions.
11041 static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11042 MVT VT, const X86Subtarget *Subtarget,
11043 SelectionDAG &DAG) {
11045 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11046 ArrayRef<int> Mask = SVOp->getMask();
11048 // There is a really nice hard cut-over between AVX1 and AVX2 that means we can
11049 // check for those subtargets here and avoid much of the subtarget querying in
11050 // the per-vector-type lowering routines. With AVX1 we have essentially *zero*
11051 // ability to manipulate a 256-bit vector with integer types. Since we'll use
11052 // floating point types there eventually, just immediately cast everything to
11053 // a float and operate entirely in that domain.
11054 if (VT.isInteger() && !Subtarget->hasAVX2()) {
11055 int ElementBits = VT.getScalarSizeInBits();
11056 if (ElementBits < 32)
11057 // No floating point type available, decompose into 128-bit vectors.
11058 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11060 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
11061 VT.getVectorNumElements());
11062 V1 = DAG.getNode(ISD::BITCAST, DL, FpVT, V1);
11063 V2 = DAG.getNode(ISD::BITCAST, DL, FpVT, V2);
11064 return DAG.getNode(ISD::BITCAST, DL, VT,
11065 DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
11068 switch (VT.SimpleTy) {
11070 return lowerV4F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11072 return lowerV4I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11074 return lowerV8F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11076 return lowerV8I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11078 return lowerV16I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
11080 return lowerV32I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
11083 llvm_unreachable("Not a valid 256-bit x86 vector type!");
11087 /// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
11088 static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11089 const X86Subtarget *Subtarget,
11090 SelectionDAG &DAG) {
11092 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11093 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11094 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11095 ArrayRef<int> Mask = SVOp->getMask();
11096 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11098 // X86 has dedicated unpack instructions that can handle specific blend
11099 // operations: UNPCKH and UNPCKL.
11100 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 2, 10, 4, 12, 6, 14))
11101 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f64, V1, V2);
11102 if (isShuffleEquivalent(V1, V2, Mask, 1, 9, 3, 11, 5, 13, 7, 15))
11103 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f64, V1, V2);
11105 // FIXME: Implement direct support for this type!
11106 return splitAndLowerVectorShuffle(DL, MVT::v8f64, V1, V2, Mask, DAG);
11109 /// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
11110 static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11111 const X86Subtarget *Subtarget,
11112 SelectionDAG &DAG) {
11114 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11115 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11116 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11117 ArrayRef<int> Mask = SVOp->getMask();
11118 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11120 // Use dedicated unpack instructions for masks that match their pattern.
11121 if (isShuffleEquivalent(V1, V2, Mask,
11122 0, 16, 1, 17, 4, 20, 5, 21,
11123 8, 24, 9, 25, 12, 28, 13, 29))
11124 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16f32, V1, V2);
11125 if (isShuffleEquivalent(V1, V2, Mask,
11126 2, 18, 3, 19, 6, 22, 7, 23,
11127 10, 26, 11, 27, 14, 30, 15, 31))
11128 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16f32, V1, V2);
11130 // FIXME: Implement direct support for this type!
11131 return splitAndLowerVectorShuffle(DL, MVT::v16f32, V1, V2, Mask, DAG);
11134 /// \brief Handle lowering of 8-lane 64-bit integer shuffles.
11135 static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11136 const X86Subtarget *Subtarget,
11137 SelectionDAG &DAG) {
11139 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11140 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11141 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11142 ArrayRef<int> Mask = SVOp->getMask();
11143 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11145 // X86 has dedicated unpack instructions that can handle specific blend
11146 // operations: UNPCKH and UNPCKL.
11147 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 2, 10, 4, 12, 6, 14))
11148 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i64, V1, V2);
11149 if (isShuffleEquivalent(V1, V2, Mask, 1, 9, 3, 11, 5, 13, 7, 15))
11150 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i64, V1, V2);
11152 // FIXME: Implement direct support for this type!
11153 return splitAndLowerVectorShuffle(DL, MVT::v8i64, V1, V2, Mask, DAG);
11156 /// \brief Handle lowering of 16-lane 32-bit integer shuffles.
11157 static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11158 const X86Subtarget *Subtarget,
11159 SelectionDAG &DAG) {
11161 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11162 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11163 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11164 ArrayRef<int> Mask = SVOp->getMask();
11165 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11167 // Use dedicated unpack instructions for masks that match their pattern.
11168 if (isShuffleEquivalent(V1, V2, Mask,
11169 0, 16, 1, 17, 4, 20, 5, 21,
11170 8, 24, 9, 25, 12, 28, 13, 29))
11171 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i32, V1, V2);
11172 if (isShuffleEquivalent(V1, V2, Mask,
11173 2, 18, 3, 19, 6, 22, 7, 23,
11174 10, 26, 11, 27, 14, 30, 15, 31))
11175 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i32, V1, V2);
11177 // FIXME: Implement direct support for this type!
11178 return splitAndLowerVectorShuffle(DL, MVT::v16i32, V1, V2, Mask, DAG);
11181 /// \brief Handle lowering of 32-lane 16-bit integer shuffles.
11182 static SDValue lowerV32I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11183 const X86Subtarget *Subtarget,
11184 SelectionDAG &DAG) {
11186 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11187 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11188 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11189 ArrayRef<int> Mask = SVOp->getMask();
11190 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
11191 assert(Subtarget->hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
11193 // FIXME: Implement direct support for this type!
11194 return splitAndLowerVectorShuffle(DL, MVT::v32i16, V1, V2, Mask, DAG);
11197 /// \brief Handle lowering of 64-lane 8-bit integer shuffles.
11198 static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11199 const X86Subtarget *Subtarget,
11200 SelectionDAG &DAG) {
11202 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11203 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11204 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11205 ArrayRef<int> Mask = SVOp->getMask();
11206 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
11207 assert(Subtarget->hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
11209 // FIXME: Implement direct support for this type!
11210 return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
11213 /// \brief High-level routine to lower various 512-bit x86 vector shuffles.
11215 /// This routine either breaks down the specific type of a 512-bit x86 vector
11216 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
11217 /// together based on the available instructions.
11218 static SDValue lower512BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11219 MVT VT, const X86Subtarget *Subtarget,
11220 SelectionDAG &DAG) {
11222 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11223 ArrayRef<int> Mask = SVOp->getMask();
11224 assert(Subtarget->hasAVX512() &&
11225 "Cannot lower 512-bit vectors w/ basic ISA!");
11227 // Check for being able to broadcast a single element.
11228 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(VT.SimpleTy, DL, V1,
11229 Mask, Subtarget, DAG))
11232 // Dispatch to each element type for lowering. If we don't have support for
11233 // specific element type shuffles at 512 bits, immediately split them and
11234 // lower them. Each lowering routine of a given type is allowed to assume that
11235 // the requisite ISA extensions for that element type are available.
11236 switch (VT.SimpleTy) {
11238 return lowerV8F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11240 return lowerV16F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11242 return lowerV8I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11244 return lowerV16I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11246 if (Subtarget->hasBWI())
11247 return lowerV32I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
11250 if (Subtarget->hasBWI())
11251 return lowerV64I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
11255 llvm_unreachable("Not a valid 512-bit x86 vector type!");
11258 // Otherwise fall back on splitting.
11259 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11262 /// \brief Top-level lowering for x86 vector shuffles.
11264 /// This handles decomposition, canonicalization, and lowering of all x86
11265 /// vector shuffles. Most of the specific lowering strategies are encapsulated
11266 /// above in helper routines. The canonicalization attempts to widen shuffles
11267 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
11268 /// s.t. only one of the two inputs needs to be tested, etc.
11269 static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
11270 SelectionDAG &DAG) {
11271 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11272 ArrayRef<int> Mask = SVOp->getMask();
11273 SDValue V1 = Op.getOperand(0);
11274 SDValue V2 = Op.getOperand(1);
11275 MVT VT = Op.getSimpleValueType();
11276 int NumElements = VT.getVectorNumElements();
11279 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
11281 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
11282 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
11283 if (V1IsUndef && V2IsUndef)
11284 return DAG.getUNDEF(VT);
11286 // When we create a shuffle node we put the UNDEF node as the second operand,
11287 // but in some cases the first operand may be transformed to UNDEF.
11288 // In this case we should just commute the node.
11290 return DAG.getCommutedVectorShuffle(*SVOp);
11292 // Check for non-undef masks pointing at an undef vector and make the masks
11293 // undef as well. This makes it easier to match the shuffle based solely on the mask.
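// For example, with V2 undef a v4i32 mask of <0, 5, 2, 7> is rewritten to
// <0, -1, 2, -1>, so later matching only has to reason about V1.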
11297 if (M >= NumElements) {
11298 SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
11299 for (int &M : NewMask)
11300 if (M >= NumElements)
11302 return DAG.getVectorShuffle(VT, dl, V1, V2, NewMask);
11305 // We actually see shuffles that are entirely re-arrangements of a set of
11306 // zero inputs. This mostly happens while decomposing complex shuffles into
11307 // simple ones. Directly lower these as a buildvector of zeros.
11308 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
11309 if (Zeroable.all())
11310 return getZeroVector(VT, Subtarget, DAG, dl);
11312 // Try to collapse shuffles into using a vector type with fewer elements but
11313 // wider element types. We cap this to not form integers or floating point
11314 // elements wider than 64 bits, but it might be interesting to form i128
11315 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
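// For example, a v4i32 shuffle with mask <0, 1, 6, 7> can be performed as a
// v2i64 shuffle with mask <0, 3>, since each consecutive pair of elements
// moves together.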
11316 SmallVector<int, 16> WidenedMask;
11317 if (VT.getScalarSizeInBits() < 64 &&
11318 canWidenShuffleElements(Mask, WidenedMask)) {
11319 MVT NewEltVT = VT.isFloatingPoint()
11320 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
11321 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
11322 MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
11323 // Make sure that the new vector type is legal. For example, v2f64 isn't legal on SSE1.
11325 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
11326 V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
11327 V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
11328 return DAG.getNode(ISD::BITCAST, dl, VT,
11329 DAG.getVectorShuffle(NewVT, dl, V1, V2, WidenedMask));
11333 int NumV1Elements = 0, NumUndefElements = 0, NumV2Elements = 0;
11334 for (int M : SVOp->getMask())
11336 ++NumUndefElements;
11337 else if (M < NumElements)
11342 // Commute the shuffle as needed such that more elements come from V1 than
11343 // V2. This allows us to match the shuffle pattern strictly on how many
11344 // elements come from V1 without handling the symmetric cases.
11345 if (NumV2Elements > NumV1Elements)
11346 return DAG.getCommutedVectorShuffle(*SVOp);
11348 // When the number of V1 and V2 elements is the same, try to minimize the
11349 // number of uses of V2 in the low half of the vector. When that is tied,
11350 // ensure that the sum of indices for V1 is equal to or lower than the sum
11351 // of indices for V2. When those are equal, try to ensure that the number of
11352 // odd indices for V1 is lower than the number of odd indices for V2.
11353 if (NumV1Elements == NumV2Elements) {
11354 int LowV1Elements = 0, LowV2Elements = 0;
11355 for (int M : SVOp->getMask().slice(0, NumElements / 2))
11356 if (M >= NumElements)
11360 if (LowV2Elements > LowV1Elements) {
11361 return DAG.getCommutedVectorShuffle(*SVOp);
11362 } else if (LowV2Elements == LowV1Elements) {
11363 int SumV1Indices = 0, SumV2Indices = 0;
11364 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11365 if (SVOp->getMask()[i] >= NumElements)
11367 else if (SVOp->getMask()[i] >= 0)
11369 if (SumV2Indices < SumV1Indices) {
11370 return DAG.getCommutedVectorShuffle(*SVOp);
11371 } else if (SumV2Indices == SumV1Indices) {
11372 int NumV1OddIndices = 0, NumV2OddIndices = 0;
11373 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11374 if (SVOp->getMask()[i] >= NumElements)
11375 NumV2OddIndices += i % 2;
11376 else if (SVOp->getMask()[i] >= 0)
11377 NumV1OddIndices += i % 2;
11378 if (NumV2OddIndices < NumV1OddIndices)
11379 return DAG.getCommutedVectorShuffle(*SVOp);
11384 // For each vector width, delegate to a specialized lowering routine.
11385 if (VT.getSizeInBits() == 128)
11386 return lower128BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11388 if (VT.getSizeInBits() == 256)
11389 return lower256BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11391 // Lower AVX-512 vectors with the dedicated 512-bit routines.
11392 // FIXME: Several of the per-type 512-bit lowerings still just split the vector.
11393 if (VT.getSizeInBits() == 512)
11394 return lower512BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11396 llvm_unreachable("Unimplemented!");
11400 //===----------------------------------------------------------------------===//
11401 // Legacy vector shuffle lowering
11403 // This is the legacy code for handling vector shuffles until the code above
11404 // fully replaces its functionality and matches its performance.
11405 //===----------------------------------------------------------------------===//
11407 static bool isBlendMask(ArrayRef<int> MaskVals, MVT VT, bool hasSSE41,
11408 bool hasInt256, unsigned *MaskOut = nullptr) {
11409 MVT EltVT = VT.getVectorElementType();
11411 // There is no blend with immediate in AVX-512.
11412 if (VT.is512BitVector())
11415 if (!hasSSE41 || EltVT == MVT::i8)
11417 if (!hasInt256 && VT == MVT::v16i16)
11420 unsigned MaskValue = 0;
11421 unsigned NumElems = VT.getVectorNumElements();
11422 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
11423 unsigned NumLanes = (NumElems - 1) / 8 + 1;
11424 unsigned NumElemsInLane = NumElems / NumLanes;
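// For example, v4f32 gives NumLanes = 1 and NumElemsInLane = 4, while v16i16
// gives NumLanes = 2 and NumElemsInLane = 8, so the loop below only inspects
// low-lane positions and requires the high lane to follow the same pattern.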
11426 // Blend for v16i16 should be symmetric for both lanes.
11427 for (unsigned i = 0; i < NumElemsInLane; ++i) {
11429 int SndLaneEltIdx = (NumLanes == 2) ? MaskVals[i + NumElemsInLane] : -1;
11430 int EltIdx = MaskVals[i];
11432 if ((EltIdx < 0 || EltIdx == (int)i) &&
11433 (SndLaneEltIdx < 0 || SndLaneEltIdx == (int)(i + NumElemsInLane)))
11436 if (((unsigned)EltIdx == (i + NumElems)) &&
11437 (SndLaneEltIdx < 0 ||
11438 (unsigned)SndLaneEltIdx == i + NumElems + NumElemsInLane))
11439 MaskValue |= (1 << i);
11445 *MaskOut = MaskValue;
11449 // Try to lower a shuffle node into a simple blend instruction.
11450 // This function assumes isBlendMask returns true for this
11451 // ShuffleVectorSDNode.
11452 static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
11453 unsigned MaskValue,
11454 const X86Subtarget *Subtarget,
11455 SelectionDAG &DAG) {
11456 MVT VT = SVOp->getSimpleValueType(0);
11457 MVT EltVT = VT.getVectorElementType();
11458 assert(isBlendMask(SVOp->getMask(), VT, Subtarget->hasSSE41(),
11459 Subtarget->hasInt256()) &&
11460 "Trying to lower a VECTOR_SHUFFLE to a Blend but "
11461 "with the wrong mask");
11462 SDValue V1 = SVOp->getOperand(0);
11463 SDValue V2 = SVOp->getOperand(1);
11465 unsigned NumElems = VT.getVectorNumElements();
11467 // Convert i32 vectors to floating point if it is not AVX2.
11468 // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
11470 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
11471 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
11473 V1 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V1);
11474 V2 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V2);
11477 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, V1, V2,
11478 DAG.getConstant(MaskValue, MVT::i32));
11479 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
11482 /// In vector type \p VT, return true if the element at index \p InputIdx
11483 /// falls on a different 128-bit lane than \p OutputIdx.
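/// For example, with VT = v8i32 (32-bit elements) indices 0-3 fall in the low
/// 128-bit lane and 4-7 in the high lane, so InputIdx = 2 and OutputIdx = 5
/// are on different lanes.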
11484 static bool ShuffleCrosses128bitLane(MVT VT, unsigned InputIdx,
11485 unsigned OutputIdx) {
11486 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
11487 return InputIdx * EltSize / 128 != OutputIdx * EltSize / 128;
11490 /// Generate a PSHUFB if possible. Selects elements from \p V1 according to
11491 /// \p MaskVals. MaskVals[OutputIdx] = InputIdx specifies that we want to
11492 /// shuffle the element at InputIdx in V1 to OutputIdx in the result. If \p
11493 /// MaskVals refers to elements outside of \p V1 or is undef (-1), insert a zero.
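/// For example, for a v4i32 \p V1 and MaskVals = <1, -1, 2, 7>, the PSHUFB
/// control selects bytes 4-7, then four 0x80 (zero) bytes, then bytes 8-11,
/// and four more zero bytes because index 7 lies outside \p V1.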
11495 static SDValue getPSHUFB(ArrayRef<int> MaskVals, SDValue V1, SDLoc &dl,
11496 SelectionDAG &DAG) {
11497 MVT VT = V1.getSimpleValueType();
11498 assert(VT.is128BitVector() || VT.is256BitVector());
11500 MVT EltVT = VT.getVectorElementType();
11501 unsigned EltSizeInBytes = EltVT.getSizeInBits() / 8;
11502 unsigned NumElts = VT.getVectorNumElements();
11504 SmallVector<SDValue, 32> PshufbMask;
11505 for (unsigned OutputIdx = 0; OutputIdx < NumElts; ++OutputIdx) {
11506 int InputIdx = MaskVals[OutputIdx];
11507 unsigned InputByteIdx;
11509 if (InputIdx < 0 || NumElts <= (unsigned)InputIdx)
11510 InputByteIdx = 0x80;
11512 // Cross lane is not allowed.
11513 if (ShuffleCrosses128bitLane(VT, InputIdx, OutputIdx))
11515 InputByteIdx = InputIdx * EltSizeInBytes;
11516 // Index is a byte offset within the 128-bit lane.
11517 InputByteIdx &= 0xf;
11520 for (unsigned j = 0; j < EltSizeInBytes; ++j) {
11521 PshufbMask.push_back(DAG.getConstant(InputByteIdx, MVT::i8));
11522 if (InputByteIdx != 0x80)
11527 MVT ShufVT = MVT::getVectorVT(MVT::i8, PshufbMask.size());
11529 V1 = DAG.getNode(ISD::BITCAST, dl, ShufVT, V1);
11530 return DAG.getNode(X86ISD::PSHUFB, dl, ShufVT, V1,
11531 DAG.getNode(ISD::BUILD_VECTOR, dl, ShufVT, PshufbMask));
11534 // v8i16 shuffles - Prefer shuffles in the following order:
11535 // 1. [all] pshuflw, pshufhw, optional move
11536 // 2. [ssse3] 1 x pshufb
11537 // 3. [ssse3] 2 x pshufb + 1 x por
11538 // 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
11540 LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
11541 SelectionDAG &DAG) {
11542 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11543 SDValue V1 = SVOp->getOperand(0);
11544 SDValue V2 = SVOp->getOperand(1);
11546 SmallVector<int, 8> MaskVals;
11548 // Determine if more than 1 of the words in each of the low and high quadwords
11549 // of the result come from the same quadword of one of the two inputs. Undef
11550 // mask values count as coming from any quadword, for better codegen.
11552 // Lo/HiQuad[i] = j indicates how many words from the ith quad of the input
11553 // feeds this quad. For i, 0 and 1 refer to V1, 2 and 3 refer to V2.
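// For example, the v8i16 mask <0, 1, 2, 3, 8, 9, 10, 11> draws every low-half
// word from quad 0 (V1's low quad) and every high-half word from quad 2 (V2's
// low quad), giving LoQuad = {4, 0, 0, 0} and HiQuad = {0, 0, 4, 0}.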
11554 unsigned LoQuad[] = { 0, 0, 0, 0 };
11555 unsigned HiQuad[] = { 0, 0, 0, 0 };
11556 // Indices of quads used.
11557 std::bitset<4> InputQuads;
11558 for (unsigned i = 0; i < 8; ++i) {
11559 unsigned *Quad = i < 4 ? LoQuad : HiQuad;
11560 int EltIdx = SVOp->getMaskElt(i);
11561 MaskVals.push_back(EltIdx);
11569 ++Quad[EltIdx / 4];
11570 InputQuads.set(EltIdx / 4);
11573 int BestLoQuad = -1;
11574 unsigned MaxQuad = 1;
11575 for (unsigned i = 0; i < 4; ++i) {
11576 if (LoQuad[i] > MaxQuad) {
11578 MaxQuad = LoQuad[i];
11582 int BestHiQuad = -1;
11584 for (unsigned i = 0; i < 4; ++i) {
11585 if (HiQuad[i] > MaxQuad) {
11587 MaxQuad = HiQuad[i];
11591 // For SSSE3, if all 8 words of the result come from only 1 quadword of each
11592 // of the two input vectors, shuffle them into one input vector so only a
11593 // single pshufb instruction is necessary. If there are more than 2 input
11594 // quads, disable the next transformation since it does not help SSSE3.
11595 bool V1Used = InputQuads[0] || InputQuads[1];
11596 bool V2Used = InputQuads[2] || InputQuads[3];
11597 if (Subtarget->hasSSSE3()) {
11598 if (InputQuads.count() == 2 && V1Used && V2Used) {
11599 BestLoQuad = InputQuads[0] ? 0 : 1;
11600 BestHiQuad = InputQuads[2] ? 2 : 3;
11602 if (InputQuads.count() > 2) {
11608 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update
11609 // the shuffle mask. If a quad is scored as -1, that means that it contains
11610 // words from all 4 input quadwords.
11612 if (BestLoQuad >= 0 || BestHiQuad >= 0) {
11614 BestLoQuad < 0 ? 0 : BestLoQuad,
11615 BestHiQuad < 0 ? 1 : BestHiQuad
11617 NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
11618 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
11619 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
11620 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);
11622 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
11623 // source words for the shuffle, to aid later transformations.
11624 bool AllWordsInNewV = true;
11625 bool InOrder[2] = { true, true };
11626 for (unsigned i = 0; i != 8; ++i) {
11627 int idx = MaskVals[i];
11629 InOrder[i/4] = false;
11630 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
11632 AllWordsInNewV = false;
11636 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV;
11637 if (AllWordsInNewV) {
11638 for (int i = 0; i != 8; ++i) {
11639 int idx = MaskVals[i];
11642 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4;
11643 if ((idx != i) && idx < 4)
11645 if ((idx != i) && idx > 3)
11654 // If we've eliminated the use of V2, and the new mask is a pshuflw or
11655 // pshufhw, that's as cheap as it gets. Return the new shuffle.
11656 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
11657 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW;
11658 unsigned TargetMask = 0;
11659 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
11660 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
11661 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11662 TargetMask = pshufhw ? getShufflePSHUFHWImmediate(SVOp):
11663 getShufflePSHUFLWImmediate(SVOp);
11664 V1 = NewV.getOperand(0);
11665 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
11669 // Promote splats to a larger type which usually leads to more efficient code.
11670 // FIXME: Is this true if pshufb is available?
11671 if (SVOp->isSplat())
11672 return PromoteSplat(SVOp, DAG);
11674 // If we have SSSE3, and all words of the result are from 1 input vector,
11675 // case 2 is generated, otherwise case 3 is generated. If no SSSE3
11676 // is present, fall back to case 4.
11677 if (Subtarget->hasSSSE3()) {
11678 SmallVector<SDValue,16> pshufbMask;
11680 // If we have elements from both input vectors, set the high bit of the
11681 // shuffle mask element to zero out elements that come from V2 in the V1
11682 // mask, and elements that come from V1 in the V2 mask, so that the two
11683 // results can be OR'd together.
11684 bool TwoInputs = V1Used && V2Used;
11685 V1 = getPSHUFB(MaskVals, V1, dl, DAG);
11687 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11689 // Calculate the shuffle mask for the second input, shuffle it, and
11690 // OR it with the first shuffled input.
11691 CommuteVectorShuffleMask(MaskVals, 8);
11692 V2 = getPSHUFB(MaskVals, V2, dl, DAG);
11693 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
11694 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11697 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
11698 // and update MaskVals with new element order.
11699 std::bitset<8> InOrder;
11700 if (BestLoQuad >= 0) {
11701 int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 };
11702 for (int i = 0; i != 4; ++i) {
11703 int idx = MaskVals[i];
11706 } else if ((idx / 4) == BestLoQuad) {
11707 MaskV[i] = idx & 3;
11711 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11714 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11715 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11716 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
11717 NewV.getOperand(0),
11718 getShufflePSHUFLWImmediate(SVOp), DAG);
11722 // If BestHiQuad >= 0, generate a pshufhw to put the high elements in order,
11723 // and update MaskVals with the new element order.
11724 if (BestHiQuad >= 0) {
11725 int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 };
11726 for (unsigned i = 4; i != 8; ++i) {
11727 int idx = MaskVals[i];
11730 } else if ((idx / 4) == BestHiQuad) {
11731 MaskV[i] = (idx & 3) + 4;
11735 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11738 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11739 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11740 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
11741 NewV.getOperand(0),
11742 getShufflePSHUFHWImmediate(SVOp), DAG);
11746 // In case BestHi & BestLo were both -1, which means each quadword has a word
11747 // from each of the four input quadwords, calculate the InOrder bitvector now
11748 // before falling through to the insert/extract cleanup.
11749 if (BestLoQuad == -1 && BestHiQuad == -1) {
11751 for (int i = 0; i != 8; ++i)
11752 if (MaskVals[i] < 0 || MaskVals[i] == i)
11756 // The other elements are put in the right place using pextrw and pinsrw.
11757 for (unsigned i = 0; i != 8; ++i) {
11760 int EltIdx = MaskVals[i];
11763 SDValue ExtOp = (EltIdx < 8) ?
11764 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
11765 DAG.getIntPtrConstant(EltIdx)) :
11766 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
11767 DAG.getIntPtrConstant(EltIdx - 8));
11768 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
11769 DAG.getIntPtrConstant(i));
11774 /// \brief v16i16 shuffles
11776 /// FIXME: We only support generation of a single pshufb currently. We can
11777 /// generalize the other applicable cases from LowerVECTOR_SHUFFLEv8i16 as
11778 /// well (e.g. 2 x pshufb + 1 x por).
11780 LowerVECTOR_SHUFFLEv16i16(SDValue Op, SelectionDAG &DAG) {
11781 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11782 SDValue V1 = SVOp->getOperand(0);
11783 SDValue V2 = SVOp->getOperand(1);
11786 if (V2.getOpcode() != ISD::UNDEF)
11789 SmallVector<int, 16> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
11790 return getPSHUFB(MaskVals, V1, dl, DAG);
11793 // v16i8 shuffles - Prefer shuffles in the following order:
11794 // 1. [ssse3] 1 x pshufb
11795 // 2. [ssse3] 2 x pshufb + 1 x por
11796 // 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw
11797 static SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
11798 const X86Subtarget* Subtarget,
11799 SelectionDAG &DAG) {
11800 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11801 SDValue V1 = SVOp->getOperand(0);
11802 SDValue V2 = SVOp->getOperand(1);
11804 ArrayRef<int> MaskVals = SVOp->getMask();
11806 // Promote splats to a larger type which usually leads to more efficient code.
11807 // FIXME: Is this true if pshufb is available?
11808 if (SVOp->isSplat())
11809 return PromoteSplat(SVOp, DAG);
11811 // If we have SSSE3, case 1 is generated when all result bytes come from
11812 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
11813 // present, fall back to case 3.
11815 // If SSSE3, use 1 pshufb instruction per vector with elements in the result.
11816 if (Subtarget->hasSSSE3()) {
11817 SmallVector<SDValue,16> pshufbMask;
11819 // If all result elements are from one input vector, then only translate
11820 // undef mask values to 0x80 (zero out result) in the pshufb mask.
11822 // Otherwise, we have elements from both input vectors, and must zero out
11823 // elements that come from V2 in the first mask, and V1 in the second mask
11824 // so that we can OR them together.
11825 for (unsigned i = 0; i != 16; ++i) {
11826 int EltIdx = MaskVals[i];
11827 if (EltIdx < 0 || EltIdx >= 16)
11829 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
11831 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
11832 DAG.getNode(ISD::BUILD_VECTOR, dl,
11833 MVT::v16i8, pshufbMask));
11835 // As PSHUFB will zero elements with negative indices, it's safe to ignore
11836 // the 2nd operand if it's undefined or zero.
11837 if (V2.getOpcode() == ISD::UNDEF ||
11838 ISD::isBuildVectorAllZeros(V2.getNode()))
11841 // Calculate the shuffle mask for the second input, shuffle it, and
11842 // OR it with the first shuffled input.
11843 pshufbMask.clear();
11844 for (unsigned i = 0; i != 16; ++i) {
11845 int EltIdx = MaskVals[i];
11846 EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16;
11847 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
11849 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
11850 DAG.getNode(ISD::BUILD_VECTOR, dl,
11851 MVT::v16i8, pshufbMask));
11852 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
11855 // No SSSE3 - Calculate in place words and then fix all out of place words
11856 // With 0-16 extracts & inserts. Worst case is 16 bytes out of order from
11857 // the 16 different words that comprise the two doublequadword input vectors.
11858 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11859 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
11861 for (int i = 0; i != 8; ++i) {
11862 int Elt0 = MaskVals[i*2];
11863 int Elt1 = MaskVals[i*2+1];
11865 // This word of the result is all undef, skip it.
11866 if (Elt0 < 0 && Elt1 < 0)
11869 // This word of the result is already in the correct place, skip it.
11870 if ((Elt0 == i*2) && (Elt1 == i*2+1))
11873 SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
11874 SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
11877 // If Elt0 and Elt1 are defined, are consecutive, and can be loaded
11878 // together using a single extract, load it and store it.
11879 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
11880 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
11881 DAG.getIntPtrConstant(Elt1 / 2));
11882 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
11883 DAG.getIntPtrConstant(i));
11887 // If Elt1 is defined, extract it from the appropriate source. If the
11888 // source byte is not also odd, shift the extracted word left 8 bits;
11889 // otherwise clear the bottom 8 bits if we need to do an OR.
11891 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
11892 DAG.getIntPtrConstant(Elt1 / 2));
11893 if ((Elt1 & 1) == 0)
11894 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
11896 TLI.getShiftAmountTy(InsElt.getValueType())));
11897 else if (Elt0 >= 0)
11898 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
11899 DAG.getConstant(0xFF00, MVT::i16));
11901 // If Elt0 is defined, extract it from the appropriate source. If the
11902 // source byte is not also even, shift the extracted word right 8 bits. If
11903 // Elt1 was also defined, OR the extracted values together before
11904 // inserting them in the result.
11906 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
11907 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
11908 if ((Elt0 & 1) != 0)
11909 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
11911 TLI.getShiftAmountTy(InsElt0.getValueType())));
11912 else if (Elt1 >= 0)
11913 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
11914 DAG.getConstant(0x00FF, MVT::i16));
11915 InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
11918 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
11919 DAG.getIntPtrConstant(i));
11921 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
11924 // v32i8 shuffles - Translate to VPSHUFB if possible.
11926 SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp,
11927 const X86Subtarget *Subtarget,
11928 SelectionDAG &DAG) {
11929 MVT VT = SVOp->getSimpleValueType(0);
11930 SDValue V1 = SVOp->getOperand(0);
11931 SDValue V2 = SVOp->getOperand(1);
11933 SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
11935 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
11936 bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode());
11937 bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode());
11939 // VPSHUFB may be generated if
11940 // (1) one of the input vectors is undefined or zeroinitializer.
11941 // The mask value 0x80 puts 0 in the corresponding slot of the vector.
11942 // And (2) the mask indexes don't cross a 128-bit lane.
11943 if (VT != MVT::v32i8 || !Subtarget->hasInt256() ||
11944 (!V2IsUndef && !V2IsAllZero && !V1IsAllZero))
11947 if (V1IsAllZero && !V2IsAllZero) {
11948 CommuteVectorShuffleMask(MaskVals, 32);
11951 return getPSHUFB(MaskVals, V1, dl, DAG);
11954 /// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
11955 /// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
11956 /// done when every pair / quad of shuffle mask elements points to elements in
11957 /// the right sequence. e.g.
11958 /// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
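/// could be rewritten as the v4i32 shuffle <1, 5, 0, 7>, provided every group
/// of Scale consecutive mask entries forms such a run.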
11960 SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
11961 SelectionDAG &DAG) {
11962 MVT VT = SVOp->getSimpleValueType(0);
11964 unsigned NumElems = VT.getVectorNumElements();
11967 switch (VT.SimpleTy) {
11968 default: llvm_unreachable("Unexpected!");
11971 return SDValue(SVOp, 0);
11972 case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break;
11973 case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break;
11974 case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break;
11975 case MVT::v16i8: NewVT = MVT::v4i32; Scale = 4; break;
11976 case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break;
11977 case MVT::v32i8: NewVT = MVT::v8i32; Scale = 4; break;
11980 SmallVector<int, 8> MaskVec;
11981 for (unsigned i = 0; i != NumElems; i += Scale) {
11983 for (unsigned j = 0; j != Scale; ++j) {
11984 int EltIdx = SVOp->getMaskElt(i+j);
11988 StartIdx = (EltIdx / Scale);
11989 if (EltIdx != (int)(StartIdx*Scale + j))
11992 MaskVec.push_back(StartIdx);
11995 SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0));
11996 SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1));
11997 return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
12000 /// getVZextMovL - Return a zero-extending vector move low node.
12002 static SDValue getVZextMovL(MVT VT, MVT OpVT,
12003 SDValue SrcOp, SelectionDAG &DAG,
12004 const X86Subtarget *Subtarget, SDLoc dl) {
12005 if (VT == MVT::v2f64 || VT == MVT::v4f32) {
12006 LoadSDNode *LD = nullptr;
12007 if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
12008 LD = dyn_cast<LoadSDNode>(SrcOp);
12010 // movssrr and movsdrr do not clear top bits. Try to use movd, movq instead.
12012 MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
12013 if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
12014 SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
12015 SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
12016 SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
12018 OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
12019 return DAG.getNode(ISD::BITCAST, dl, VT,
12020 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
12021 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
12023 SrcOp.getOperand(0)
12029 return DAG.getNode(ISD::BITCAST, dl, VT,
12030 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
12031 DAG.getNode(ISD::BITCAST, dl,
12035 /// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vector shuffles
12036 /// which could not be matched by any known target specific shuffle.
12038 LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
12040 SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG);
12041 if (NewOp.getNode())
12044 MVT VT = SVOp->getSimpleValueType(0);
12046 unsigned NumElems = VT.getVectorNumElements();
12047 unsigned NumLaneElems = NumElems / 2;
12050 MVT EltVT = VT.getVectorElementType();
12051 MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
12054 SmallVector<int, 16> Mask;
12055 for (unsigned l = 0; l < 2; ++l) {
12056 // Build a shuffle mask for the output, discovering on the fly which
12057 // input vectors to use as shuffle operands (recorded in InputUsed).
12058 // If building a suitable shuffle vector proves too hard, then bail
12059 // out with UseBuildVector set.
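// For example, a v8i32 mask of <1, 0, 3, 2, 13, 12, 15, 14> builds the low
// output lane as a <1, 0, 3, 2> shuffle of V1's low lane and the high output
// lane as the same shuffle of V2's high lane.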
12060 bool UseBuildVector = false;
12061 int InputUsed[2] = { -1, -1 }; // Not yet discovered.
12062 unsigned LaneStart = l * NumLaneElems;
12063 for (unsigned i = 0; i != NumLaneElems; ++i) {
12064 // The mask element. This indexes into the input.
12065 int Idx = SVOp->getMaskElt(i+LaneStart);
12067 // the mask element does not index into any input vector.
12068 Mask.push_back(-1);
12072 // The input vector this mask element indexes into.
12073 int Input = Idx / NumLaneElems;
12075 // Turn the index into an offset from the start of the input vector.
12076 Idx -= Input * NumLaneElems;
12078 // Find or create a shuffle vector operand to hold this input.
12080 for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
12081 if (InputUsed[OpNo] == Input)
12082 // This input vector is already an operand.
12084 if (InputUsed[OpNo] < 0) {
12085 // Create a new operand for this input vector.
12086 InputUsed[OpNo] = Input;
12091 if (OpNo >= array_lengthof(InputUsed)) {
12092 // More than two input vectors used! Give up on trying to create a
12093 // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
12094 UseBuildVector = true;
12098 // Add the mask index for the new shuffle vector.
12099 Mask.push_back(Idx + OpNo * NumLaneElems);
12102 if (UseBuildVector) {
12103 SmallVector<SDValue, 16> SVOps;
12104 for (unsigned i = 0; i != NumLaneElems; ++i) {
12105 // The mask element. This indexes into the input.
12106 int Idx = SVOp->getMaskElt(i+LaneStart);
12108 SVOps.push_back(DAG.getUNDEF(EltVT));
12112 // The input vector this mask element indexes into.
12113 int Input = Idx / NumElems;
12115 // Turn the index into an offset from the start of the input vector.
12116 Idx -= Input * NumElems;
12118 // Extract the vector element by hand.
12119 SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
12120 SVOp->getOperand(Input),
12121 DAG.getIntPtrConstant(Idx)));
12124 // Construct the output using a BUILD_VECTOR.
12125 Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, SVOps);
12126 } else if (InputUsed[0] < 0) {
12127 // No input vectors were used! The result is undefined.
12128 Output[l] = DAG.getUNDEF(NVT);
12130 SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2),
12131 (InputUsed[0] % 2) * NumLaneElems,
12133 // If only one input was used, use an undefined vector for the other.
12134 SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) :
12135 Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2),
12136 (InputUsed[1] % 2) * NumLaneElems, DAG, dl);
12137 // At least one input vector was used. Create a new shuffle vector.
12138 Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
12144 // Concatenate the result back
12145 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]);
12148 /// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
12149 /// 4 elements, and match them with several different shuffle types.
12151 LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
12152 SDValue V1 = SVOp->getOperand(0);
12153 SDValue V2 = SVOp->getOperand(1);
12155 MVT VT = SVOp->getSimpleValueType(0);
12157 assert(VT.is128BitVector() && "Unsupported vector size");
12159 std::pair<int, int> Locs[4];
12160 int Mask1[] = { -1, -1, -1, -1 };
12161 SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end());
12163 unsigned NumHi = 0;
12164 unsigned NumLo = 0;
12165 for (unsigned i = 0; i != 4; ++i) {
12166 int Idx = PermMask[i];
12168 Locs[i] = std::make_pair(-1, -1);
12170 assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
12172 Locs[i] = std::make_pair(0, NumLo);
12173 Mask1[NumLo] = Idx;
12176 Locs[i] = std::make_pair(1, NumHi);
12178 Mask1[2+NumHi] = Idx;
12184 if (NumLo <= 2 && NumHi <= 2) {
12185 // If no more than two elements come from either vector, this can be
12186 // implemented with two shuffles. The first shuffle gathers the elements.
12187 // The second shuffle, which takes the first shuffle as both of its
12188 // vector operands, puts the elements into the right order.
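// For example, for the mask <0, 4, 2, 6> the first shuffle produces
// <a0, a2, b0, b2> and the second shuffle reorders that into the requested
// <a0, b0, a2, b2>.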
12189 V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12191 int Mask2[] = { -1, -1, -1, -1 };
12193 for (unsigned i = 0; i != 4; ++i)
12194 if (Locs[i].first != -1) {
12195 unsigned Idx = (i < 2) ? 0 : 4;
12196 Idx += Locs[i].first * 2 + Locs[i].second;
12200 return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
12203 if (NumLo == 3 || NumHi == 3) {
12204 // Otherwise, we must have three elements from one vector, call it X, and
12205 // one element from the other, call it Y. First, use a shufps to build an
12206 // intermediate vector with the one element from Y and the element from X
12207 // that will be in the same half in the final destination (the indexes don't
12208 // matter). Then, use a shufps to build the final vector, taking the half
12209 // containing the element from Y from the intermediate, and the other half
12212 // Normalize it so the 3 elements come from V1.
12213 CommuteVectorShuffleMask(PermMask, 4);
12217 // Find the element from V2.
12219 for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
12220 int Val = PermMask[HiIndex];
12227 Mask1[0] = PermMask[HiIndex];
12229 Mask1[2] = PermMask[HiIndex^1];
12231 V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12233 if (HiIndex >= 2) {
12234 Mask1[0] = PermMask[0];
12235 Mask1[1] = PermMask[1];
12236 Mask1[2] = HiIndex & 1 ? 6 : 4;
12237 Mask1[3] = HiIndex & 1 ? 4 : 6;
12238 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12241 Mask1[0] = HiIndex & 1 ? 2 : 0;
12242 Mask1[1] = HiIndex & 1 ? 0 : 2;
12243 Mask1[2] = PermMask[2];
12244 Mask1[3] = PermMask[3];
12249 return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
12252 // Break it into (shuffle shuffle_hi, shuffle_lo).
12253 int LoMask[] = { -1, -1, -1, -1 };
12254 int HiMask[] = { -1, -1, -1, -1 };
12256 int *MaskPtr = LoMask;
12257 unsigned MaskIdx = 0;
12258 unsigned LoIdx = 0;
12259 unsigned HiIdx = 2;
12260 for (unsigned i = 0; i != 4; ++i) {
12267 int Idx = PermMask[i];
12269 Locs[i] = std::make_pair(-1, -1);
12270 } else if (Idx < 4) {
12271 Locs[i] = std::make_pair(MaskIdx, LoIdx);
12272 MaskPtr[LoIdx] = Idx;
12275 Locs[i] = std::make_pair(MaskIdx, HiIdx);
12276 MaskPtr[HiIdx] = Idx;
12281 SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
12282 SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
12283 int MaskOps[] = { -1, -1, -1, -1 };
12284 for (unsigned i = 0; i != 4; ++i)
12285 if (Locs[i].first != -1)
12286 MaskOps[i] = Locs[i].first * 4 + Locs[i].second;
12287 return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
12290 static bool MayFoldVectorLoad(SDValue V) {
12291 while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
12292 V = V.getOperand(0);
12294 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
12295 V = V.getOperand(0);
12296 if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
12297 V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
12298 // BUILD_VECTOR (load), undef
12299 V = V.getOperand(0);
12301 return MayFoldLoad(V);
12305 SDValue getMOVDDup(SDValue &Op, SDLoc &dl, SDValue V1, SelectionDAG &DAG) {
12306 MVT VT = Op.getSimpleValueType();
12308 // Canonicalize to v2f64.
12309 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
12310 return DAG.getNode(ISD::BITCAST, dl, VT,
12311 getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
12316 SDValue getMOVLowToHigh(SDValue &Op, SDLoc &dl, SelectionDAG &DAG,
12318 SDValue V1 = Op.getOperand(0);
12319 SDValue V2 = Op.getOperand(1);
12320 MVT VT = Op.getSimpleValueType();
12322 assert(VT != MVT::v2i64 && "unsupported shuffle type");
12324 if (HasSSE2 && VT == MVT::v2f64)
12325 return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);
12327 // v4f32 or v4i32: canonicalize to v4f32 (which is legal for SSE1)
12328 return DAG.getNode(ISD::BITCAST, dl, VT,
12329 getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32,
12330 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1),
12331 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG));
12335 SDValue getMOVHighToLow(SDValue &Op, SDLoc &dl, SelectionDAG &DAG) {
12336 SDValue V1 = Op.getOperand(0);
12337 SDValue V2 = Op.getOperand(1);
12338 MVT VT = Op.getSimpleValueType();
12340 assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
12341 "unsupported shuffle type");
12343 if (V2.getOpcode() == ISD::UNDEF)
12347 return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
12351 SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
12352 SDValue V1 = Op.getOperand(0);
12353 SDValue V2 = Op.getOperand(1);
12354 MVT VT = Op.getSimpleValueType();
12355 unsigned NumElems = VT.getVectorNumElements();
12357 // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
12358 // operand of these instructions is only memory, so check if there's a
12359 // potential load folding here, otherwise use SHUFPS or MOVSD to match the
12361 bool CanFoldLoad = false;
12363 // Trivial case, when V2 comes from a load.
12364 if (MayFoldVectorLoad(V2))
12365 CanFoldLoad = true;
12367 // When V1 is a load, it can be folded later into a store in isel, example:
12368 // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
12370 // (MOVLPSmr addr:$src1, VR128:$src2)
12371 // So, recognize this potential and also use MOVLPS or MOVLPD
12372 else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
12373 CanFoldLoad = true;
12375 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12377 if (HasSSE2 && NumElems == 2)
12378 return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);
12381 // If we don't care about the second element, proceed to use movss.
12382 if (SVOp->getMaskElt(1) != -1)
12383 return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
12386 // movl and movlp will both match v2i64, but v2i64 is never matched by
12387 // movl earlier because we make it strict to avoid messing with the movlp load
12388 // folding logic (see the code above the getMOVLP call). Match it here then;
12389 // this is horrible, but it will stay like this until we move all shuffle
12390 // matching to x86 specific nodes. Note that for the 1st condition all
12391 // types are matched with movsd.
12393 // FIXME: isMOVLMask should be checked and matched before getMOVLP,
12394 // as to remove this logic from here, as much as possible
12395 if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT))
12396 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12397 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12400 assert(VT != MVT::v4i32 && "unsupported shuffle type");
12402 // Invert the operand order and use SHUFPS to match it.
12403 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1,
12404 getShuffleSHUFImmediate(SVOp), DAG);
12407 static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
12408 SelectionDAG &DAG) {
12410 MVT VT = Load->getSimpleValueType(0);
12411 MVT EVT = VT.getVectorElementType();
12412 SDValue Addr = Load->getOperand(1);
12413 SDValue NewAddr = DAG.getNode(
12414 ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
12415 DAG.getConstant(Index * EVT.getStoreSize(), Addr.getSimpleValueType()));
12418 DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
12419 DAG.getMachineFunction().getMachineMemOperand(
12420 Load->getMemOperand(), 0, EVT.getStoreSize()));
12424 // It is only safe to call this function if isINSERTPSMask is true for
12425 // this shufflevector mask.
12426 static SDValue getINSERTPS(ShuffleVectorSDNode *SVOp, SDLoc &dl,
12427 SelectionDAG &DAG) {
12428 // Generate an insertps instruction when inserting an f32 from memory onto a
12429 // v4f32 or when copying a member from one v4f32 to another.
12430 // We also use it for transferring i32 from one register to another,
12431 // since it simply copies the same bits.
12432 // If we're transferring an i32 from memory to a specific element in a
12433 // register, we output a generic DAG that will match the PINSRD
12435 MVT VT = SVOp->getSimpleValueType(0);
12436 MVT EVT = VT.getVectorElementType();
12437 SDValue V1 = SVOp->getOperand(0);
12438 SDValue V2 = SVOp->getOperand(1);
12439 auto Mask = SVOp->getMask();
12440 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
12441 "unsupported vector type for insertps/pinsrd");
12443 auto FromV1Predicate = [](const int &i) { return i < 4 && i > -1; };
12444 auto FromV2Predicate = [](const int &i) { return i >= 4; };
12445 int FromV1 = std::count_if(Mask.begin(), Mask.end(), FromV1Predicate);
12449 unsigned DestIndex;
12453 DestIndex = std::find_if(Mask.begin(), Mask.end(), FromV1Predicate) -
12456 // If we have 1 element from each vector, we have to check if we're
12457 // changing V1's element's place. If so, we're done. Otherwise, we
12458 // should assume we're changing V2's element's place and behave
12460 int FromV2 = std::count_if(Mask.begin(), Mask.end(), FromV2Predicate);
12461 assert(DestIndex <= INT32_MAX && "truncated destination index");
12462 if (FromV1 == FromV2 &&
12463 static_cast<int>(DestIndex) == Mask[DestIndex] % 4) {
12467 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12470 assert(std::count_if(Mask.begin(), Mask.end(), FromV2Predicate) == 1 &&
12471 "More than one element from V1 and from V2, or no elements from one "
12472 "of the vectors. This case should not have returned true from "
12477 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12480 // Get an index into the source vector in the range [0,4) (the mask is
12481 // in the range [0,8) because it can address V1 and V2)
12482 unsigned SrcIndex = Mask[DestIndex] % 4;
12483 if (MayFoldLoad(From)) {
12484 // Trivial case, when From comes from a load and is only used by the
12485 // shuffle. Make it use insertps from the vector that we need from that
12488 NarrowVectorLoadToElement(cast<LoadSDNode>(From), SrcIndex, DAG);
12489 if (!NewLoad.getNode())
12492 if (EVT == MVT::f32) {
12493 // Create this as a scalar to vector to match the instruction pattern.
12494 SDValue LoadScalarToVector =
12495 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, NewLoad);
12496 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4);
12497 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, LoadScalarToVector,
12499 } else { // EVT == MVT::i32
12500 // If we're getting an i32 from memory, use an INSERT_VECTOR_ELT
12501 // instruction, to match the PINSRD instruction, which loads an i32 to a
12502 // certain vector element.
12503 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, To, NewLoad,
12504 DAG.getConstant(DestIndex, MVT::i32));
12508 // Vector-element-to-vector
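// The INSERTPS immediate places the source element number in bits [7:6] and
// the destination element number in bits [5:4] (bits [3:0] are the zero mask,
// unused here). E.g. copying element 2 of From into element 1 of To uses an
// immediate of (1 << 4) | (2 << 6) = 0x90.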
12509 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4 | SrcIndex << 6);
12510 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, From, InsertpsMask);
12513 // Reduce a vector shuffle to zext.
12514 static SDValue LowerVectorIntExtend(SDValue Op, const X86Subtarget *Subtarget,
12515 SelectionDAG &DAG) {
12516 // PMOVZX is only available from SSE41.
12517 if (!Subtarget->hasSSE41())
12520 MVT VT = Op.getSimpleValueType();
12522 // Only AVX2 supports 256-bit vector integer extension.
12523 if (!Subtarget->hasInt256() && VT.is256BitVector())
12526 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12528 SDValue V1 = Op.getOperand(0);
12529 SDValue V2 = Op.getOperand(1);
12530 unsigned NumElems = VT.getVectorNumElements();
12532 // Extending is a unary operation, and the element type of the source vector
12533 // must not be equal to or larger than i64.
12534 if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() ||
12535 VT.getVectorElementType() == MVT::i64)
12538 // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4.
12539 unsigned Shift = 1; // Start from 2, i.e. 1 << 1.
12540 while ((1U << Shift) < NumElems) {
12541 if (SVOp->getMaskElt(1U << Shift) == 1)
12544 // The maximal ratio is 8, i.e. from i8 to i64.
12549 // Check the shuffle mask.
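// E.g. with a ratio of 2 (Shift == 1), a v8i16 shuffle is accepted when its
// mask looks like <0, -1, 1, -1, 2, -1, 3, -1>: every position that is a
// multiple of the ratio must read element i >> Shift of V1, and all other
// positions must be undef.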
12550 unsigned Mask = (1U << Shift) - 1;
12551 for (unsigned i = 0; i != NumElems; ++i) {
12552 int EltIdx = SVOp->getMaskElt(i);
12553 if ((i & Mask) != 0 && EltIdx != -1)
12555 if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift))
12559 unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift;
12560 MVT NeVT = MVT::getIntegerVT(NBits);
12561 MVT NVT = MVT::getVectorVT(NeVT, NumElems >> Shift);
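// E.g. for a v16i8 shuffle with a ratio of 4, NVT is v4i32: the low four
// bytes of V1 are zero-extended to i32 by VZEXT and the result is bitcast
// back to v16i8.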
12563 if (!DAG.getTargetLoweringInfo().isTypeLegal(NVT))
12566 return DAG.getNode(ISD::BITCAST, DL, VT,
12567 DAG.getNode(X86ISD::VZEXT, DL, NVT, V1));
12570 static SDValue NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
12571 SelectionDAG &DAG) {
12572 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12573 MVT VT = Op.getSimpleValueType();
12575 SDValue V1 = Op.getOperand(0);
12576 SDValue V2 = Op.getOperand(1);
12578 if (isZeroShuffle(SVOp))
12579 return getZeroVector(VT, Subtarget, DAG, dl);
12581 // Handle splat operations
12582 if (SVOp->isSplat()) {
12583 // Use vbroadcast whenever the splat comes from a foldable load
12584 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
12585 if (Broadcast.getNode())
12589 // Check integer expanding shuffles.
12590 SDValue NewOp = LowerVectorIntExtend(Op, Subtarget, DAG);
12591 if (NewOp.getNode())
12594 // If the shuffle can be profitably rewritten as a narrower shuffle, then
12596 if (VT == MVT::v8i16 || VT == MVT::v16i8 || VT == MVT::v16i16 ||
12597 VT == MVT::v32i8) {
12598 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12599 if (NewOp.getNode())
12600 return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
12601 } else if (VT.is128BitVector() && Subtarget->hasSSE2()) {
12602 // FIXME: Figure out a cleaner way to do this.
12603 if (ISD::isBuildVectorAllZeros(V2.getNode())) {
12604 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12605 if (NewOp.getNode()) {
12606 MVT NewVT = NewOp.getSimpleValueType();
12607 if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
12608 NewVT, true, false))
12609 return getVZextMovL(VT, NewVT, NewOp.getOperand(0), DAG, Subtarget,
12612 } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
12613 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12614 if (NewOp.getNode()) {
12615 MVT NewVT = NewOp.getSimpleValueType();
12616 if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
12617 return getVZextMovL(VT, NewVT, NewOp.getOperand(1), DAG, Subtarget,
12626 X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
12627 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12628 SDValue V1 = Op.getOperand(0);
12629 SDValue V2 = Op.getOperand(1);
12630 MVT VT = Op.getSimpleValueType();
12632 unsigned NumElems = VT.getVectorNumElements();
12633 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
12634 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
12635 bool V1IsSplat = false;
12636 bool V2IsSplat = false;
12637 bool HasSSE2 = Subtarget->hasSSE2();
12638 bool HasFp256 = Subtarget->hasFp256();
12639 bool HasInt256 = Subtarget->hasInt256();
12640 MachineFunction &MF = DAG.getMachineFunction();
12642 MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
12644 // Check if we should use the experimental vector shuffle lowering. If so,
12645 // delegate completely to that code path.
12646 if (ExperimentalVectorShuffleLowering)
12647 return lowerVectorShuffle(Op, Subtarget, DAG);
12649 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
12651 if (V1IsUndef && V2IsUndef)
12652 return DAG.getUNDEF(VT);
12654 // When we create a shuffle node, we put the UNDEF node as the second operand,
12655 // but in some cases the first operand may be transformed to UNDEF.
12656 // In this case we should just commute the node.
12658 return DAG.getCommutedVectorShuffle(*SVOp);
12660 // Vector shuffle lowering takes 3 steps:
12662 // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
12663 // narrowing and commutation of operands should be handled.
12664 // 2) Matching of shuffles with known shuffle masks to x86 target specific
12666 // 3) Rewriting of unmatched masks into new generic shuffle operations,
12667 // so the shuffle can be broken into other shuffles and the legalizer can
12668 // try the lowering again.
12670 // The general idea is that no vector_shuffle operation should be left to
12671 // be matched during isel; all of them must be converted to a target-specific
12674 // Normalize the input vectors. Here splats, zeroed vectors, profitable
12675 // narrowing and commutation of operands should be handled. The actual code
12676 // doesn't include all of those, work in progress...
12677 SDValue NewOp = NormalizeVectorShuffle(Op, Subtarget, DAG);
12678 if (NewOp.getNode())
12681 SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end());
12683 // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
12684 // unpckh_undef). Only use pshufd if speed is more important than size.
12685 if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12686 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12687 if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12688 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12690 if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() &&
12691 V2IsUndef && MayFoldVectorLoad(V1))
12692 return getMOVDDup(Op, dl, V1, DAG);
12694 if (isMOVHLPS_v_undef_Mask(M, VT))
12695 return getMOVHighToLow(Op, dl, DAG);
12697 // Used to match splats
12698 if (HasSSE2 && isUNPCKHMask(M, VT, HasInt256) && V2IsUndef &&
12699 (VT == MVT::v2f64 || VT == MVT::v2i64))
12700 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12702 if (isPSHUFDMask(M, VT)) {
12703 // The actual implementation will match the mask in the if above and then,
12704 // during isel, it can match several different instructions, not only pshufd
12705 // as its name says. Sad but true; emulate the behavior for now...
12706 if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
12707 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);
12709 unsigned TargetMask = getShuffleSHUFImmediate(SVOp);
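// The SHUF immediate packs one 2-bit source index per destination element,
// with element 0 in the low bits; e.g. the identity mask <0, 1, 2, 3> encodes
// as 0xE4.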
12711 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
12712 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);
12714 if (HasFp256 && (VT == MVT::v4f32 || VT == MVT::v2f64))
12715 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1, TargetMask,
12718 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1,
12722 if (isPALIGNRMask(M, VT, Subtarget))
12723 return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2,
12724 getShufflePALIGNRImmediate(SVOp),
12727 if (isVALIGNMask(M, VT, Subtarget))
12728 return getTargetShuffleNode(X86ISD::VALIGN, dl, VT, V1, V2,
12729 getShuffleVALIGNImmediate(SVOp),
12732 // Check if this can be converted into a logical shift.
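// A mask that slides a contiguous run of elements toward one end of the
// vector and fills the vacated positions with zeros is equivalent to a
// whole-vector byte shift (PSLLDQ/PSRLDQ). ShAmt is returned in elements
// here and is scaled to bits before the shift node is built.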
12733 bool isLeft = false;
12734 unsigned ShAmt = 0;
12736 bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
12737 if (isShift && ShVal.hasOneUse()) {
12738 // If the shifted value has multiple uses, it may be cheaper to use
12739 // v_set0 + movlhps or movhlps, etc.
12740 MVT EltVT = VT.getVectorElementType();
12741 ShAmt *= EltVT.getSizeInBits();
12742 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12745 if (isMOVLMask(M, VT)) {
12746 if (ISD::isBuildVectorAllZeros(V1.getNode()))
12747 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
12748 if (!isMOVLPMask(M, VT)) {
12749 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
12750 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12752 if (VT == MVT::v4i32 || VT == MVT::v4f32)
12753 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12757 // FIXME: fold these into legal mask.
12758 if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasInt256))
12759 return getMOVLowToHigh(Op, dl, DAG, HasSSE2);
12761 if (isMOVHLPSMask(M, VT))
12762 return getMOVHighToLow(Op, dl, DAG);
12764 if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget))
12765 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);
12767 if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget))
12768 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);
12770 if (isMOVLPMask(M, VT))
12771 return getMOVLP(Op, dl, DAG, HasSSE2);
12773 if (ShouldXformToMOVHLPS(M, VT) ||
12774 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT))
12775 return DAG.getCommutedVectorShuffle(*SVOp);
12778 // No better options. Use a vshldq / vsrldq.
12779 MVT EltVT = VT.getVectorElementType();
12780 ShAmt *= EltVT.getSizeInBits();
12781 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12784 bool Commuted = false;
12785 // FIXME: This should also accept a bitcast of a splat? Be careful, not
12786 // 1,1,1,1 -> v8i16 though.
12787 BitVector UndefElements;
12788 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V1.getNode()))
12789 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12791 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V2.getNode()))
12792 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12795 // Canonicalize the splat or undef, if present, to be on the RHS.
12796 if (!V2IsUndef && V1IsSplat && !V2IsSplat) {
12797 CommuteVectorShuffleMask(M, NumElems);
12799 std::swap(V1IsSplat, V2IsSplat);
12803 if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) {
12804 // Shuffling low element of v1 into undef, just return v1.
12807 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
12808 // the instruction selector will not match, so get a canonical MOVL with
12809 // swapped operands to undo the commute.
12810 return getMOVL(DAG, dl, VT, V2, V1);
12813 if (isUNPCKLMask(M, VT, HasInt256))
12814 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12816 if (isUNPCKHMask(M, VT, HasInt256))
12817 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12820 // Normalize the mask so all entries that point to V2 point to its first
12821 // element, then try to match unpck{h|l} again. If it matches, return a
12822 // new vector_shuffle with the corrected mask.
12823 SmallVector<int, 8> NewMask(M.begin(), M.end());
12824 NormalizeMask(NewMask, NumElems);
12825 if (isUNPCKLMask(NewMask, VT, HasInt256, true))
12826 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12827 if (isUNPCKHMask(NewMask, VT, HasInt256, true))
12828 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12832 // Commute it back and try unpck* again.
12833 // FIXME: this seems wrong.
12834 CommuteVectorShuffleMask(M, NumElems);
12836 std::swap(V1IsSplat, V2IsSplat);
12838 if (isUNPCKLMask(M, VT, HasInt256))
12839 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12841 if (isUNPCKHMask(M, VT, HasInt256))
12842 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12845 // Normalize the node to match x86 shuffle ops if needed
12846 if (!V2IsUndef && (isSHUFPMask(M, VT, /* Commuted */ true)))
12847 return DAG.getCommutedVectorShuffle(*SVOp);
12849 // The checks below are all present in isShuffleMaskLegal, but they are
12850 // inlined here right now to enable us to directly emit target specific
12851 // nodes, and remove one by one until they don't return Op anymore.
12853 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
12854 SVOp->getSplatIndex() == 0 && V2IsUndef) {
12855 if (VT == MVT::v2f64 || VT == MVT::v2i64)
12856 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12859 if (isPSHUFHWMask(M, VT, HasInt256))
12860 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
12861 getShufflePSHUFHWImmediate(SVOp),
12864 if (isPSHUFLWMask(M, VT, HasInt256))
12865 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
12866 getShufflePSHUFLWImmediate(SVOp),
12869 unsigned MaskValue;
12870 if (isBlendMask(M, VT, Subtarget->hasSSE41(), HasInt256, &MaskValue))
12871 return LowerVECTOR_SHUFFLEtoBlend(SVOp, MaskValue, Subtarget, DAG);
12873 if (isSHUFPMask(M, VT))
12874 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
12875 getShuffleSHUFImmediate(SVOp), DAG);
12877 if (isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12878 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12879 if (isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12880 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12882 //===--------------------------------------------------------------------===//
12883 // Generate target specific nodes for 128 or 256-bit shuffles only
12884 // supported in the AVX instruction set.
12887 // Handle VMOVDDUPY permutations
12888 if (V2IsUndef && isMOVDDUPYMask(M, VT, HasFp256))
12889 return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);
12891 // Handle VPERMILPS/D* permutations
12892 if (isVPERMILPMask(M, VT)) {
12893 if ((HasInt256 && VT == MVT::v8i32) || VT == MVT::v16i32)
12894 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1,
12895 getShuffleSHUFImmediate(SVOp), DAG);
12896 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1,
12897 getShuffleSHUFImmediate(SVOp), DAG);
12901 if (VT.is512BitVector() && isINSERT64x4Mask(M, VT, &Idx))
12902 return Insert256BitVector(V1, Extract256BitVector(V2, 0, DAG, dl),
12903 Idx*(NumElems/2), DAG, dl);
12905 // Handle VPERM2F128/VPERM2I128 permutations
12906 if (isVPERM2X128Mask(M, VT, HasFp256))
12907 return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
12908 V2, getShuffleVPERM2X128Immediate(SVOp), DAG);
12910 if (Subtarget->hasSSE41() && isINSERTPSMask(M, VT))
12911 return getINSERTPS(SVOp, dl, DAG);
12914 if (V2IsUndef && HasInt256 && isPermImmMask(M, VT, Imm8))
12915 return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, Imm8, DAG);
12917 if ((V2IsUndef && HasInt256 && VT.is256BitVector() && NumElems == 8) ||
12918 VT.is512BitVector()) {
12919 MVT MaskEltVT = MVT::getIntegerVT(VT.getVectorElementType().getSizeInBits());
12920 MVT MaskVectorVT = MVT::getVectorVT(MaskEltVT, NumElems);
12921 SmallVector<SDValue, 16> permclMask;
12922 for (unsigned i = 0; i != NumElems; ++i) {
12923 permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MaskEltVT));
12926 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVectorVT, permclMask);
12928 // Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32
12929 return DAG.getNode(X86ISD::VPERMV, dl, VT,
12930 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1);
12931 return DAG.getNode(X86ISD::VPERMV3, dl, VT, V1,
12932 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V2);
12935 //===--------------------------------------------------------------------===//
12936 // Since no target specific shuffle was selected for this generic one,
12937 // lower it into other known shuffles. FIXME: this isn't true yet, but
12938 // this is the plan.
12941 // Handle v8i16 specifically since SSE can do byte extraction and insertion.
12942 if (VT == MVT::v8i16) {
12943 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG);
12944 if (NewOp.getNode())
12948 if (VT == MVT::v16i16 && HasInt256) {
12949 SDValue NewOp = LowerVECTOR_SHUFFLEv16i16(Op, DAG);
12950 if (NewOp.getNode())
12954 if (VT == MVT::v16i8) {
12955 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, Subtarget, DAG);
12956 if (NewOp.getNode())
12960 if (VT == MVT::v32i8) {
12961 SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG);
12962 if (NewOp.getNode())
12966 // Handle all 128-bit wide vectors with 4 elements, and match them with
12967 // several different shuffle types.
12968 if (NumElems == 4 && VT.is128BitVector())
12969 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG);
12971 // Handle general 256-bit shuffles
12972 if (VT.is256BitVector())
12973 return LowerVECTOR_SHUFFLE_256(SVOp, DAG);
12978 // This function assumes its argument is a BUILD_VECTOR of constants or
12979 // undef SDNodes, i.e. ISD::isBuildVectorOfConstantSDNodes(BuildVector) is
12981 static bool BUILD_VECTORtoBlendMask(BuildVectorSDNode *BuildVector,
12982 unsigned &MaskValue) {
12984 unsigned NumElems = BuildVector->getNumOperands();
12985 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
12986 unsigned NumLanes = (NumElems - 1) / 8 + 1;
12987 unsigned NumElemsInLane = NumElems / NumLanes;
12989 // Blend for v16i16 should be symmetric for both lanes.
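// E.g. for a v4i32 condition <-1, 0, -1, 0> the computed immediate is 0b1010:
// bit i is set exactly when element i should come from the second operand.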
12990 for (unsigned i = 0; i < NumElemsInLane; ++i) {
12991 SDValue EltCond = BuildVector->getOperand(i);
12992 SDValue SndLaneEltCond =
12993 (NumLanes == 2) ? BuildVector->getOperand(i + NumElemsInLane) : EltCond;
12995 int Lane1Cond = -1, Lane2Cond = -1;
12996 if (isa<ConstantSDNode>(EltCond))
12997 Lane1Cond = !isZero(EltCond);
12998 if (isa<ConstantSDNode>(SndLaneEltCond))
12999 Lane2Cond = !isZero(SndLaneEltCond);
13001 if (Lane1Cond == Lane2Cond || Lane2Cond < 0)
13002 // Lane1Cond != 0 means we want the first argument.
13003 // Lane1Cond == 0 means we want the second argument.
13004 // The encoding of this argument is 0 for the first argument, 1
13005 // for the second. Therefore, invert the condition.
13006 MaskValue |= !Lane1Cond << i;
13007 else if (Lane1Cond < 0)
13008 MaskValue |= !Lane2Cond << i;
13015 /// \brief Try to lower a VSELECT instruction to an immediate-controlled blend
13017 static SDValue lowerVSELECTtoBLENDI(SDValue Op, const X86Subtarget *Subtarget,
13018 SelectionDAG &DAG) {
13019 SDValue Cond = Op.getOperand(0);
13020 SDValue LHS = Op.getOperand(1);
13021 SDValue RHS = Op.getOperand(2);
13023 MVT VT = Op.getSimpleValueType();
13024 MVT EltVT = VT.getVectorElementType();
13025 unsigned NumElems = VT.getVectorNumElements();
13027 // There is no blend with immediate in AVX-512.
13028 if (VT.is512BitVector())
13031 if (!Subtarget->hasSSE41() || EltVT == MVT::i8)
13033 if (!Subtarget->hasInt256() && VT == MVT::v16i16)
13036 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
13039 // Check the mask for BLEND and build the value.
13040 unsigned MaskValue = 0;
13041 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
13044 // Convert i32 vectors to floating point if this is not AVX2.
13045 // AVX2 introduced the VPBLENDD instruction for 128- and 256-bit vectors.
13047 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
13048 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
13050 LHS = DAG.getNode(ISD::BITCAST, dl, VT, LHS);
13051 RHS = DAG.getNode(ISD::BITCAST, dl, VT, RHS);
13054 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, LHS, RHS,
13055 DAG.getConstant(MaskValue, MVT::i32));
13056 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
13059 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
13060 // A vselect where all conditions and data are constants can be optimized into
13061 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
13062 if (ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(0).getNode()) &&
13063 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(1).getNode()) &&
13064 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(2).getNode()))
13067 SDValue BlendOp = lowerVSELECTtoBLENDI(Op, Subtarget, DAG);
13068 if (BlendOp.getNode())
13071 // Some types for vselect were previously set to Expand, not Legal or
13072 // Custom. Return an empty SDValue so we fall through to Expand, after
13073 // the Custom lowering phase.
13074 MVT VT = Op.getSimpleValueType();
13075 switch (VT.SimpleTy) {
13080 if (Subtarget->hasBWI() && Subtarget->hasVLX())
13085 // We couldn't create a "Blend with immediate" node.
13086 // This node should still be legal, but we'll have to emit a blendv*
13091 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
13092 MVT VT = Op.getSimpleValueType();
13095 if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
13098 if (VT.getSizeInBits() == 8) {
13099 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
13100 Op.getOperand(0), Op.getOperand(1));
13101 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
13102 DAG.getValueType(VT));
13103 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13106 if (VT.getSizeInBits() == 16) {
13107 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13108 // If Idx is 0, it's cheaper to do a move instead of a pextrw.
13110 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
13111 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13112 DAG.getNode(ISD::BITCAST, dl,
13115 Op.getOperand(1)));
13116 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
13117 Op.getOperand(0), Op.getOperand(1));
13118 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
13119 DAG.getValueType(VT));
13120 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13123 if (VT == MVT::f32) {
13124 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
13125 // the result back to an FR32 register. It's only worth matching if the
13126 // result has a single use which is a store or a bitcast to i32. And in
13127 // the case of a store, it's not worth it if the index is a constant 0,
13128 // because a MOVSSmr can be used instead, which is smaller and faster.
13129 if (!Op.hasOneUse())
13131 SDNode *User = *Op.getNode()->use_begin();
13132 if ((User->getOpcode() != ISD::STORE ||
13133 (isa<ConstantSDNode>(Op.getOperand(1)) &&
13134 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
13135 (User->getOpcode() != ISD::BITCAST ||
13136 User->getValueType(0) != MVT::i32))
13138 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13139 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
13142 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
13145 if (VT == MVT::i32 || VT == MVT::i64) {
13146 // ExtractPS/pextrq work with a constant index.
13147 if (isa<ConstantSDNode>(Op.getOperand(1)))
13153 /// Extract one bit from a mask vector, such as v16i1 or v8i1.
13154 /// AVX-512 feature.
13156 X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const {
13157 SDValue Vec = Op.getOperand(0);
13159 MVT VecVT = Vec.getSimpleValueType();
13160 SDValue Idx = Op.getOperand(1);
13161 MVT EltVT = Op.getSimpleValueType();
13163 assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
13164 assert((VecVT.getVectorNumElements() <= 16 || Subtarget->hasBWI()) &&
13165 "Unexpected vector type in ExtractBitFromMaskVector");
13167 // A variable index can't be handled in mask registers;
13168 // extend the vector to VR512.
13169 if (!isa<ConstantSDNode>(Idx)) {
13170 MVT ExtVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13171 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Vec);
13172 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
13173 ExtVT.getVectorElementType(), Ext, Idx);
13174 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
13177 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13178 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13179 if (!Subtarget->hasDQI() && (VecVT.getVectorNumElements() <= 8))
13180 rc = getRegClassFor(MVT::v16i1);
13181 unsigned MaxSift = rc->getSize()*8 - 1;
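// Shift the requested bit up to the most significant position and then back
// down to bit 0, so the VEXTRACT of element 0 below reads exactly that bit;
// e.g. with a 16-bit mask register and IdxVal == 3 this is a left shift by 12
// followed by a right shift by 15.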
13182 Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
13183 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
13184 Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
13185 DAG.getConstant(MaxSift, MVT::i8));
13186 return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec,
13187 DAG.getIntPtrConstant(0));
13191 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
13192 SelectionDAG &DAG) const {
13194 SDValue Vec = Op.getOperand(0);
13195 MVT VecVT = Vec.getSimpleValueType();
13196 SDValue Idx = Op.getOperand(1);
13198 if (Op.getSimpleValueType() == MVT::i1)
13199 return ExtractBitFromMaskVector(Op, DAG);
13201 if (!isa<ConstantSDNode>(Idx)) {
13202 if (VecVT.is512BitVector() ||
13203 (VecVT.is256BitVector() && Subtarget->hasInt256() &&
13204 VecVT.getVectorElementType().getSizeInBits() == 32)) {
13207 MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
13208 MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
13209 MaskEltVT.getSizeInBits());
13211 Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT);
13212 SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT,
13213 getZeroVector(MaskVT, Subtarget, DAG, dl),
13214 Idx, DAG.getConstant(0, getPointerTy()));
13215 SDValue Perm = DAG.getNode(X86ISD::VPERMV, dl, VecVT, Mask, Vec);
13216 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(),
13217 Perm, DAG.getConstant(0, getPointerTy()));
13222 // If this is a 256-bit vector result, first extract the 128-bit vector and
13223 // then extract the element from the 128-bit vector.
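// E.g. extracting element 5 of a v8i32: take the upper 128-bit half and
// extract element 5 - 4 = 1 from it.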
13224 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
13226 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13227 // Get the 128-bit vector.
13228 Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);
13229 MVT EltVT = VecVT.getVectorElementType();
13231 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
13233 //if (IdxVal >= NumElems/2)
13234 // IdxVal -= NumElems/2;
13235 IdxVal -= (IdxVal/ElemsPerChunk)*ElemsPerChunk;
13236 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
13237 DAG.getConstant(IdxVal, MVT::i32));
13240 assert(VecVT.is128BitVector() && "Unexpected vector length");
13242 if (Subtarget->hasSSE41()) {
13243 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
13248 MVT VT = Op.getSimpleValueType();
13249 // TODO: handle v16i8.
13250 if (VT.getSizeInBits() == 16) {
13251 SDValue Vec = Op.getOperand(0);
13252 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13254 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
13255 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13256 DAG.getNode(ISD::BITCAST, dl,
13258 Op.getOperand(1)));
13259 // Transform it so it matches pextrw, which produces a 32-bit result.
13260 MVT EltVT = MVT::i32;
13261 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
13262 Op.getOperand(0), Op.getOperand(1));
13263 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
13264 DAG.getValueType(VT));
13265 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13268 if (VT.getSizeInBits() == 32) {
13269 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13273 // SHUFPS the element to the lowest double word, then movss.
13274 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
13275 MVT VVT = Op.getOperand(0).getSimpleValueType();
13276 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13277 DAG.getUNDEF(VVT), Mask);
13278 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13279 DAG.getIntPtrConstant(0));
13282 if (VT.getSizeInBits() == 64) {
13283 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
13284 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
13285 // to match extract_elt for f64.
13286 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13290 // UNPCKHPD the element to the lowest double word, then movsd.
13291 // Note that if the lower 64 bits of the result of the UNPCKHPD are then
13292 // stored to an f64mem, the whole operation is folded into a single MOVHPDmr.
13293 int Mask[2] = { 1, -1 };
13294 MVT VVT = Op.getOperand(0).getSimpleValueType();
13295 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13296 DAG.getUNDEF(VVT), Mask);
13297 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13298 DAG.getIntPtrConstant(0));
13304 /// Insert one bit into a mask vector, such as v16i1 or v8i1.
13305 /// AVX-512 feature.
13307 X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
13309 SDValue Vec = Op.getOperand(0);
13310 SDValue Elt = Op.getOperand(1);
13311 SDValue Idx = Op.getOperand(2);
13312 MVT VecVT = Vec.getSimpleValueType();
13314 if (!isa<ConstantSDNode>(Idx)) {
13315 // Non-constant index. Extend the source and destination,
13316 // insert element and then truncate the result.
13317 MVT ExtVecVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13318 MVT ExtEltVT = (VecVT == MVT::v8i1 ? MVT::i64 : MVT::i32);
13319 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
13320 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVecVT, Vec),
13321 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
13322 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
13325 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13326 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
13327 if (Vec.getOpcode() == ISD::UNDEF)
13328 return DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13329 DAG.getConstant(IdxVal, MVT::i8));
13330 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13331 unsigned MaxSift = rc->getSize()*8 - 1;
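// Shift the new bit to the most significant position (clearing every other
// bit of EltInVec) and then back down to position IdxVal before ORing it
// into the existing mask vector.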
13332 EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13333 DAG.getConstant(MaxSift, MVT::i8));
13334 EltInVec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, EltInVec,
13335 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
13336 return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
13339 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
13340 SelectionDAG &DAG) const {
13341 MVT VT = Op.getSimpleValueType();
13342 MVT EltVT = VT.getVectorElementType();
13344 if (EltVT == MVT::i1)
13345 return InsertBitToMaskVector(Op, DAG);
13348 SDValue N0 = Op.getOperand(0);
13349 SDValue N1 = Op.getOperand(1);
13350 SDValue N2 = Op.getOperand(2);
13351 if (!isa<ConstantSDNode>(N2))
13353 auto *N2C = cast<ConstantSDNode>(N2);
13354 unsigned IdxVal = N2C->getZExtValue();
13356 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
13357 // into that, and then insert the subvector back into the result.
13358 if (VT.is256BitVector() || VT.is512BitVector()) {
13359 // Get the desired 128-bit vector half.
13360 SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
13362 // Insert the element into the desired half.
13363 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
13364 unsigned IdxIn128 = IdxVal - (IdxVal / NumEltsIn128) * NumEltsIn128;
13366 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
13367 DAG.getConstant(IdxIn128, MVT::i32));
13369 // Insert the changed part back to the 256-bit vector
13370 return Insert128BitVector(N0, V, IdxVal, DAG, dl);
13372 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
13374 if (Subtarget->hasSSE41()) {
13375 if (EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) {
13377 if (VT == MVT::v8i16) {
13378 Opc = X86ISD::PINSRW;
13380 assert(VT == MVT::v16i8);
13381 Opc = X86ISD::PINSRB;
13384 // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
13386 if (N1.getValueType() != MVT::i32)
13387 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13388 if (N2.getValueType() != MVT::i32)
13389 N2 = DAG.getIntPtrConstant(IdxVal);
13390 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
13393 if (EltVT == MVT::f32) {
13394 // Bits [7:6] of the constant are the source select. This will always be
13395 // zero here. The DAG Combiner may combine an extract_elt index into
13397 // bits. For example (insert (extract, 3), 2) could be matched by
13399 // the '3' into bits [7:6] of X86ISD::INSERTPS.
13400 // Bits [5:4] of the constant are the destination select. This is the
13401 // value of the incoming immediate.
13402 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
13403 // combine either bitwise AND or insert of float 0.0 to set these bits.
13404 N2 = DAG.getIntPtrConstant(IdxVal << 4);
13405 // Create this as a scalar-to-vector.
13406 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
13407 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
13410 if (EltVT == MVT::i32 || EltVT == MVT::i64) {
13411 // PINSR* works with constant index.
13416 if (EltVT == MVT::i8)
13419 if (EltVT.getSizeInBits() == 16) {
13420 // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
13421 // as its second argument.
13422 if (N1.getValueType() != MVT::i32)
13423 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13424 if (N2.getValueType() != MVT::i32)
13425 N2 = DAG.getIntPtrConstant(IdxVal);
13426 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
13431 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
13433 MVT OpVT = Op.getSimpleValueType();
13435 // If this is a 256-bit vector result, first insert into a 128-bit
13436 // vector and then insert into the 256-bit vector.
13437 if (!OpVT.is128BitVector()) {
13438 // Insert into a 128-bit vector.
13439 unsigned SizeFactor = OpVT.getSizeInBits()/128;
13440 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
13441 OpVT.getVectorNumElements() / SizeFactor);
13443 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
13445 // Insert the 128-bit vector.
13446 return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
13449 if (OpVT == MVT::v1i64 &&
13450 Op.getOperand(0).getValueType() == MVT::i64)
13451 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
13453 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
13454 assert(OpVT.is128BitVector() && "Expected an SSE type!");
13455 return DAG.getNode(ISD::BITCAST, dl, OpVT,
13456 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
13459 // Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
13460 // a simple subregister reference or explicit instructions to grab
13461 // upper bits of a vector.
13462 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13463 SelectionDAG &DAG) {
13465 SDValue In = Op.getOperand(0);
13466 SDValue Idx = Op.getOperand(1);
13467 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13468 MVT ResVT = Op.getSimpleValueType();
13469 MVT InVT = In.getSimpleValueType();
13471 if (Subtarget->hasFp256()) {
13472 if (ResVT.is128BitVector() &&
13473 (InVT.is256BitVector() || InVT.is512BitVector()) &&
13474 isa<ConstantSDNode>(Idx)) {
13475 return Extract128BitVector(In, IdxVal, DAG, dl);
13477 if (ResVT.is256BitVector() && InVT.is512BitVector() &&
13478 isa<ConstantSDNode>(Idx)) {
13479 return Extract256BitVector(In, IdxVal, DAG, dl);
13485 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
13486 // simple superregister reference or explicit instructions to insert
13487 // the upper bits of a vector.
13488 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13489 SelectionDAG &DAG) {
13490 if (!Subtarget->hasAVX())
13494 SDValue Vec = Op.getOperand(0);
13495 SDValue SubVec = Op.getOperand(1);
13496 SDValue Idx = Op.getOperand(2);
13498 if (!isa<ConstantSDNode>(Idx))
13501 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13502 MVT OpVT = Op.getSimpleValueType();
13503 MVT SubVecVT = SubVec.getSimpleValueType();
13505 // Fold two 16-byte subvector loads into one 32-byte load:
13506 // (insert_subvector (insert_subvector undef, (load addr), 0),
13507 // (load addr + 16), Elts/2)
13509 if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
13510 Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
13511 OpVT.is256BitVector() && SubVecVT.is128BitVector() &&
13512 !Subtarget->isUnalignedMem32Slow()) {
13513 SDValue SubVec2 = Vec.getOperand(1);
13514 if (auto *Idx2 = dyn_cast<ConstantSDNode>(Vec.getOperand(2))) {
13515 if (Idx2->getZExtValue() == 0) {
13516 SDValue Ops[] = { SubVec2, SubVec };
13517 SDValue LD = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, false);
13524 if ((OpVT.is256BitVector() || OpVT.is512BitVector()) &&
13525 SubVecVT.is128BitVector())
13526 return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
13528 if (OpVT.is512BitVector() && SubVecVT.is256BitVector())
13529 return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
13534 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
13535 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
13536 // one of the above mentioned nodes. It has to be wrapped because otherwise
13537 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
13538 // be used to form an addressing mode. These wrapped nodes will be selected
13541 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
13542 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
13544 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13545 // global base reg.
13546 unsigned char OpFlag = 0;
13547 unsigned WrapperKind = X86ISD::Wrapper;
13548 CodeModel::Model M = DAG.getTarget().getCodeModel();
13550 if (Subtarget->isPICStyleRIPRel() &&
13551 (M == CodeModel::Small || M == CodeModel::Kernel))
13552 WrapperKind = X86ISD::WrapperRIP;
13553 else if (Subtarget->isPICStyleGOT())
13554 OpFlag = X86II::MO_GOTOFF;
13555 else if (Subtarget->isPICStyleStubPIC())
13556 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13558 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
13559 CP->getAlignment(),
13560 CP->getOffset(), OpFlag);
13562 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13563 // With PIC, the address is actually $g + Offset.
13565 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13566 DAG.getNode(X86ISD::GlobalBaseReg,
13567 SDLoc(), getPointerTy()),
13574 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
13575 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
13577 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13578 // global base reg.
13579 unsigned char OpFlag = 0;
13580 unsigned WrapperKind = X86ISD::Wrapper;
13581 CodeModel::Model M = DAG.getTarget().getCodeModel();
13583 if (Subtarget->isPICStyleRIPRel() &&
13584 (M == CodeModel::Small || M == CodeModel::Kernel))
13585 WrapperKind = X86ISD::WrapperRIP;
13586 else if (Subtarget->isPICStyleGOT())
13587 OpFlag = X86II::MO_GOTOFF;
13588 else if (Subtarget->isPICStyleStubPIC())
13589 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13591 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
13594 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13596 // With PIC, the address is actually $g + Offset.
13598 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13599 DAG.getNode(X86ISD::GlobalBaseReg,
13600 SDLoc(), getPointerTy()),
13607 X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
13608 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
13610 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13611 // global base reg.
13612 unsigned char OpFlag = 0;
13613 unsigned WrapperKind = X86ISD::Wrapper;
13614 CodeModel::Model M = DAG.getTarget().getCodeModel();
13616 if (Subtarget->isPICStyleRIPRel() &&
13617 (M == CodeModel::Small || M == CodeModel::Kernel)) {
13618 if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
13619 OpFlag = X86II::MO_GOTPCREL;
13620 WrapperKind = X86ISD::WrapperRIP;
13621 } else if (Subtarget->isPICStyleGOT()) {
13622 OpFlag = X86II::MO_GOT;
13623 } else if (Subtarget->isPICStyleStubPIC()) {
13624 OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
13625 } else if (Subtarget->isPICStyleStubNoDynamic()) {
13626 OpFlag = X86II::MO_DARWIN_NONLAZY;
13629 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);
13632 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13634 // With PIC, the address is actually $g + Offset.
13635 if (DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
13636 !Subtarget->is64Bit()) {
13637 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13638 DAG.getNode(X86ISD::GlobalBaseReg,
13639 SDLoc(), getPointerTy()),
13643 // For symbols that require a load from a stub to get the address, emit the
13645 if (isGlobalStubReference(OpFlag))
13646 Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result,
13647 MachinePointerInfo::getGOT(), false, false, false, 0);
13653 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
13654 // Create the TargetBlockAddressAddress node.
13655 unsigned char OpFlags =
13656 Subtarget->ClassifyBlockAddressReference();
13657 CodeModel::Model M = DAG.getTarget().getCodeModel();
13658 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
13659 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
13661 SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset,
13664 if (Subtarget->isPICStyleRIPRel() &&
13665 (M == CodeModel::Small || M == CodeModel::Kernel))
13666 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13668 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13670 // With PIC, the address is actually $g + Offset.
13671 if (isGlobalRelativeToPICBase(OpFlags)) {
13672 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13673 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13681 X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
13682 int64_t Offset, SelectionDAG &DAG) const {
13683 // Create the TargetGlobalAddress node, folding in the constant
13684 // offset if it is legal.
13685 unsigned char OpFlags =
13686 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget());
13687 CodeModel::Model M = DAG.getTarget().getCodeModel();
13689 if (OpFlags == X86II::MO_NO_FLAG &&
13690 X86::isOffsetSuitableForCodeModel(Offset, M)) {
13691 // A direct static reference to a global.
13692 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
13695 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
13698 if (Subtarget->isPICStyleRIPRel() &&
13699 (M == CodeModel::Small || M == CodeModel::Kernel))
13700 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13702 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13704 // With PIC, the address is actually $g + Offset.
13705 if (isGlobalRelativeToPICBase(OpFlags)) {
13706 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13707 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13711 // For globals that require a load from a stub to get the address, emit the
13713 if (isGlobalStubReference(OpFlags))
13714 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
13715 MachinePointerInfo::getGOT(), false, false, false, 0);
13717 // If there was a non-zero offset that we didn't fold, create an explicit
13718 // addition for it.
13720 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
13721 DAG.getConstant(Offset, getPointerTy()));
13727 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
13728 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
13729 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
13730 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
13734 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
13735 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
13736 unsigned char OperandFlags, bool LocalDynamic = false) {
13737 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13738 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13740 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13741 GA->getValueType(0),
13745 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
13749 SDValue Ops[] = { Chain, TGA, *InFlag };
13750 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13752 SDValue Ops[] = { Chain, TGA };
13753 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13757 // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
13757 MFI->setAdjustsStack(true);
13758 MFI->setHasCalls(true);
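// The TLSADDR/TLSBASEADDR node is expanded into the actual call to the
// platform's tls_get_addr helper during final code emission; its result comes
// back in the conventional return register, which the CopyFromReg below reads.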
13760 SDValue Flag = Chain.getValue(1);
13761 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
13764 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
13766 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13769 SDLoc dl(GA); // ? function entry point might be better
13770 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13771 DAG.getNode(X86ISD::GlobalBaseReg,
13772 SDLoc(), PtrVT), InFlag);
13773 InFlag = Chain.getValue(1);
13775 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
13778 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
13780 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13782 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
13783 X86::RAX, X86II::MO_TLSGD);
13786 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
13792 // Get the start address of the TLS block for this module.
13793 X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
13794 .getInfo<X86MachineFunctionInfo>();
13795 MFI->incNumLocalDynamicTLSAccesses();
13799 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
13800 X86II::MO_TLSLD, /*LocalDynamic=*/true);
13803 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13804 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
13805 InFlag = Chain.getValue(1);
13806 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
13807 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
13810 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
13814 unsigned char OperandFlags = X86II::MO_DTPOFF;
13815 unsigned WrapperKind = X86ISD::Wrapper;
13816 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13817 GA->getValueType(0),
13818 GA->getOffset(), OperandFlags);
13819 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13821 // Add x@dtpoff with the base.
13822 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
13825 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
13826 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13827 const EVT PtrVT, TLSModel::Model model,
13828 bool is64Bit, bool isPIC) {
13831 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
13832 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
13833 is64Bit ? 257 : 256));
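// Address spaces 256 and 257 denote the GS and FS segments respectively, so
// the load below reads %gs:0 on 32-bit targets and %fs:0 on 64-bit targets.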
13835 SDValue ThreadPointer =
13836 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0),
13837 MachinePointerInfo(Ptr), false, false, false, 0);
13839 unsigned char OperandFlags = 0;
13840 // Most TLS accesses are not RIP relative, even on x86-64. One exception is
13842 unsigned WrapperKind = X86ISD::Wrapper;
13843 if (model == TLSModel::LocalExec) {
13844 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
13845 } else if (model == TLSModel::InitialExec) {
13847 OperandFlags = X86II::MO_GOTTPOFF;
13848 WrapperKind = X86ISD::WrapperRIP;
13850 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
13853 llvm_unreachable("Unexpected model");
13856 // emit "addl x@ntpoff,%eax" (local exec)
13857 // or "addl x@indntpoff,%eax" (initial exec)
13858 // or "addl x@gotntpoff(%ebx),%eax" (initial exec, 32-bit pic)
13860 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
13861 GA->getOffset(), OperandFlags);
13862 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13864 if (model == TLSModel::InitialExec) {
13865 if (isPIC && !is64Bit) {
13866 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
13867 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
13871 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
13872 MachinePointerInfo::getGOT(), false, false, false, 0);
13875 // The address of the thread-local variable is the sum of the thread
13876 // pointer and the offset of the variable.
13877 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
13881 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
13883 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
13884 const GlobalValue *GV = GA->getGlobal();
13886 if (Subtarget->isTargetELF()) {
13887 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
13890 case TLSModel::GeneralDynamic:
13891 if (Subtarget->is64Bit())
13892 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
13893 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
13894 case TLSModel::LocalDynamic:
13895 return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(),
13896 Subtarget->is64Bit());
13897 case TLSModel::InitialExec:
13898 case TLSModel::LocalExec:
13899 return LowerToTLSExecModel(
13900 GA, DAG, getPointerTy(), model, Subtarget->is64Bit(),
13901 DAG.getTarget().getRelocationModel() == Reloc::PIC_);
13903 llvm_unreachable("Unknown TLS model.");
13906 if (Subtarget->isTargetDarwin()) {
13907 // Darwin only has one model of TLS. Lower to that.
13908 unsigned char OpFlag = 0;
13909 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
13910 X86ISD::WrapperRIP : X86ISD::Wrapper;
13912 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13913 // global base reg.
13914 bool PIC32 = (DAG.getTarget().getRelocationModel() == Reloc::PIC_) &&
13915 !Subtarget->is64Bit();
13917 OpFlag = X86II::MO_TLVP_PIC_BASE;
13919 OpFlag = X86II::MO_TLVP;
13921 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
13922 GA->getValueType(0),
13923 GA->getOffset(), OpFlag);
13924 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13926 // With PIC32, the address is actually $g + Offset.
13928 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13929 DAG.getNode(X86ISD::GlobalBaseReg,
13930 SDLoc(), getPointerTy()),
13933 // Lowering the machine isd will make sure everything is in the right
13935 SDValue Chain = DAG.getEntryNode();
13936 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13937 SDValue Args[] = { Chain, Offset };
13938 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
13940 // TLSCALL will be codegen'ed as a call. Inform MFI that the function has calls.
13941 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13942 MFI->setAdjustsStack(true);
13944 // And our return value (tls address) is in the standard call return value
13946 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
13947 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
13948 Chain.getValue(1));
13951 if (Subtarget->isTargetKnownWindowsMSVC() ||
13952 Subtarget->isTargetWindowsGNU()) {
13953 // Just use the implicit TLS architecture
13954 // Need to generate something similar to:
13955 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
13957 // mov ecx, dword [rel _tls_index]; Load index (from C runtime)
13958 // mov rcx, qword [rdx+rcx*8]
13959 // mov eax, .tls$:tlsvar
13960 // [rax+rcx] contains the address
13961 // Windows 64bit: gs:0x58
13962 // Windows 32bit: fs:__tls_array
13965 SDValue Chain = DAG.getEntryNode();
13967 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
13968 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
13969 // use its literal value of 0x2C.
13970 Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
13971 ? Type::getInt8PtrTy(*DAG.getContext(),
13973 : Type::getInt32PtrTy(*DAG.getContext(),
13977 Subtarget->is64Bit()
13978 ? DAG.getIntPtrConstant(0x58)
13979 : (Subtarget->isTargetWindowsGNU()
13980 ? DAG.getIntPtrConstant(0x2C)
13981 : DAG.getExternalSymbol("_tls_array", getPointerTy()));
13983 SDValue ThreadPointer =
13984 DAG.getLoad(getPointerTy(), dl, Chain, TlsArray,
13985 MachinePointerInfo(Ptr), false, false, false, 0);
13987 // Load the _tls_index variable
13988 SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
13989 if (Subtarget->is64Bit())
13990 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain,
13991 IDX, MachinePointerInfo(), MVT::i32,
13992 false, false, false, 0);
13994 IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
13995 false, false, false, 0);
13997 SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
13999 IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);
14001 SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
14002 res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(),
14003 false, false, false, 0);
14005 // Get the offset of the start of the .tls section.
14006 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
14007 GA->getValueType(0),
14008 GA->getOffset(), X86II::MO_SECREL);
14009 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA);
14011 // The address of the thread local variable is the add of the thread
14012 // pointer with the offset of the variable.
14013 return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset);
14016 llvm_unreachable("TLS not implemented for this target.");
14019 /// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
14020 /// and take a 2 x i32 value to shift plus a shift amount.
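/// As an illustrative sketch (assuming the i32 parts described above),
/// SHL_PARTS computes the wide shift (Hi:Lo) << Amt as:
///
///   if ((Amt & 32) == 0) {
///     HiOut = (Hi << Amt) | (Amt ? Lo >> (32 - Amt) : 0);   // i.e. SHLD
///     LoOut = Lo << Amt;
///   } else {
///     HiOut = Lo << (Amt & 31);
///     LoOut = 0;
///   }
///
/// SRA_PARTS/SRL_PARTS are the mirror image, filling with the sign bit or
/// zero in the large-shift case.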
14021 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
14022 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
14023 MVT VT = Op.getSimpleValueType();
14024 unsigned VTBits = VT.getSizeInBits();
14026 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
14027 SDValue ShOpLo = Op.getOperand(0);
14028 SDValue ShOpHi = Op.getOperand(1);
14029 SDValue ShAmt = Op.getOperand(2);
14030 // X86ISD::SHLD and X86ISD::SHRD have defined overflow behavior but the
14031 // generic ISD nodes don't. Insert an AND to be safe; it's optimized away.
14033 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
14034 DAG.getConstant(VTBits - 1, MVT::i8));
14035 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
14036 DAG.getConstant(VTBits - 1, MVT::i8))
14037 : DAG.getConstant(0, VT);
14039 SDValue Tmp2, Tmp3;
14040 if (Op.getOpcode() == ISD::SHL_PARTS) {
14041 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
14042 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
14044 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
14045 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
14048 // If the shift amount is larger than or equal to the width of a part, we can't
14049 // rely on the results of shld/shrd. Insert a test and select the appropriate
14050 // values for large shift amounts.
14051 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
14052 DAG.getConstant(VTBits, MVT::i8));
14053 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
14054 AndNode, DAG.getConstant(0, MVT::i8));
14057 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
14058 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
14059 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
14061 if (Op.getOpcode() == ISD::SHL_PARTS) {
14062 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
14063 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
14065 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
14066 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
14069 SDValue Ops[2] = { Lo, Hi };
14070 return DAG.getMergeValues(Ops, dl);
14073 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
14074 SelectionDAG &DAG) const {
14075 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
14078 if (SrcVT.isVector()) {
14079 if (SrcVT.getVectorElementType() == MVT::i1) {
14080 MVT IntegerVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
14081 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14082 DAG.getNode(ISD::SIGN_EXTEND, dl, IntegerVT,
14083 Op.getOperand(0)));
14088 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
14089 "Unknown SINT_TO_FP to lower!");
14091 // These are really Legal; return the operand so the caller accepts it as Legal.
14093 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
14095 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
14096 Subtarget->is64Bit()) {
14100 unsigned Size = SrcVT.getSizeInBits()/8;
14101 MachineFunction &MF = DAG.getMachineFunction();
14102 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
14103 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14104 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14106 MachinePointerInfo::getFixedStack(SSFI),
14108 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
14111 SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
14113 SelectionDAG &DAG) const {
14117 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
14119 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
14121 Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
14123 unsigned ByteSize = SrcVT.getSizeInBits()/8;
14125 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
14126 MachineMemOperand *MMO;
14128 int SSFI = FI->getIndex();
14130 DAG.getMachineFunction()
14131 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14132 MachineMemOperand::MOLoad, ByteSize, ByteSize);
14134 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
14135 StackSlot = StackSlot.getOperand(1);
14137 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
14138 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
14140 Tys, Ops, SrcVT, MMO);
14143 Chain = Result.getValue(1);
14144 SDValue InFlag = Result.getValue(2);
14146 // FIXME: Currently the FST is flagged to the FILD_FLAG. This
14147 // shouldn't be necessary except that RFP cannot be live across
14148 // multiple blocks. When stackifier is fixed, they can be uncoupled.
14149 MachineFunction &MF = DAG.getMachineFunction();
14150 unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
14151 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
14152 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14153 Tys = DAG.getVTList(MVT::Other);
14155 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
14157 MachineMemOperand *MMO =
14158 DAG.getMachineFunction()
14159 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14160 MachineMemOperand::MOStore, SSFISize, SSFISize);
14162 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
14163 Ops, Op.getValueType(), MMO);
14164 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
14165 MachinePointerInfo::getFixedStack(SSFI),
14166 false, false, false, 0);
14172 // LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
14173 SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
14174 SelectionDAG &DAG) const {
14175 // This algorithm is not obvious. Here is what we're trying to output:
14178 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
14179 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
14181 haddpd %xmm0, %xmm0
14183 pshufd $0x4e, %xmm0, %xmm1
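// Why this works (illustrative math, not part of the emitted sequence): for
// x = Hi * 2^32 + Lo, the punpckldq above builds the two doubles
//   D0 = 2^52 + Lo        (bit pattern 0x43300000'xxxxxxxx)
//   D1 = 2^84 + Hi * 2^32 (bit pattern 0x45300000'xxxxxxxx)
// Subtracting c1 = { 2^52, 2^84 } leaves { (double)Lo, (double)Hi * 2^32 },
// and the horizontal add produces (double)x with a single final rounding.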
14189 LLVMContext *Context = DAG.getContext();
14191 // Build some magic constants.
14192 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
14193 Constant *C0 = ConstantDataVector::get(*Context, CV0);
14194 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);
14196 SmallVector<Constant*,2> CV1;
14198 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
14199 APInt(64, 0x4330000000000000ULL))));
14201 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
14202 APInt(64, 0x4530000000000000ULL))));
14203 Constant *C1 = ConstantVector::get(CV1);
14204 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);
14206 // Load the 64-bit value into an XMM register.
14207 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
14209 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
14210 MachinePointerInfo::getConstantPool(),
14211 false, false, false, 16);
14212 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32,
14213 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1),
14216 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
14217 MachinePointerInfo::getConstantPool(),
14218 false, false, false, 16);
14219 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1);
14220 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
14223 if (Subtarget->hasSSE3()) {
14224 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
14225 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
14227 SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub);
14228 SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
14230 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
14231 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle),
14235 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
14236 DAG.getIntPtrConstant(0));
14239 // LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
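// The trick, sketched in C (illustrative only; bit_cast is shorthand for a
// bitwise reinterpretation): or'ing the 32-bit value into the mantissa of
// 2^52 yields a double equal to exactly 2^52 + x, so subtracting the bias
// recovers x without any signedness issues:
//
//   uint64_t bits = 0x4330000000000000ULL | (uint64_t)x;  // 2^52 + x
//   double   res  = bit_cast<double>(bits) - 0x1.0p52;    // == (double)x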
14240 SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
14241 SelectionDAG &DAG) const {
14243 // FP constant to bias correct the final result.
14244 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
14247 // Load the 32-bit value into an XMM register.
14248 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
14251 // Zero out the upper parts of the register.
14252 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
14254 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14255 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
14256 DAG.getIntPtrConstant(0));
14258 // Or the load with the bias.
14259 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
14260 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14261 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14262 MVT::v2f64, Load)),
14263 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14264 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14265 MVT::v2f64, Bias)));
14266 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14267 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
14268 DAG.getIntPtrConstant(0));
14270 // Subtract the bias.
14271 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
14273 // Handle final rounding.
14274 EVT DestVT = Op.getValueType();
14276 if (DestVT.bitsLT(MVT::f64))
14277 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
14278 DAG.getIntPtrConstant(0));
14279 if (DestVT.bitsGT(MVT::f64))
14280 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
14282 // Handle final rounding.
14286 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
14287 const X86Subtarget &Subtarget) {
14288 // The algorithm is the following:
14289 // #ifdef __SSE4_1__
14290 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14291 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14292 // (uint4) 0x53000000, 0xaa);
14294 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14295 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14297 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14298 // return (float4) lo + fhi;
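// Why the constants work (illustrative): interpreted as floats, 0x4b000000 is
// 2^23 and 0x53000000 is 2^39, so after the blend/or steps
//   (float)lo == 2^23 + (v & 0xffff)
//   (float)hi == 2^39 + (v >> 16) * 2^16
// Subtracting (2^39 + 2^23) from hi cancels both biases, and the final add
// reconstructs v with a single rounding step.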
14301 SDValue V = Op->getOperand(0);
14302 EVT VecIntVT = V.getValueType();
14303 bool Is128 = VecIntVT == MVT::v4i32;
14304 EVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
14305 // If we convert to something other than the supported type, e.g., to v4f64, bail out early.
14307 if (VecFloatVT != Op->getValueType(0))
14310 unsigned NumElts = VecIntVT.getVectorNumElements();
14311 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
14312 "Unsupported custom type");
14313 assert(NumElts <= 8 && "The size of the constant array must be fixed");
14315 // In the #ifdef/#else code, we have in common:
14316 // - The vector of constants:
14322 // Create the splat vector for 0x4b000000.
14323 SDValue CstLow = DAG.getConstant(0x4b000000, MVT::i32);
14324 SDValue CstLowArray[] = {CstLow, CstLow, CstLow, CstLow,
14325 CstLow, CstLow, CstLow, CstLow};
14326 SDValue VecCstLow = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14327 makeArrayRef(&CstLowArray[0], NumElts));
14328 // Create the splat vector for 0x53000000.
14329 SDValue CstHigh = DAG.getConstant(0x53000000, MVT::i32);
14330 SDValue CstHighArray[] = {CstHigh, CstHigh, CstHigh, CstHigh,
14331 CstHigh, CstHigh, CstHigh, CstHigh};
14332 SDValue VecCstHigh = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14333 makeArrayRef(&CstHighArray[0], NumElts));
14335 // Create the right shift.
14336 SDValue CstShift = DAG.getConstant(16, MVT::i32);
14337 SDValue CstShiftArray[] = {CstShift, CstShift, CstShift, CstShift,
14338 CstShift, CstShift, CstShift, CstShift};
14339 SDValue VecCstShift = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14340 makeArrayRef(&CstShiftArray[0], NumElts));
14341 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
14344 if (Subtarget.hasSSE41()) {
14345 EVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
14346 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14347 SDValue VecCstLowBitcast =
14348 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstLow);
14349 SDValue VecBitcast = DAG.getNode(ISD::BITCAST, DL, VecI16VT, V);
14350 // Low will be bitcasted right away, so do not bother bitcasting back to its original type.
14352 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
14353 VecCstLowBitcast, DAG.getConstant(0xaa, MVT::i32));
14354 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14355 // (uint4) 0x53000000, 0xaa);
14356 SDValue VecCstHighBitcast =
14357 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstHigh);
14358 SDValue VecShiftBitcast =
14359 DAG.getNode(ISD::BITCAST, DL, VecI16VT, HighShift);
14360 // High will be bitcasted right away, so do not bother bitcasting back to
14361 // its original type.
14362 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
14363 VecCstHighBitcast, DAG.getConstant(0xaa, MVT::i32));
14365 SDValue CstMask = DAG.getConstant(0xffff, MVT::i32);
14366 SDValue VecCstMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, CstMask,
14367 CstMask, CstMask, CstMask);
14368 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14369 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
14370 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
14372 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14373 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
14376 // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
14377 SDValue CstFAdd = DAG.getConstantFP(
14378 APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), MVT::f32);
14379 SDValue CstFAddArray[] = {CstFAdd, CstFAdd, CstFAdd, CstFAdd,
14380 CstFAdd, CstFAdd, CstFAdd, CstFAdd};
14381 SDValue VecCstFAdd = DAG.getNode(ISD::BUILD_VECTOR, DL, VecFloatVT,
14382 makeArrayRef(&CstFAddArray[0], NumElts));
14384 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14385 SDValue HighBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, High);
14387 DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
14388 // return (float4) lo + fhi;
14389 SDValue LowBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, Low);
14390 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
14393 SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
14394 SelectionDAG &DAG) const {
14395 SDValue N0 = Op.getOperand(0);
14396 MVT SVT = N0.getSimpleValueType();
14399 switch (SVT.SimpleTy) {
14401 llvm_unreachable("Custom UINT_TO_FP is not supported!");
14406 MVT NVT = MVT::getVectorVT(MVT::i32, SVT.getVectorNumElements());
14407 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14408 DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
14412 return lowerUINT_TO_FP_vXi32(Op, DAG, *Subtarget);
14414 llvm_unreachable(nullptr);
14417 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
14418 SelectionDAG &DAG) const {
14419 SDValue N0 = Op.getOperand(0);
14422 if (Op.getValueType().isVector())
14423 return lowerUINT_TO_FP_vec(Op, DAG);
14425 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
14426 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
14427 // the optimization here.
14428 if (DAG.SignBitIsZero(N0))
14429 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
14431 MVT SrcVT = N0.getSimpleValueType();
14432 MVT DstVT = Op.getSimpleValueType();
14433 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
14434 return LowerUINT_TO_FP_i64(Op, DAG);
14435 if (SrcVT == MVT::i32 && X86ScalarSSEf64)
14436 return LowerUINT_TO_FP_i32(Op, DAG);
14437 if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
14440 // Make a 64-bit buffer, and use it to build an FILD.
14441 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
14442 if (SrcVT == MVT::i32) {
14443 SDValue WordOff = DAG.getConstant(4, getPointerTy());
14444 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
14445 getPointerTy(), StackSlot, WordOff);
14446 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14447 StackSlot, MachinePointerInfo(),
14449 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
14450 OffsetSlot, MachinePointerInfo(),
14452 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
14456 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
14457 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14458 StackSlot, MachinePointerInfo(),
14460 // For i64 source, we need to add the appropriate power of 2 if the input
14461 // was negative. This is the same as the optimization in
14462 // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
14463 // we must be careful to do the computation in x87 extended precision, not
14464 // in SSE. (The generic code can't know it's OK to do this, or how to.)
14465 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
14466 MachineMemOperand *MMO =
14467 DAG.getMachineFunction()
14468 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14469 MachineMemOperand::MOLoad, 8, 8);
14471 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
14472 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
14473 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
14476 APInt FF(32, 0x5F800000ULL);
14478 // Check whether the sign bit is set.
14479 SDValue SignSet = DAG.getSetCC(dl,
14480 getSetCCResultType(*DAG.getContext(), MVT::i64),
14481 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
14484 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
14485 SDValue FudgePtr = DAG.getConstantPool(
14486 ConstantInt::get(*DAG.getContext(), FF.zext(64)),
14489 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
14490 SDValue Zero = DAG.getIntPtrConstant(0);
14491 SDValue Four = DAG.getIntPtrConstant(4);
14492 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
14494 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);
14496 // Load the value out, extending it from f32 to f80.
14497 // FIXME: Avoid the extend by constructing the right constant pool?
14498 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
14499 FudgePtr, MachinePointerInfo::getConstantPool(),
14500 MVT::f32, false, false, false, 4);
14501 // Extend everything to 80 bits to force it to be done on x87.
14502 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
14503 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
14506 std::pair<SDValue,SDValue>
14507 X86TargetLowering:: FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
14508 bool IsSigned, bool IsReplace) const {
14511 EVT DstTy = Op.getValueType();
14513 if (!IsSigned && !isIntegerTypeFTOL(DstTy)) {
14514 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
14518 assert(DstTy.getSimpleVT() <= MVT::i64 &&
14519 DstTy.getSimpleVT() >= MVT::i16 &&
14520 "Unknown FP_TO_INT to lower!");
14522 // These are really Legal.
14523 if (DstTy == MVT::i32 &&
14524 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14525 return std::make_pair(SDValue(), SDValue());
14526 if (Subtarget->is64Bit() &&
14527 DstTy == MVT::i64 &&
14528 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14529 return std::make_pair(SDValue(), SDValue());
14531 // We lower FP->int64 either into FISTP64 followed by a load from a temporary
14532 // stack slot, or into the FTOL runtime function.
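// Note (describing the code below, not a separate specification): the FTOL
// path calls the MSVC runtime's _ftol2-style helper, which takes the value on
// the x87 stack and returns the 64-bit result in EDX:EAX; the FISTP path
// stores into the stack slot created here and reloads the integer from it.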
14533 MachineFunction &MF = DAG.getMachineFunction();
14534 unsigned MemSize = DstTy.getSizeInBits()/8;
14535 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14536 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14539 if (!IsSigned && isIntegerTypeFTOL(DstTy))
14540 Opc = X86ISD::WIN_FTOL;
14542 switch (DstTy.getSimpleVT().SimpleTy) {
14543 default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
14544 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
14545 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
14546 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
14549 SDValue Chain = DAG.getEntryNode();
14550 SDValue Value = Op.getOperand(0);
14551 EVT TheVT = Op.getOperand(0).getValueType();
14552 // FIXME This causes a redundant load/store if the SSE-class value is already
14553 // in memory, such as if it is on the callstack.
14554 if (isScalarFPTypeInSSEReg(TheVT)) {
14555 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
14556 Chain = DAG.getStore(Chain, DL, Value, StackSlot,
14557 MachinePointerInfo::getFixedStack(SSFI),
14559 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
14561 Chain, StackSlot, DAG.getValueType(TheVT)
14564 MachineMemOperand *MMO =
14565 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14566 MachineMemOperand::MOLoad, MemSize, MemSize);
14567 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
14568 Chain = Value.getValue(1);
14569 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14570 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14573 MachineMemOperand *MMO =
14574 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14575 MachineMemOperand::MOStore, MemSize, MemSize);
14577 if (Opc != X86ISD::WIN_FTOL) {
14578 // Build the FP_TO_INT*_IN_MEM
14579 SDValue Ops[] = { Chain, Value, StackSlot };
14580 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
14582 return std::make_pair(FIST, StackSlot);
14584 SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL,
14585 DAG.getVTList(MVT::Other, MVT::Glue),
14587 SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX,
14588 MVT::i32, ftol.getValue(1));
14589 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX,
14590 MVT::i32, eax.getValue(2));
14591 SDValue Ops[] = { eax, edx };
14592 SDValue pair = IsReplace
14593 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops)
14594 : DAG.getMergeValues(Ops, DL);
14595 return std::make_pair(pair, SDValue());
14599 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
14600 const X86Subtarget *Subtarget) {
14601 MVT VT = Op->getSimpleValueType(0);
14602 SDValue In = Op->getOperand(0);
14603 MVT InVT = In.getSimpleValueType();
14606 // Optimize vectors in AVX mode:
14609 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32.
14610 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
14611 // Concat upper and lower parts.
14614 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64.
14615 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
14616 // Concat upper and lower parts.
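//
// For instance (a sketch of the zero-extend case), v4i32 -> v4i64 becomes:
//   lo = unpcklo(v, zero)   // elements 0,1 zero-extended to i64
//   hi = unpckhi(v, zero)   // elements 2,3 zero-extended to i64
//   result = concat(lo, hi)
// For ANY_EXTEND the zero vector can be replaced by undef, since the high
// half of each element is unspecified.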
14619 if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) &&
14620 ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) &&
14621 ((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
14624 if (Subtarget->hasInt256())
14625 return DAG.getNode(X86ISD::VZEXT, dl, VT, In);
14627 SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
14628 SDValue Undef = DAG.getUNDEF(InVT);
14629 bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
14630 SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14631 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14633 MVT HVT = MVT::getVectorVT(VT.getVectorElementType(),
14634 VT.getVectorNumElements()/2);
14636 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo);
14637 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi);
14639 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
14642 static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
14643 SelectionDAG &DAG) {
14644 MVT VT = Op->getSimpleValueType(0);
14645 SDValue In = Op->getOperand(0);
14646 MVT InVT = In.getSimpleValueType();
14648 unsigned int NumElts = VT.getVectorNumElements();
14649 if (NumElts != 8 && NumElts != 16)
14652 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
14653 return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
14655 EVT ExtVT = (NumElts == 8)? MVT::v8i64 : MVT::v16i32;
14656 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14657 // Now we have only mask extension
14658 assert(InVT.getVectorElementType() == MVT::i1);
14659 SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
14660 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14661 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
14662 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14663 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14664 MachinePointerInfo::getConstantPool(),
14665 false, false, false, Alignment);
14667 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, DL, ExtVT, In, Ld);
14668 if (VT.is512BitVector())
14670 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Brcst);
14673 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14674 SelectionDAG &DAG) {
14675 if (Subtarget->hasFp256()) {
14676 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14684 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14685 SelectionDAG &DAG) {
14687 MVT VT = Op.getSimpleValueType();
14688 SDValue In = Op.getOperand(0);
14689 MVT SVT = In.getSimpleValueType();
14691 if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
14692 return LowerZERO_EXTEND_AVX512(Op, DAG);
14694 if (Subtarget->hasFp256()) {
14695 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14700 assert(!VT.is256BitVector() || !SVT.is128BitVector() ||
14701 VT.getVectorNumElements() != SVT.getVectorNumElements());
14705 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
14707 MVT VT = Op.getSimpleValueType();
14708 SDValue In = Op.getOperand(0);
14709 MVT InVT = In.getSimpleValueType();
14711 if (VT == MVT::i1) {
14712 assert((InVT.isInteger() && (InVT.getSizeInBits() <= 64)) &&
14713 "Invalid scalar TRUNCATE operation");
14714 if (InVT.getSizeInBits() >= 32)
14716 In = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, In);
14717 return DAG.getNode(ISD::TRUNCATE, DL, VT, In);
14719 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
14720 "Invalid TRUNCATE operation");
14722 if (InVT.is512BitVector() || VT.getVectorElementType() == MVT::i1) {
14723 if (VT.getVectorElementType().getSizeInBits() >=8)
14724 return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
14726 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
14727 unsigned NumElts = InVT.getVectorNumElements();
14728 assert ((NumElts == 8 || NumElts == 16) && "Unexpected vector type");
14729 if (InVT.getSizeInBits() < 512) {
14730 MVT ExtVT = (NumElts == 16)? MVT::v16i32 : MVT::v8i64;
14731 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
14735 SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
14736 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14737 SDValue CP = DAG.getConstantPool(C, getPointerTy());
14738 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14739 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14740 MachinePointerInfo::getConstantPool(),
14741 false, false, false, Alignment);
14742 SDValue OneV = DAG.getNode(X86ISD::VBROADCAST, DL, InVT, Ld);
14743 SDValue And = DAG.getNode(ISD::AND, DL, InVT, OneV, In);
14744 return DAG.getNode(X86ISD::TESTM, DL, VT, And, And);
14747 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
14748 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
14749 if (Subtarget->hasInt256()) {
14750 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
14751 In = DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, In);
14752 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32),
14754 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
14755 DAG.getIntPtrConstant(0));
14758 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14759 DAG.getIntPtrConstant(0));
14760 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14761 DAG.getIntPtrConstant(2));
14762 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14763 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14764 static const int ShufMask[] = {0, 2, 4, 6};
14765 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
14768 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
14769 // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
14770 if (Subtarget->hasInt256()) {
14771 In = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, In);
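// The mask built below selects bytes {0,1, 4,5, 8,9, 12,13} of each 128-bit
// lane (the low i16 of every i32 element) and writes 0x80 for the rest, which
// PSHUFB treats as "produce zero"; the two per-lane results are then gathered
// by the {0, 2} qword shuffle further down.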
14773 SmallVector<SDValue,32> pshufbMask;
14774 for (unsigned i = 0; i < 2; ++i) {
14775 pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8));
14776 pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8));
14777 pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8));
14778 pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8));
14779 pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8));
14780 pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8));
14781 pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8));
14782 pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8));
14783 for (unsigned j = 0; j < 8; ++j)
14784 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
14786 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
14787 In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
14788 In = DAG.getNode(ISD::BITCAST, DL, MVT::v4i64, In);
14790 static const int ShufMask[] = {0, 2, -1, -1};
14791 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
14793 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14794 DAG.getIntPtrConstant(0));
14795 return DAG.getNode(ISD::BITCAST, DL, VT, In);
14798 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14799 DAG.getIntPtrConstant(0));
14801 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14802 DAG.getIntPtrConstant(4));
14804 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpLo);
14805 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpHi);
14807 // The PSHUFB mask:
14808 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
14809 -1, -1, -1, -1, -1, -1, -1, -1};
14811 SDValue Undef = DAG.getUNDEF(MVT::v16i8);
14812 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, Undef, ShufMask1);
14813 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, Undef, ShufMask1);
14815 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14816 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14818 // The MOVLHPS Mask:
14819 static const int ShufMask2[] = {0, 1, 4, 5};
14820 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
14821 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, res);
14824 // Handle truncation of V256 to V128 using shuffles.
14825 if (!VT.is128BitVector() || !InVT.is256BitVector())
14828 assert(Subtarget->hasFp256() && "256-bit vector without AVX!");
14830 unsigned NumElems = VT.getVectorNumElements();
14831 MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
14833 SmallVector<int, 16> MaskVec(NumElems * 2, -1);
14834 // Prepare truncation shuffle mask
14835 for (unsigned i = 0; i != NumElems; ++i)
14836 MaskVec[i] = i * 2;
14837 SDValue V = DAG.getVectorShuffle(NVT, DL,
14838 DAG.getNode(ISD::BITCAST, DL, NVT, In),
14839 DAG.getUNDEF(NVT), &MaskVec[0]);
14840 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
14841 DAG.getIntPtrConstant(0));
14844 SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
14845 SelectionDAG &DAG) const {
14846 assert(!Op.getSimpleValueType().isVector());
14848 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14849 /*IsSigned=*/ true, /*IsReplace=*/ false);
14850 SDValue FIST = Vals.first, StackSlot = Vals.second;
14851 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
14852 if (!FIST.getNode()) return Op;
14854 if (StackSlot.getNode())
14855 // Load the result.
14856 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14857 FIST, StackSlot, MachinePointerInfo(),
14858 false, false, false, 0);
14860 // The node is the result.
14864 SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
14865 SelectionDAG &DAG) const {
14866 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14867 /*IsSigned=*/ false, /*IsReplace=*/ false);
14868 SDValue FIST = Vals.first, StackSlot = Vals.second;
14869 assert(FIST.getNode() && "Unexpected failure");
14871 if (StackSlot.getNode())
14872 // Load the result.
14873 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14874 FIST, StackSlot, MachinePointerInfo(),
14875 false, false, false, 0);
14877 // The node is the result.
14881 static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
14883 MVT VT = Op.getSimpleValueType();
14884 SDValue In = Op.getOperand(0);
14885 MVT SVT = In.getSimpleValueType();
14887 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
14889 return DAG.getNode(X86ISD::VFPEXT, DL, VT,
14890 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
14891 In, DAG.getUNDEF(SVT)));
14894 /// The only differences between FABS and FNEG are the mask and the logic op.
14895 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
14896 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
14897 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
14898 "Wrong opcode for lowering FABS or FNEG.");
14900 bool IsFABS = (Op.getOpcode() == ISD::FABS);
14902 // If this is a FABS and it has an FNEG user, bail out to fold the combination
14903 // into an FNABS. We'll lower the FABS after that if it is still in use.
14905 for (SDNode *User : Op->uses())
14906 if (User->getOpcode() == ISD::FNEG)
14909 SDValue Op0 = Op.getOperand(0);
14910 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
14913 MVT VT = Op.getSimpleValueType();
14914 // Assume scalar op for initialization; update for vector if needed.
14915 // Note that there are no scalar bitwise logical SSE/AVX instructions, so we
14916 // generate a 16-byte vector constant and logic op even for the scalar case.
14917 // Using a 16-byte mask allows folding the load of the mask with
14918 // the logic op, so it can save (~4 bytes) on code size.
14920 unsigned NumElts = VT == MVT::f64 ? 2 : 4;
14921 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
14922 // decide if we should generate a 16-byte constant mask when we only need 4 or
14923 // 8 bytes for the scalar case.
14924 if (VT.isVector()) {
14925 EltVT = VT.getVectorElementType();
14926 NumElts = VT.getVectorNumElements();
14929 unsigned EltBits = EltVT.getSizeInBits();
14930 LLVMContext *Context = DAG.getContext();
14931 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
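// E.g. for f32: FABS uses AND with 0x7FFFFFFF, FNEG uses XOR with 0x80000000,
// and the folded FNABS case uses OR with 0x80000000 to force the sign bit on.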
14933 IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignBit(EltBits);
14934 Constant *C = ConstantInt::get(*Context, MaskElt);
14935 C = ConstantVector::getSplat(NumElts, C);
14936 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14937 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy());
14938 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
14939 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
14940 MachinePointerInfo::getConstantPool(),
14941 false, false, false, Alignment);
14943 if (VT.isVector()) {
14944 // For a vector, cast operands to a vector type, perform the logic op,
14945 // and cast the result back to the original value type.
14946 MVT VecVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
14947 SDValue MaskCasted = DAG.getNode(ISD::BITCAST, dl, VecVT, Mask);
14948 SDValue Operand = IsFNABS ?
14949 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0.getOperand(0)) :
14950 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0);
14951 unsigned BitOp = IsFABS ? ISD::AND : IsFNABS ? ISD::OR : ISD::XOR;
14952 return DAG.getNode(ISD::BITCAST, dl, VT,
14953 DAG.getNode(BitOp, dl, VecVT, Operand, MaskCasted));
14956 // If not vector, then scalar.
14957 unsigned BitOp = IsFABS ? X86ISD::FAND : IsFNABS ? X86ISD::FOR : X86ISD::FXOR;
14958 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
14959 return DAG.getNode(BitOp, dl, VT, Operand, Mask);
14962 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
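// The lowering below implements the usual bitwise identity (sketch):
//   copysign(Mag, Sgn) = (Mag & ~SignMask) | (Sgn & SignMask)
// where SignMask has only the sign bit set for the value's FP width.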
14963 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14964 LLVMContext *Context = DAG.getContext();
14965 SDValue Op0 = Op.getOperand(0);
14966 SDValue Op1 = Op.getOperand(1);
14968 MVT VT = Op.getSimpleValueType();
14969 MVT SrcVT = Op1.getSimpleValueType();
14971 // If second operand is smaller, extend it first.
14972 if (SrcVT.bitsLT(VT)) {
14973 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
14976 // And if it is bigger, shrink it first.
14977 if (SrcVT.bitsGT(VT)) {
14978 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1));
14982 // At this point the operands and the result should have the same
14983 // type, and that won't be f80 since that is not custom lowered.
14985 const fltSemantics &Sem =
14986 VT == MVT::f64 ? APFloat::IEEEdouble : APFloat::IEEEsingle;
14987 const unsigned SizeInBits = VT.getSizeInBits();
14989 SmallVector<Constant *, 4> CV(
14990 VT == MVT::f64 ? 2 : 4,
14991 ConstantFP::get(*Context, APFloat(Sem, APInt(SizeInBits, 0))));
14993 // First, clear all bits but the sign bit from the second operand (sign).
14994 CV[0] = ConstantFP::get(*Context,
14995 APFloat(Sem, APInt::getHighBitsSet(SizeInBits, 1)));
14996 Constant *C = ConstantVector::get(CV);
14997 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
14998 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
14999 MachinePointerInfo::getConstantPool(),
15000 false, false, false, 16);
15001 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);
15003 // Next, clear the sign bit from the first operand (magnitude).
15004 // If it's a constant, we can clear it here.
15005 if (ConstantFPSDNode *Op0CN = dyn_cast<ConstantFPSDNode>(Op0)) {
15006 APFloat APF = Op0CN->getValueAPF();
15007 // If the magnitude is a positive zero, the sign bit alone is enough.
15008 if (APF.isPosZero())
15011 CV[0] = ConstantFP::get(*Context, APF);
15013 CV[0] = ConstantFP::get(
15015 APFloat(Sem, APInt::getLowBitsSet(SizeInBits, SizeInBits - 1)));
15017 C = ConstantVector::get(CV);
15018 CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
15019 SDValue Val = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
15020 MachinePointerInfo::getConstantPool(),
15021 false, false, false, 16);
15022 // If the magnitude operand wasn't a constant, we need to AND out the sign.
15023 if (!isa<ConstantFPSDNode>(Op0))
15024 Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Val);
15026 // OR the magnitude value with the sign bit.
15027 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
15030 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
15031 SDValue N0 = Op.getOperand(0);
15033 MVT VT = Op.getSimpleValueType();
15035 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
15036 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
15037 DAG.getConstant(1, VT));
15038 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
15041 // Check whether an OR'd tree is PTEST-able.
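// For example (a sketch of the pattern being matched): an all-zero test such
// as
//   (or (extractelt V, 0), (or (extractelt V, 1), ...)) == 0
// where every lane of a 128/256-bit vector is extracted and OR'd together can
// be emitted as a single "ptest V, V" plus a setcc/branch on ZF.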
15042 static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
15043 SelectionDAG &DAG) {
15044 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
15046 if (!Subtarget->hasSSE41())
15049 if (!Op->hasOneUse())
15052 SDNode *N = Op.getNode();
15055 SmallVector<SDValue, 8> Opnds;
15056 DenseMap<SDValue, unsigned> VecInMap;
15057 SmallVector<SDValue, 8> VecIns;
15058 EVT VT = MVT::Other;
15060 // Recognize a special case where a vector is cast into a wide integer to test all 0s.
15062 Opnds.push_back(N->getOperand(0));
15063 Opnds.push_back(N->getOperand(1));
15065 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
15066 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
15067 // BFS traverse all OR'd operands.
15068 if (I->getOpcode() == ISD::OR) {
15069 Opnds.push_back(I->getOperand(0));
15070 Opnds.push_back(I->getOperand(1));
15071 // Re-evaluate the number of nodes to be traversed.
15072 e += 2; // 2 more nodes (LHS and RHS) are pushed.
15076 // Quit if this is not an EXTRACT_VECTOR_ELT.
15077 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
15080 // Quit if without a constant index.
15081 SDValue Idx = I->getOperand(1);
15082 if (!isa<ConstantSDNode>(Idx))
15085 SDValue ExtractedFromVec = I->getOperand(0);
15086 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
15087 if (M == VecInMap.end()) {
15088 VT = ExtractedFromVec.getValueType();
15089 // Quit if not 128/256-bit vector.
15090 if (!VT.is128BitVector() && !VT.is256BitVector())
15092 // Quit if not the same type.
15093 if (VecInMap.begin() != VecInMap.end() &&
15094 VT != VecInMap.begin()->first.getValueType())
15096 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
15097 VecIns.push_back(ExtractedFromVec);
15099 M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
15102 assert((VT.is128BitVector() || VT.is256BitVector()) &&
15103 "Not extracted from 128-/256-bit vector.");
15105 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
15107 for (DenseMap<SDValue, unsigned>::const_iterator
15108 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
15109 // Quit if not all elements are used.
15110 if (I->second != FullMask)
15114 EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
15116 // Cast all vectors into TestVT for PTEST.
15117 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
15118 VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]);
15120 // If more than one full vector is evaluated, OR them first before PTEST.
15121 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
15122 // Each iteration will OR 2 nodes and append the result until there is only
15123 // 1 node left, i.e. the final OR'd value of all vectors.
15124 SDValue LHS = VecIns[Slot];
15125 SDValue RHS = VecIns[Slot + 1];
15126 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
15129 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
15130 VecIns.back(), VecIns.back());
15133 /// \brief return true if \c Op has a use that doesn't just read flags.
15134 static bool hasNonFlagsUse(SDValue Op) {
15135 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
15137 SDNode *User = *UI;
15138 unsigned UOpNo = UI.getOperandNo();
15139 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
15140 // Look past the truncate.
15141 UOpNo = User->use_begin().getOperandNo();
15142 User = *User->use_begin();
15145 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
15146 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
15152 /// Emit nodes that will be selected as "test Op0,Op0", or something equivalent.
15154 SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
15155 SelectionDAG &DAG) const {
15156 if (Op.getValueType() == MVT::i1) {
15157 SDValue ExtOp = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Op);
15158 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, ExtOp,
15159 DAG.getConstant(0, MVT::i8));
15161 // CF and OF aren't always set the way we want. Determine which
15162 // of these we need.
15163 bool NeedCF = false;
15164 bool NeedOF = false;
15167 case X86::COND_A: case X86::COND_AE:
15168 case X86::COND_B: case X86::COND_BE:
15171 case X86::COND_G: case X86::COND_GE:
15172 case X86::COND_L: case X86::COND_LE:
15173 case X86::COND_O: case X86::COND_NO: {
15174 // Check if we really need to set the
15175 // Overflow flag. If NoSignedWrap is present
15176 // that is not actually needed.
15177 switch (Op->getOpcode()) {
15182 const BinaryWithFlagsSDNode *BinNode =
15183 cast<BinaryWithFlagsSDNode>(Op.getNode());
15184 if (BinNode->hasNoSignedWrap())
15194 // See if we can use the EFLAGS value from the operand instead of
15195 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
15196 // we prove that the arithmetic won't overflow, we can't use OF or CF.
15197 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
15198 // Emit a CMP with 0, which is the TEST pattern.
15199 //if (Op.getValueType() == MVT::i1)
15200 // return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op,
15201 // DAG.getConstant(0, MVT::i1));
15202 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15203 DAG.getConstant(0, Op.getValueType()));
15205 unsigned Opcode = 0;
15206 unsigned NumOperands = 0;
15208 // Truncate operations may prevent the merge of the SETCC instruction
15209 // and the arithmetic instruction before it. Attempt to truncate the operands
15210 // of the arithmetic instruction and use a reduced bit-width instruction.
15211 bool NeedTruncation = false;
15212 SDValue ArithOp = Op;
15213 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
15214 SDValue Arith = Op->getOperand(0);
15215 // Both the trunc and the arithmetic op need to have one user each.
15216 if (Arith->hasOneUse())
15217 switch (Arith.getOpcode()) {
15224 NeedTruncation = true;
15230 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
15231 // which may be the result of a CAST. We use the variable 'Op', which is the
15232 // non-casted variable when we check for possible users.
15233 switch (ArithOp.getOpcode()) {
15235 // Due to an isel shortcoming, be conservative if this add is likely to be
15236 // selected as part of a load-modify-store instruction. When the root node
15237 // in a match is a store, isel doesn't know how to remap non-chain non-flag
15238 // uses of other nodes in the match, such as the ADD in this case. This
15239 // leads to the ADD being left around and reselected, with the result being
15240 // two adds in the output. Alas, even if none of our users are stores, that
15241 // doesn't prove we're O.K. Ergo, if we have any parents that aren't
15242 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
15243 // climbing the DAG back to the root, and it doesn't seem to be worth the effort.
15245 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15246 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15247 if (UI->getOpcode() != ISD::CopyToReg &&
15248 UI->getOpcode() != ISD::SETCC &&
15249 UI->getOpcode() != ISD::STORE)
15252 if (ConstantSDNode *C =
15253 dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
15254 // An add of one will be selected as an INC.
15255 if (C->getAPIntValue() == 1 && !Subtarget->slowIncDec()) {
15256 Opcode = X86ISD::INC;
15261 // An add of negative one (subtract of one) will be selected as a DEC.
15262 if (C->getAPIntValue().isAllOnesValue() && !Subtarget->slowIncDec()) {
15263 Opcode = X86ISD::DEC;
15269 // Otherwise use a regular EFLAGS-setting add.
15270 Opcode = X86ISD::ADD;
15275 // If we have a constant logical shift that's only used in a comparison
15276 // against zero turn it into an equivalent AND. This allows turning it into
15277 // a TEST instruction later.
15278 if ((X86CC == X86::COND_E || X86CC == X86::COND_NE) && Op->hasOneUse() &&
15279 isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
15280 EVT VT = Op.getValueType();
15281 unsigned BitWidth = VT.getSizeInBits();
15282 unsigned ShAmt = Op->getConstantOperandVal(1);
15283 if (ShAmt >= BitWidth) // Avoid undefined shifts.
15285 APInt Mask = ArithOp.getOpcode() == ISD::SRL
15286 ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
15287 : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
15288 if (!Mask.isSignedIntN(32)) // Avoid large immediates.
15290 SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
15291 DAG.getConstant(Mask, VT));
15292 DAG.ReplaceAllUsesWith(Op, New);
15298 // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
15299 // because a TEST instruction will be better.
15300 if (!hasNonFlagsUse(Op))
15306 // Due to the ISEL shortcoming noted above, be conservative if this op is
15307 // likely to be selected as part of a load-modify-store instruction.
15308 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15309 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15310 if (UI->getOpcode() == ISD::STORE)
15313 // Otherwise use a regular EFLAGS-setting instruction.
15314 switch (ArithOp.getOpcode()) {
15315 default: llvm_unreachable("unexpected operator!");
15316 case ISD::SUB: Opcode = X86ISD::SUB; break;
15317 case ISD::XOR: Opcode = X86ISD::XOR; break;
15318 case ISD::AND: Opcode = X86ISD::AND; break;
15320 if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
15321 SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG);
15322 if (EFLAGS.getNode())
15325 Opcode = X86ISD::OR;
15339 return SDValue(Op.getNode(), 1);
15345 // If we found that truncation is beneficial, perform the truncation and update Op.
15347 if (NeedTruncation) {
15348 EVT VT = Op.getValueType();
15349 SDValue WideVal = Op->getOperand(0);
15350 EVT WideVT = WideVal.getValueType();
15351 unsigned ConvertedOp = 0;
15352 // Use a target machine opcode to prevent further DAGCombine
15353 // optimizations that may separate the arithmetic operations
15354 // from the setcc node.
15355 switch (WideVal.getOpcode()) {
15357 case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
15358 case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
15359 case ISD::AND: ConvertedOp = X86ISD::AND; break;
15360 case ISD::OR: ConvertedOp = X86ISD::OR; break;
15361 case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
15365 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15366 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
15367 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
15368 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
15369 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1);
15375 // Emit a CMP with 0, which is the TEST pattern.
15376 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15377 DAG.getConstant(0, Op.getValueType()));
15379 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
15380 SmallVector<SDValue, 4> Ops;
15381 for (unsigned i = 0; i != NumOperands; ++i)
15382 Ops.push_back(Op.getOperand(i));
15384 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
15385 DAG.ReplaceAllUsesWith(Op, New);
15386 return SDValue(New.getNode(), 1);
15389 /// Emit nodes that will be selected as "cmp Op0,Op1", or something equivalent.
15391 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
15392 SDLoc dl, SelectionDAG &DAG) const {
15393 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) {
15394 if (C->getAPIntValue() == 0)
15395 return EmitTest(Op0, X86CC, dl, DAG);
15397 if (Op0.getValueType() == MVT::i1)
15398 llvm_unreachable("Unexpected comparison operation for MVT::i1 operands");
15401 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
15402 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
15403 // Do the comparison at i32 if it's smaller, except on Atom.
15404 // This avoids subregister aliasing issues. Keep the smaller reference
15405 // if we're optimizing for size, however, as that'll allow better folding
15406 // of memory operations.
15407 if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
15408 !DAG.getMachineFunction().getFunction()->hasFnAttribute(
15409 Attribute::MinSize) &&
15410 !Subtarget->isAtom()) {
15411 unsigned ExtendOp =
15412 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
15413 Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
15414 Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
15416 // Use SUB instead of CMP to enable CSE between SUB and CMP.
15417 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
15418 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs,
15420 return SDValue(Sub.getNode(), 1);
15422 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
15425 /// Convert a comparison if required by the subtarget.
15426 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
15427 SelectionDAG &DAG) const {
15428 // If the subtarget does not support the FUCOMI instruction, floating-point
15429 // comparisons have to be converted.
15430 if (Subtarget->hasCMov() ||
15431 Cmp.getOpcode() != X86ISD::CMP ||
15432 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
15433 !Cmp.getOperand(1).getValueType().isFloatingPoint())
15436 // The instruction selector will select an FUCOM instruction instead of
15437 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
15438 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
15439 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
15441 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
15442 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
15443 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
15444 DAG.getConstant(8, MVT::i8));
15445 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
15446 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
15449 /// The minimum architected relative accuracy is 2^-12. We need one
15450 /// Newton-Raphson step to have a good float result (24 bits of precision).
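/// The refinement applied afterwards (by the code that uses this estimate) is
/// the usual reciprocal-square-root Newton-Raphson step, shown only as a
/// reminder of the math: x1 = x0 * (1.5 - 0.5 * a * x0 * x0), which roughly
/// doubles the number of correct bits per iteration.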
15451 SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
15452 DAGCombinerInfo &DCI,
15453 unsigned &RefinementSteps,
15454 bool &UseOneConstNR) const {
15455 // FIXME: We should use instruction latency models to calculate the cost of
15456 // each potential sequence, but this is very hard to do reliably because
15457 // at least Intel's Core* chips have variable timing based on the number of
15458 // significant digits in the divisor and/or sqrt operand.
15459 if (!Subtarget->useSqrtEst())
15462 EVT VT = Op.getValueType();
15464 // SSE1 has rsqrtss and rsqrtps.
15465 // TODO: Add support for AVX512 (v16f32).
15466 // It is likely not profitable to do this for f64 because a double-precision
15467 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
15468 // instructions: convert to single, rsqrtss, convert back to double, refine
15469 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
15470 // along with FMA, this could be a throughput win.
15471 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15472 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15473 RefinementSteps = 1;
15474 UseOneConstNR = false;
15475 return DCI.DAG.getNode(X86ISD::FRSQRT, SDLoc(Op), VT, Op);
15480 /// The minimum architected relative accuracy is 2^-12. We need one
15481 /// Newton-Raphson step to have a good float result (24 bits of precision).
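/// The refinement applied afterwards is the usual reciprocal Newton-Raphson
/// step, shown only as a reminder of the math: x1 = x0 * (2.0 - a * x0),
/// which roughly doubles the number of correct bits per iteration.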
15482 SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
15483 DAGCombinerInfo &DCI,
15484 unsigned &RefinementSteps) const {
15485 // FIXME: We should use instruction latency models to calculate the cost of
15486 // each potential sequence, but this is very hard to do reliably because
15487 // at least Intel's Core* chips have variable timing based on the number of
15488 // significant digits in the divisor.
15489 if (!Subtarget->useReciprocalEst())
15492 EVT VT = Op.getValueType();
15494 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
15495 // TODO: Add support for AVX512 (v16f32).
15496 // It is likely not profitable to do this for f64 because a double-precision
15497 // reciprocal estimate with refinement on x86 prior to FMA requires
15498 // 15 instructions: convert to single, rcpss, convert back to double, refine
15499 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
15500 // along with FMA, this could be a throughput win.
15501 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15502 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15503 RefinementSteps = ReciprocalEstimateRefinementSteps;
15504 return DCI.DAG.getNode(X86ISD::FRCP, SDLoc(Op), VT, Op);
15509 static bool isAllOnes(SDValue V) {
15510 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
15511 return C && C->isAllOnesValue();
15514 /// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
15515 /// if it's possible.
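/// For example (illustrative): "setcc (and X, (shl 1, N)), 0, eq/ne" can be
/// selected as "bt X, N" plus a SETAE/SETB on the carry flag, which avoids
/// materializing the mask when N is variable or the mask does not fit a TEST
/// immediate.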
15516 SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
15517 SDLoc dl, SelectionDAG &DAG) const {
15518 SDValue Op0 = And.getOperand(0);
15519 SDValue Op1 = And.getOperand(1);
15520 if (Op0.getOpcode() == ISD::TRUNCATE)
15521 Op0 = Op0.getOperand(0);
15522 if (Op1.getOpcode() == ISD::TRUNCATE)
15523 Op1 = Op1.getOperand(0);
15526 if (Op1.getOpcode() == ISD::SHL)
15527 std::swap(Op0, Op1);
15528 if (Op0.getOpcode() == ISD::SHL) {
15529 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0)))
15530 if (And00C->getZExtValue() == 1) {
15531 // If we looked past a truncate, check that it's only truncating away known zeros.
15533 unsigned BitWidth = Op0.getValueSizeInBits();
15534 unsigned AndBitWidth = And.getValueSizeInBits();
15535 if (BitWidth > AndBitWidth) {
15537 DAG.computeKnownBits(Op0, Zeros, Ones);
15538 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
15542 RHS = Op0.getOperand(1);
15544 } else if (Op1.getOpcode() == ISD::Constant) {
15545 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
15546 uint64_t AndRHSVal = AndRHS->getZExtValue();
15547 SDValue AndLHS = Op0;
15549 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
15550 LHS = AndLHS.getOperand(0);
15551 RHS = AndLHS.getOperand(1);
15554 // Use BT if the immediate can't be encoded in a TEST instruction.
15555 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
15557 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType());
15561 if (LHS.getNode()) {
15562 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
15563 // instruction. Since the shift amount is in-range-or-undefined, we know
15564 // that doing a bittest on the i32 value is ok. We extend to i32 because
15565 // the encoding for the i16 version is larger than the i32 version.
15566 // Also promote i16 to i32 for performance / code size reasons.
15567 if (LHS.getValueType() == MVT::i8 ||
15568 LHS.getValueType() == MVT::i16)
15569 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
15571 // If the operand types disagree, extend the shift amount to match. Since
15572 // BT ignores high bits (like shifts) we can use anyextend.
15573 if (LHS.getValueType() != RHS.getValueType())
15574 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
15576 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
15577 X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
15578 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15579 DAG.getConstant(Cond, MVT::i8), BT);
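// Worked example of the transform above (illustrative only; register names
// are hypothetical): for an i32 value x,
//   ((x >> 5) & 1) != 0
// becomes BT x, 5 followed by SETCC with COND_B, i.e. roughly
//   bt   eax, 5
//   setb al
// because BT copies the selected bit into CF.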
15585 /// \brief Turns an ISD::CondCode into a value suitable for an SSE floating point compare condition code.
15587 static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
15592 // SSE Condition code mapping: 0 - EQ, 1 - LT, 2 - LE, 3 - UNORD, 4 - NEQ, 5 - NLT, 6 - NLE, 7 - ORD
15601 switch (SetCCOpcode) {
15602 default: llvm_unreachable("Unexpected SETCC condition");
15604 case ISD::SETEQ: SSECC = 0; break;
15606 case ISD::SETGT: Swap = true; // Fallthrough
15608 case ISD::SETOLT: SSECC = 1; break;
15610 case ISD::SETGE: Swap = true; // Fallthrough
15612 case ISD::SETOLE: SSECC = 2; break;
15613 case ISD::SETUO: SSECC = 3; break;
15615 case ISD::SETNE: SSECC = 4; break;
15616 case ISD::SETULE: Swap = true; // Fallthrough
15617 case ISD::SETUGE: SSECC = 5; break;
15618 case ISD::SETULT: Swap = true; // Fallthrough
15619 case ISD::SETUGT: SSECC = 6; break;
15620 case ISD::SETO: SSECC = 7; break;
15622 case ISD::SETONE: SSECC = 8; break;
15625 std::swap(Op0, Op1);
15630 // Lower256IntVSETCC - Break a 256-bit integer VSETCC into two new 128-bit
15631 // ones, and then concatenate the result back.
15632 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
15633 MVT VT = Op.getSimpleValueType();
15635 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
15636 "Unsupported value type for operation");
15638 unsigned NumElems = VT.getVectorNumElements();
15640 SDValue CC = Op.getOperand(2);
15642 // Extract the LHS vectors
15643 SDValue LHS = Op.getOperand(0);
15644 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
15645 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
15647 // Extract the RHS vectors
15648 SDValue RHS = Op.getOperand(1);
15649 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
15650 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
15652 // Issue the operation on the smaller types and concatenate the result back
15653 MVT EltVT = VT.getVectorElementType();
15654 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
15655 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
15656 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
15657 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
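// Illustrative example (not from the source): on an AVX1-only target a
// v8i32 SETCC is split by the routine above into
//   lo = setcc (extract_subvector LHS, 0), (extract_subvector RHS, 0), cc
//   hi = setcc (extract_subvector LHS, 4), (extract_subvector RHS, 4), cc
// and the final result is concat_vectors(lo, hi) : v8i32.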
15660 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
15661 const X86Subtarget *Subtarget) {
15662 SDValue Op0 = Op.getOperand(0);
15663 SDValue Op1 = Op.getOperand(1);
15664 SDValue CC = Op.getOperand(2);
15665 MVT VT = Op.getSimpleValueType();
15668 assert(Op0.getValueType().getVectorElementType().getSizeInBits() >= 8 &&
15669 Op.getValueType().getScalarType() == MVT::i1 &&
15670 "Cannot set masked compare for this operation");
15672 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15674 bool Unsigned = false;
15677 switch (SetCCOpcode) {
15678 default: llvm_unreachable("Unexpected SETCC condition");
15679 case ISD::SETNE: SSECC = 4; break;
15680 case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
15681 case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
15682 case ISD::SETLT: Swap = true; //fall-through
15683 case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
15684 case ISD::SETULT: SSECC = 1; Unsigned = true; break;
15685 case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
15686 case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
15687 case ISD::SETULE: Unsigned = true; //fall-through
15688 case ISD::SETLE: SSECC = 2; break;
15692 std::swap(Op0, Op1);
15694 return DAG.getNode(Opc, dl, VT, Op0, Op1);
15695 Opc = Unsigned ? X86ISD::CMPMU: X86ISD::CMPM;
15696 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15697 DAG.getConstant(SSECC, MVT::i8));
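// Note (inferred from the code above, not stated in the source): the result
// type here has i1 elements, i.e. a k-register mask. PCMPEQM/PCMPGTM cover
// the signed EQ/GT forms directly, while the remaining predicates go
// through CMPM/CMPMU with the SSECC immediate computed above.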
15700 /// \brief Try to turn a VSETULT into a VSETULE by modifying its second
15701 /// operand \p Op1. If non-trivial (for example because it's not constant)
15702 /// return an empty value.
15703 static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
15705 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
15709 MVT VT = Op1.getSimpleValueType();
15710 MVT EVT = VT.getVectorElementType();
15711 unsigned n = VT.getVectorNumElements();
15712 SmallVector<SDValue, 8> ULTOp1;
15714 for (unsigned i = 0; i < n; ++i) {
15715 ConstantSDNode *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
15716 if (!Elt || Elt->isOpaque() || Elt->getValueType(0) != EVT)
15719 // Avoid underflow.
15720 APInt Val = Elt->getAPIntValue();
15724 ULTOp1.push_back(DAG.getConstant(Val - 1, EVT));
15727 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
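// Illustrative example (not from the source): for a constant splat operand,
//   x <u <4, 4, 4, 4>   becomes   x <=u <3, 3, 3, 3>
// which is only valid when no element is zero, hence the underflow check
// above.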
15730 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
15731 SelectionDAG &DAG) {
15732 SDValue Op0 = Op.getOperand(0);
15733 SDValue Op1 = Op.getOperand(1);
15734 SDValue CC = Op.getOperand(2);
15735 MVT VT = Op.getSimpleValueType();
15736 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15737 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
15742 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
15743 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
15746 unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
15747 unsigned Opc = X86ISD::CMPP;
15748 if (Subtarget->hasAVX512() && VT.getVectorElementType() == MVT::i1) {
15749 assert(VT.getVectorNumElements() <= 16);
15750 Opc = X86ISD::CMPM;
15752 // In the two special cases we can't handle, emit two comparisons.
15755 unsigned CombineOpc;
15756 if (SetCCOpcode == ISD::SETUEQ) {
15757 CC0 = 3; CC1 = 0; CombineOpc = ISD::OR;
15759 assert(SetCCOpcode == ISD::SETONE);
15760 CC0 = 7; CC1 = 4; CombineOpc = ISD::AND;
15763 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15764 DAG.getConstant(CC0, MVT::i8));
15765 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15766 DAG.getConstant(CC1, MVT::i8));
15767 return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
15769 // Handle all other FP comparisons here.
15770 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15771 DAG.getConstant(SSECC, MVT::i8));
15774 // Break 256-bit integer vector compare into smaller ones.
15775 if (VT.is256BitVector() && !Subtarget->hasInt256())
15776 return Lower256IntVSETCC(Op, DAG);
15778 bool MaskResult = (VT.getVectorElementType() == MVT::i1);
15779 EVT OpVT = Op1.getValueType();
15780 if (Subtarget->hasAVX512()) {
15781 if (Op1.getValueType().is512BitVector() ||
15782 (Subtarget->hasBWI() && Subtarget->hasVLX()) ||
15783 (MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32))
15784 return LowerIntVSETCC_AVX512(Op, DAG, Subtarget);
15786 // In the AVX-512 architecture setcc returns a mask with i1 elements,
15787 // but there is no compare instruction for i8 and i16 elements in KNL.
15788 // We are not talking about 512-bit operands in this case, since those
15789 // types are illegal.
15791 (OpVT.getVectorElementType().getSizeInBits() < 32 &&
15792 OpVT.getVectorElementType().getSizeInBits() >= 8))
15793 return DAG.getNode(ISD::TRUNCATE, dl, VT,
15794 DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
15797 // We are handling one of the integer comparisons here. Since SSE only has
15798 // GT and EQ comparisons for integer, swapping operands and multiple
15799 // operations may be required for some comparisons.
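// Illustrative examples of the synthesis below (not from the source):
//   a <s  b   ->  pcmpgt b, a                                (Swap)
//   a <=s b   ->  not (pcmpgt a, b)                          (Invert)
//   a <u  b   ->  pcmpgt (b ^ signbit), (a ^ signbit)        (Swap + FlipSigns)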
15801 bool Swap = false, Invert = false, FlipSigns = false, MinMax = false;
15802 bool Subus = false;
15804 switch (SetCCOpcode) {
15805 default: llvm_unreachable("Unexpected SETCC condition");
15806 case ISD::SETNE: Invert = true;
15807 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
15808 case ISD::SETLT: Swap = true;
15809 case ISD::SETGT: Opc = X86ISD::PCMPGT; break;
15810 case ISD::SETGE: Swap = true;
15811 case ISD::SETLE: Opc = X86ISD::PCMPGT;
15812 Invert = true; break;
15813 case ISD::SETULT: Swap = true;
15814 case ISD::SETUGT: Opc = X86ISD::PCMPGT;
15815 FlipSigns = true; break;
15816 case ISD::SETUGE: Swap = true;
15817 case ISD::SETULE: Opc = X86ISD::PCMPGT;
15818 FlipSigns = true; Invert = true; break;
15821 // Special case: Use min/max operations for SETULE/SETUGE
15822 MVT VET = VT.getVectorElementType();
15824 (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
15825 || (Subtarget->hasSSE2() && (VET == MVT::i8));
15828 switch (SetCCOpcode) {
15830 case ISD::SETULE: Opc = X86ISD::UMIN; MinMax = true; break;
15831 case ISD::SETUGE: Opc = X86ISD::UMAX; MinMax = true; break;
15834 if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
15837 bool hasSubus = Subtarget->hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
15838 if (!MinMax && hasSubus) {
15839 // As another special case, use PSUBUS[BW] when it's profitable. E.g. for Op0 u<= Op1:
15841 // t = psubus Op0, Op1
15842 // pcmpeq t, <0..0>
15843 switch (SetCCOpcode) {
15845 case ISD::SETULT: {
15846 // If the comparison is against a constant we can turn this into a
15847 // setule. With psubus, setule does not require a swap. This is
15848 // beneficial because the constant in the register is no longer
15849 // clobbered as the destination, so it can be hoisted out of a loop.
15850 // Only do this pre-AVX since vpcmp* is no longer destructive.
15851 if (Subtarget->hasAVX())
15853 SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
15854 if (ULEOp1.getNode()) {
15856 Subus = true; Invert = false; Swap = false;
15860 // Psubus is better than flip-sign because it requires no inversion.
15861 case ISD::SETUGE: Subus = true; Invert = false; Swap = true; break;
15862 case ISD::SETULE: Subus = true; Invert = false; Swap = false; break;
15866 Opc = X86ISD::SUBUS;
15872 std::swap(Op0, Op1);
15874 // Check that the operation in question is available (most are plain SSE2,
15875 // but PCMPGTQ and PCMPEQQ have different requirements).
15876 if (VT == MVT::v2i64) {
15877 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) {
15878 assert(Subtarget->hasSSE2() && "Don't know how to lower!");
15880 // First cast everything to the right type.
15881 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15882 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15884 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15885 // bits of the inputs before performing those operations. The lower
15886 // compare is always unsigned.
15889 SB = DAG.getConstant(0x80000000U, MVT::v4i32);
15891 SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32);
15892 SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32);
15893 SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
15894 Sign, Zero, Sign, Zero);
15896 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
15897 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
15899 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
15900 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
15901 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
15903 // Create masks for only the low parts / high parts of the 64-bit integers.
15904 static const int MaskHi[] = { 1, 1, 3, 3 };
15905 static const int MaskLo[] = { 0, 0, 2, 2 };
15906 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
15907 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
15908 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
15910 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
15911 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
15914 Result = DAG.getNOT(dl, Result, MVT::v4i32);
15916 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
15919 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) {
15920 // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
15921 // pcmpeqd + pshufd + pand.
15922 assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!");
15924 // First cast everything to the right type.
15925 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15926 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15929 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
15931 // Make sure the lower and upper halves are both all-ones.
15932 static const int Mask[] = { 1, 0, 3, 2 };
15933 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
15934 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
15937 Result = DAG.getNOT(dl, Result, MVT::v4i32);
15939 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
15943 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15944 // bits of the inputs before performing those operations.
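// For example (illustrative): with i32 elements, x <u y holds exactly when
// (x ^ 0x80000000) <s (y ^ 0x80000000), because XORing the sign bit maps
// the unsigned range [0, 2^32) monotonically onto the signed range.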
15946 EVT EltVT = VT.getVectorElementType();
15947 SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT);
15948 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB);
15949 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB);
15952 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
15954 // If the logical-not of the result is required, perform that now.
15956 Result = DAG.getNOT(dl, Result, VT);
15959 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
15962 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
15963 getZeroVector(VT, Subtarget, DAG, dl));
15968 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
15970 MVT VT = Op.getSimpleValueType();
15972 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
15974 assert(((!Subtarget->hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
15975 && "SetCC type must be 8-bit or 1-bit integer");
15976 SDValue Op0 = Op.getOperand(0);
15977 SDValue Op1 = Op.getOperand(1);
15979 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
15981 // Optimize to BT if possible.
15982 // Lower (X & (1 << N)) == 0 to BT(X, N).
15983 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
15984 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
15985 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
15986 Op1.getOpcode() == ISD::Constant &&
15987 cast<ConstantSDNode>(Op1)->isNullValue() &&
15988 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
15989 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
15990 if (NewSetCC.getNode()) {
15992 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
15997 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
15999 if (Op1.getOpcode() == ISD::Constant &&
16000 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
16001 cast<ConstantSDNode>(Op1)->isNullValue()) &&
16002 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16004 // If the input is a setcc, then reuse the input setcc or use a new one with
16005 // the inverted condition.
16006 if (Op0.getOpcode() == X86ISD::SETCC) {
16007 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
16008 bool Invert = (CC == ISD::SETNE) ^
16009 cast<ConstantSDNode>(Op1)->isNullValue();
16013 CCode = X86::GetOppositeBranchCondition(CCode);
16014 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
16015 DAG.getConstant(CCode, MVT::i8),
16016 Op0.getOperand(1));
16018 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
16022 if ((Op0.getValueType() == MVT::i1) && (Op1.getOpcode() == ISD::Constant) &&
16023 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1) &&
16024 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16026 ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
16027 return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, MVT::i1), NewCC);
16030 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
16031 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
16032 if (X86CC == X86::COND_INVALID)
16035 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
16036 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
16037 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
16038 DAG.getConstant(X86CC, MVT::i8), EFLAGS);
16040 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
16044 // isX86LogicalCmp - Return true if opcode is an X86 logical comparison.
16045 static bool isX86LogicalCmp(SDValue Op) {
16046 unsigned Opc = Op.getNode()->getOpcode();
16047 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
16048 Opc == X86ISD::SAHF)
16050 if (Op.getResNo() == 1 &&
16051 (Opc == X86ISD::ADD ||
16052 Opc == X86ISD::SUB ||
16053 Opc == X86ISD::ADC ||
16054 Opc == X86ISD::SBB ||
16055 Opc == X86ISD::SMUL ||
16056 Opc == X86ISD::UMUL ||
16057 Opc == X86ISD::INC ||
16058 Opc == X86ISD::DEC ||
16059 Opc == X86ISD::OR ||
16060 Opc == X86ISD::XOR ||
16061 Opc == X86ISD::AND))
16064 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
16070 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
16071 if (V.getOpcode() != ISD::TRUNCATE)
16074 SDValue VOp0 = V.getOperand(0);
16075 unsigned InBits = VOp0.getValueSizeInBits();
16076 unsigned Bits = V.getValueSizeInBits();
16077 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
16080 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
16081 bool addTest = true;
16082 SDValue Cond = Op.getOperand(0);
16083 SDValue Op1 = Op.getOperand(1);
16084 SDValue Op2 = Op.getOperand(2);
16086 EVT VT = Op1.getValueType();
16089 // Lower fp selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
16090 // are available. Otherwise fp cmovs get lowered into a less efficient branch
16091 // sequence later on.
16092 if (Cond.getOpcode() == ISD::SETCC &&
16093 ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
16094 (Subtarget->hasSSE1() && VT == MVT::f32)) &&
16095 VT == Cond.getOperand(0).getValueType() && Cond->hasOneUse()) {
16096 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
16097 int SSECC = translateX86FSETCC(
16098 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
16101 if (Subtarget->hasAVX512()) {
16102 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
16103 DAG.getConstant(SSECC, MVT::i8));
16104 return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
16106 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
16107 DAG.getConstant(SSECC, MVT::i8));
16108 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
16109 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
16110 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
16114 if (Cond.getOpcode() == ISD::SETCC) {
16115 SDValue NewCond = LowerSETCC(Cond, DAG);
16116 if (NewCond.getNode())
16120 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
16121 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
16122 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
16123 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
16124 if (Cond.getOpcode() == X86ISD::SETCC &&
16125 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
16126 isZero(Cond.getOperand(1).getOperand(1))) {
16127 SDValue Cmp = Cond.getOperand(1);
16129 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
16131 if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
16132 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
16133 SDValue Y = isAllOnes(Op2) ? Op1 : Op2;
16135 SDValue CmpOp0 = Cmp.getOperand(0);
16136 // Apply further optimizations for special cases
16137 // (select (x != 0), -1, 0) -> neg & sbb
16138 // (select (x == 0), 0, -1) -> neg & sbb
16139 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y))
16140 if (YC->isNullValue() &&
16141 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) {
16142 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
16143 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
16144 DAG.getConstant(0, CmpOp0.getValueType()),
16146 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16147 DAG.getConstant(X86::COND_B, MVT::i8),
16148 SDValue(Neg.getNode(), 1));
16152 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
16153 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
16154 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16156 SDValue Res = // Res = 0 or -1.
16157 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16158 DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
16160 if (isAllOnes(Op1) != (CondCode == X86::COND_E))
16161 Res = DAG.getNOT(DL, Res, Res.getValueType());
16163 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
16164 if (!N2C || !N2C->isNullValue())
16165 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
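// Illustrative x86 sketch of the special case above (register names are
// hypothetical, not from the source):
//   (select (x != 0), -1, 0):
//     neg  ecx          ; CF = (x != 0)
//     sbb  eax, eax     ; eax = CF ? -1 : 0
// The general path instead compares x against 1 and ORs the 0/-1 result
// with Y when the other operand is not zero.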
16170 // Look past (and (setcc_carry (cmp ...)), 1).
16171 if (Cond.getOpcode() == ISD::AND &&
16172 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16173 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16174 if (C && C->getAPIntValue() == 1)
16175 Cond = Cond.getOperand(0);
16178 // If condition flag is set by a X86ISD::CMP, then use it as the condition
16179 // setting operand in place of the X86ISD::SETCC.
16180 unsigned CondOpcode = Cond.getOpcode();
16181 if (CondOpcode == X86ISD::SETCC ||
16182 CondOpcode == X86ISD::SETCC_CARRY) {
16183 CC = Cond.getOperand(0);
16185 SDValue Cmp = Cond.getOperand(1);
16186 unsigned Opc = Cmp.getOpcode();
16187 MVT VT = Op.getSimpleValueType();
16189 bool IllegalFPCMov = false;
16190 if (VT.isFloatingPoint() && !VT.isVector() &&
16191 !isScalarFPTypeInSSEReg(VT)) // FPStack?
16192 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
16194 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
16195 Opc == X86ISD::BT) { // FIXME
16199 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16200 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16201 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16202 Cond.getOperand(0).getValueType() != MVT::i8)) {
16203 SDValue LHS = Cond.getOperand(0);
16204 SDValue RHS = Cond.getOperand(1);
16205 unsigned X86Opcode;
16208 switch (CondOpcode) {
16209 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16210 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16211 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16212 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16213 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16214 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16215 default: llvm_unreachable("unexpected overflowing operator");
16217 if (CondOpcode == ISD::UMULO)
16218 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16221 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16223 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
16225 if (CondOpcode == ISD::UMULO)
16226 Cond = X86Op.getValue(2);
16228 Cond = X86Op.getValue(1);
16230 CC = DAG.getConstant(X86Cond, MVT::i8);
16235 // Look past the truncate if the high bits are known zero.
16236 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16237 Cond = Cond.getOperand(0);
16239 // We know the result of AND is compared against zero. Try to match
16241 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16242 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG);
16243 if (NewSetCC.getNode()) {
16244 CC = NewSetCC.getOperand(0);
16245 Cond = NewSetCC.getOperand(1);
16252 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16253 Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
16256 // a < b ? -1 : 0 -> RES = ~setcc_carry
16257 // a < b ? 0 : -1 -> RES = setcc_carry
16258 // a >= b ? -1 : 0 -> RES = setcc_carry
16259 // a >= b ? 0 : -1 -> RES = ~setcc_carry
16260 if (Cond.getOpcode() == X86ISD::SUB) {
16261 Cond = ConvertCmpIfNecessary(Cond, DAG);
16262 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
16264 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
16265 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
16266 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16267 DAG.getConstant(X86::COND_B, MVT::i8), Cond);
16268 if (isAllOnes(Op1) != (CondCode == X86::COND_B))
16269 return DAG.getNOT(DL, Res, Res.getValueType());
16274 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate
16275 // widen the cmov and push the truncate through. This avoids introducing a new
16276 // branch during isel and doesn't add any extensions.
16277 if (Op.getValueType() == MVT::i8 &&
16278 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
16279 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
16280 if (T1.getValueType() == T2.getValueType() &&
16281 // Blacklist CopyFromReg to avoid partial register stalls.
16282 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
16283 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
16284 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond);
16285 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
16289 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
16290 // condition is true.
16291 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
16292 SDValue Ops[] = { Op2, Op1, CC, Cond };
16293 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops);
16296 static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, const X86Subtarget *Subtarget,
16297 SelectionDAG &DAG) {
16298 MVT VT = Op->getSimpleValueType(0);
16299 SDValue In = Op->getOperand(0);
16300 MVT InVT = In.getSimpleValueType();
16301 MVT VTElt = VT.getVectorElementType();
16302 MVT InVTElt = InVT.getVectorElementType();
16306 if ((InVTElt == MVT::i1) &&
16307 (((Subtarget->hasBWI() && Subtarget->hasVLX() &&
16308 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() <= 16)) ||
16310 ((Subtarget->hasBWI() && VT.is512BitVector() &&
16311 VTElt.getSizeInBits() <= 16)) ||
16313 ((Subtarget->hasDQI() && Subtarget->hasVLX() &&
16314 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) ||
16316 ((Subtarget->hasDQI() && VT.is512BitVector() &&
16317 VTElt.getSizeInBits() >= 32))))
16318 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16320 unsigned int NumElts = VT.getVectorNumElements();
16322 if (NumElts != 8 && NumElts != 16)
16325 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) {
16326 if (In.getOpcode() == X86ISD::VSEXT || In.getOpcode() == X86ISD::VZEXT)
16327 return DAG.getNode(In.getOpcode(), dl, VT, In.getOperand(0));
16328 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16331 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16332 assert (InVT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
16334 MVT ExtVT = (NumElts == 8) ? MVT::v8i64 : MVT::v16i32;
16335 Constant *C = ConstantInt::get(*DAG.getContext(),
16336 APInt::getAllOnesValue(ExtVT.getScalarType().getSizeInBits()));
16338 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
16339 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
16340 SDValue Ld = DAG.getLoad(ExtVT.getScalarType(), dl, DAG.getEntryNode(), CP,
16341 MachinePointerInfo::getConstantPool(),
16342 false, false, false, Alignment);
16343 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, dl, ExtVT, In, Ld);
16344 if (VT.is512BitVector())
16346 return DAG.getNode(X86ISD::VTRUNC, dl, VT, Brcst);
16349 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
16350 SelectionDAG &DAG) {
16351 MVT VT = Op->getSimpleValueType(0);
16352 SDValue In = Op->getOperand(0);
16353 MVT InVT = In.getSimpleValueType();
16356 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
16357 return LowerSIGN_EXTEND_AVX512(Op, Subtarget, DAG);
16359 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) &&
16360 (VT != MVT::v8i32 || InVT != MVT::v8i16) &&
16361 (VT != MVT::v16i16 || InVT != MVT::v16i8))
16364 if (Subtarget->hasInt256())
16365 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16367 // Optimize vectors in AVX mode
16368 // Sign extend v8i16 to v8i32 and v4i32 to v4i64.
16371 // Divide input vector into two parts
16372 // for v4i32 the shuffle mask will be { 0, 1, -1, -1} {2, 3, -1, -1}
16373 // use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32
16374 // concat the vectors to original VT
16376 unsigned NumElems = InVT.getVectorNumElements();
16377 SDValue Undef = DAG.getUNDEF(InVT);
16379 SmallVector<int,8> ShufMask1(NumElems, -1);
16380 for (unsigned i = 0; i != NumElems/2; ++i)
16383 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]);
16385 SmallVector<int,8> ShufMask2(NumElems, -1);
16386 for (unsigned i = 0; i != NumElems/2; ++i)
16387 ShufMask2[i] = i + NumElems/2;
16389 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]);
16391 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(),
16392 VT.getVectorNumElements()/2);
16394 OpLo = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpLo);
16395 OpHi = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpHi);
16397 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
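// Illustrative example of the AVX1 path above (not from the source): for a
// v8i16 -> v8i32 sign extension,
//   OpLo = shuffle In, undef, <0, 1, 2, 3, -1, -1, -1, -1>
//   OpHi = shuffle In, undef, <4, 5, 6, 7, -1, -1, -1, -1>
//   result = concat_vectors (vsext OpLo : v4i32), (vsext OpHi : v4i32)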
16400 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
16401 // may emit an illegal shuffle but the expansion is still better than scalar
16402 // code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
16403 // we'll emit a shuffle and an arithmetic shift.
16404 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
16405 // TODO: It is possible to support ZExt by zeroing the undef values during
16406 // the shuffle phase or after the shuffle.
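// Illustrative example (not from the source): without SSE4.1, a sextload of
// v4i8 to v4i32 loads the four bytes as a single i32 scalar, places it in a
// vector, shuffles each byte into the high byte of its destination lane,
// and finally arithmetic-shifts each lane right by 24 to finish the sign
// extension.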
16407 static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
16408 SelectionDAG &DAG) {
16409 MVT RegVT = Op.getSimpleValueType();
16410 assert(RegVT.isVector() && "We only custom lower vector sext loads.");
16411 assert(RegVT.isInteger() &&
16412 "We only custom lower integer vector sext loads.");
16414 // Nothing useful we can do without SSE2 shuffles.
16415 assert(Subtarget->hasSSE2() && "We only custom lower sext loads with SSE2.");
16417 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
16419 EVT MemVT = Ld->getMemoryVT();
16420 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16421 unsigned RegSz = RegVT.getSizeInBits();
16423 ISD::LoadExtType Ext = Ld->getExtensionType();
16425 assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
16426 && "Only anyext and sext are currently implemented.");
16427 assert(MemVT != RegVT && "Cannot extend to the same type");
16428 assert(MemVT.isVector() && "Must load a vector from memory");
16430 unsigned NumElems = RegVT.getVectorNumElements();
16431 unsigned MemSz = MemVT.getSizeInBits();
16432 assert(RegSz > MemSz && "Register size must be greater than the mem size");
16434 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256()) {
16435 // The only way in which we have a legal 256-bit vector result but not the
16436 // integer 256-bit operations needed to directly lower a sextload is if we
16437 // have AVX1 but not AVX2. In that case, we can always emit a sextload to
16438 // a 128-bit vector and a normal sign_extend to 256-bits that should get
16439 // correctly legalized. We do this late to allow the canonical form of
16440 // sextload to persist throughout the rest of the DAG combiner -- it wants
16441 // to fold together any extensions it can, and so will fuse a sign_extend
16442 // of an sextload into a sextload targeting a wider value.
16444 if (MemSz == 128) {
16445 // Just switch this to a normal load.
16446 assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
16447 "it must be a legal 128-bit vector "
16449 Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
16450 Ld->getPointerInfo(), Ld->isVolatile(), Ld->isNonTemporal(),
16451 Ld->isInvariant(), Ld->getAlignment());
16453 assert(MemSz < 128 &&
16454 "Can't extend a type wider than 128 bits to a 256 bit vector!");
16455 // Do an sext load to a 128-bit vector type. We want to use the same
16456 // number of elements, but elements half as wide. This will end up being
16457 // recursively lowered by this routine, but will succeed as we definitely
16458 // have all the necessary features if we're using AVX1.
16460 EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
16461 EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
16463 DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
16464 Ld->getPointerInfo(), MemVT, Ld->isVolatile(),
16465 Ld->isNonTemporal(), Ld->isInvariant(),
16466 Ld->getAlignment());
16469 // Replace chain users with the new chain.
16470 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
16471 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
16473 // Finally, do a normal sign-extend to the desired register.
16474 return DAG.getSExtOrTrunc(Load, dl, RegVT);
16477 // All sizes must be a power of two.
16478 assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
16479 "Non-power-of-two elements are not custom lowered!");
16481 // Attempt to load the original value using scalar loads.
16482 // Find the largest scalar type that divides the total loaded size.
16483 MVT SclrLoadTy = MVT::i8;
16484 for (MVT Tp : MVT::integer_valuetypes()) {
16485 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
16490 // On 32-bit systems, we can't save 64-bit integers. Try bitcasting to f64.
16491 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
16493 SclrLoadTy = MVT::f64;
16495 // Calculate the number of scalar loads that we need to perform
16496 // in order to load our vector from memory.
16497 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
16499 assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
16500 "Can only lower sext loads with a single scalar load!");
16502 unsigned loadRegZize = RegSz;
16503 if (Ext == ISD::SEXTLOAD && RegSz == 256)
16506 // Represent our vector as a sequence of elements which are the
16507 // largest scalar that we can load.
16508 EVT LoadUnitVecVT = EVT::getVectorVT(
16509 *DAG.getContext(), SclrLoadTy, loadRegZize / SclrLoadTy.getSizeInBits());
16511 // Represent the data using the same element type that is stored in
16512 // memory. In practice, we "widen" MemVT.
16514 EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
16515 loadRegZize / MemVT.getScalarType().getSizeInBits());
16517 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
16518 "Invalid vector type");
16520 // We can't shuffle using an illegal type.
16521 assert(TLI.isTypeLegal(WideVecVT) &&
16522 "We only lower types that form legal widened vector types");
16524 SmallVector<SDValue, 8> Chains;
16525 SDValue Ptr = Ld->getBasePtr();
16526 SDValue Increment =
16527 DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, TLI.getPointerTy());
16528 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
16530 for (unsigned i = 0; i < NumLoads; ++i) {
16531 // Perform a single load.
16532 SDValue ScalarLoad =
16533 DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
16534 Ld->isVolatile(), Ld->isNonTemporal(), Ld->isInvariant(),
16535 Ld->getAlignment());
16536 Chains.push_back(ScalarLoad.getValue(1));
16537 // Create the first element type using SCALAR_TO_VECTOR in order to avoid
16538 // another round of DAGCombining.
16540 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
16542 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
16543 ScalarLoad, DAG.getIntPtrConstant(i));
16545 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
16548 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
16550 // Bitcast the loaded value to a vector of the original element type, in
16551 // the size of the target vector type.
16552 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res);
16553 unsigned SizeRatio = RegSz / MemSz;
16555 if (Ext == ISD::SEXTLOAD) {
16556 // If we have SSE4.1, we can directly emit a VSEXT node.
16557 if (Subtarget->hasSSE41()) {
16558 SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec);
16559 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16563 // Otherwise we'll shuffle the small elements in the high bits of the
16564 // larger type and perform an arithmetic shift. If the shift is not legal
16565 // it's better to scalarize.
16566 assert(TLI.isOperationLegalOrCustom(ISD::SRA, RegVT) &&
16567 "We can't implement a sext load without an arithmetic right shift!");
16569 // Redistribute the loaded elements into the different locations.
16570 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16571 for (unsigned i = 0; i != NumElems; ++i)
16572 ShuffleVec[i * SizeRatio + SizeRatio - 1] = i;
16574 SDValue Shuff = DAG.getVectorShuffle(
16575 WideVecVT, dl, SlicedVec, DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16577 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16579 // Build the arithmetic shift.
16580 unsigned Amt = RegVT.getVectorElementType().getSizeInBits() -
16581 MemVT.getVectorElementType().getSizeInBits();
16583 DAG.getNode(ISD::SRA, dl, RegVT, Shuff, DAG.getConstant(Amt, RegVT));
16585 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16589 // Redistribute the loaded elements into the different locations.
16590 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16591 for (unsigned i = 0; i != NumElems; ++i)
16592 ShuffleVec[i * SizeRatio] = i;
16594 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
16595 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16597 // Bitcast to the requested type.
16598 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16599 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16603 // isAndOrOfSetCCs - Return true if node is an ISD::AND or
16604 // ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart
16605 // from the AND / OR.
16606 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
16607 Opc = Op.getOpcode();
16608 if (Opc != ISD::OR && Opc != ISD::AND)
16610 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16611 Op.getOperand(0).hasOneUse() &&
16612 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
16613 Op.getOperand(1).hasOneUse());
16616 // isXor1OfSetCC - Return true if node is an ISD::XOR of an X86ISD::SETCC and
16617 // 1 and that the SETCC node has a single use.
16618 static bool isXor1OfSetCC(SDValue Op) {
16619 if (Op.getOpcode() != ISD::XOR)
16621 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
16622 if (N1C && N1C->getAPIntValue() == 1) {
16623 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16624 Op.getOperand(0).hasOneUse();
16629 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
16630 bool addTest = true;
16631 SDValue Chain = Op.getOperand(0);
16632 SDValue Cond = Op.getOperand(1);
16633 SDValue Dest = Op.getOperand(2);
16636 bool Inverted = false;
16638 if (Cond.getOpcode() == ISD::SETCC) {
16639 // Check for setcc([su]{add,sub,mul}o == 0).
16640 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
16641 isa<ConstantSDNode>(Cond.getOperand(1)) &&
16642 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() &&
16643 Cond.getOperand(0).getResNo() == 1 &&
16644 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
16645 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
16646 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
16647 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
16648 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
16649 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
16651 Cond = Cond.getOperand(0);
16653 SDValue NewCond = LowerSETCC(Cond, DAG);
16654 if (NewCond.getNode())
16659 // FIXME: LowerXALUO doesn't handle these!!
16660 else if (Cond.getOpcode() == X86ISD::ADD ||
16661 Cond.getOpcode() == X86ISD::SUB ||
16662 Cond.getOpcode() == X86ISD::SMUL ||
16663 Cond.getOpcode() == X86ISD::UMUL)
16664 Cond = LowerXALUO(Cond, DAG);
16667 // Look past (and (setcc_carry (cmp ...)), 1).
16668 if (Cond.getOpcode() == ISD::AND &&
16669 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16670 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16671 if (C && C->getAPIntValue() == 1)
16672 Cond = Cond.getOperand(0);
16675 // If condition flag is set by a X86ISD::CMP, then use it as the condition
16676 // setting operand in place of the X86ISD::SETCC.
16677 unsigned CondOpcode = Cond.getOpcode();
16678 if (CondOpcode == X86ISD::SETCC ||
16679 CondOpcode == X86ISD::SETCC_CARRY) {
16680 CC = Cond.getOperand(0);
16682 SDValue Cmp = Cond.getOperand(1);
16683 unsigned Opc = Cmp.getOpcode();
16684 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
16685 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
16689 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
16693 // These can only come from an arithmetic instruction with overflow,
16694 // e.g. SADDO, UADDO.
16695 Cond = Cond.getNode()->getOperand(1);
16701 CondOpcode = Cond.getOpcode();
16702 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16703 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16704 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16705 Cond.getOperand(0).getValueType() != MVT::i8)) {
16706 SDValue LHS = Cond.getOperand(0);
16707 SDValue RHS = Cond.getOperand(1);
16708 unsigned X86Opcode;
16711 // Keep this in sync with LowerXALUO, otherwise we might create redundant
16712 // instructions that can't be removed afterwards (i.e. X86ISD::ADD and
16714 switch (CondOpcode) {
16715 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16717 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16719 X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
16722 X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16723 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16725 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16727 X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
16730 X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16731 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16732 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16733 default: llvm_unreachable("unexpected overflowing operator");
16736 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
16737 if (CondOpcode == ISD::UMULO)
16738 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16741 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16743 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);
16745 if (CondOpcode == ISD::UMULO)
16746 Cond = X86Op.getValue(2);
16748 Cond = X86Op.getValue(1);
16750 CC = DAG.getConstant(X86Cond, MVT::i8);
16754 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
16755 SDValue Cmp = Cond.getOperand(0).getOperand(1);
16756 if (CondOpc == ISD::OR) {
16757 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
16758 // two branches instead of an explicit OR instruction with a separate test.
16760 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16761 isX86LogicalCmp(Cmp)) {
16762 CC = Cond.getOperand(0).getOperand(0);
16763 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16764 Chain, Dest, CC, Cmp);
16765 CC = Cond.getOperand(1).getOperand(0);
16769 } else { // ISD::AND
16770 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
16771 // two branches instead of an explicit AND instruction with a
16772 // separate test. However, we only do this if this block doesn't
16773 // have a fall-through edge, because this requires an explicit
16774 // jmp when the condition is false.
16775 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16776 isX86LogicalCmp(Cmp) &&
16777 Op.getNode()->hasOneUse()) {
16778 X86::CondCode CCode =
16779 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16780 CCode = X86::GetOppositeBranchCondition(CCode);
16781 CC = DAG.getConstant(CCode, MVT::i8);
16782 SDNode *User = *Op.getNode()->use_begin();
16783 // Look for an unconditional branch following this conditional branch.
16784 // We need this because we need to reverse the successors in order
16785 // to implement FCMP_OEQ.
16786 if (User->getOpcode() == ISD::BR) {
16787 SDValue FalseBB = User->getOperand(1);
16789 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16790 assert(NewBR == User);
16794 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16795 Chain, Dest, CC, Cmp);
16796 X86::CondCode CCode =
16797 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
16798 CCode = X86::GetOppositeBranchCondition(CCode);
16799 CC = DAG.getConstant(CCode, MVT::i8);
16805 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
16806 // Recognize xorb (setcc), 1 patterns. The xor inverts the condition.
16807 // It should be transformed during dag combiner except when the condition
16808 // is set by an arithmetic-with-overflow node.
16809 X86::CondCode CCode =
16810 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16811 CCode = X86::GetOppositeBranchCondition(CCode);
16812 CC = DAG.getConstant(CCode, MVT::i8);
16813 Cond = Cond.getOperand(0).getOperand(1);
16815 } else if (Cond.getOpcode() == ISD::SETCC &&
16816 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
16817 // For FCMP_OEQ, we can emit
16818 // two branches instead of an explicit AND instruction with a
16819 // separate test. However, we only do this if this block doesn't
16820 // have a fall-through edge, because this requires an explicit
16821 // jmp when the condition is false.
16822 if (Op.getNode()->hasOneUse()) {
16823 SDNode *User = *Op.getNode()->use_begin();
16824 // Look for an unconditional branch following this conditional branch.
16825 // We need this because we need to reverse the successors in order
16826 // to implement FCMP_OEQ.
16827 if (User->getOpcode() == ISD::BR) {
16828 SDValue FalseBB = User->getOperand(1);
16830 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16831 assert(NewBR == User);
16835 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16836 Cond.getOperand(0), Cond.getOperand(1));
16837 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16838 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16839 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16840 Chain, Dest, CC, Cmp);
16841 CC = DAG.getConstant(X86::COND_P, MVT::i8);
16846 } else if (Cond.getOpcode() == ISD::SETCC &&
16847 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
16848 // For FCMP_UNE, we can emit
16849 // two branches instead of an explicit AND instruction with a
16850 // separate test. However, we only do this if this block doesn't
16851 // have a fall-through edge, because this requires an explicit
16852 // jmp when the condition is false.
16853 if (Op.getNode()->hasOneUse()) {
16854 SDNode *User = *Op.getNode()->use_begin();
16855 // Look for an unconditional branch following this conditional branch.
16856 // We need this because we need to reverse the successors in order
16857 // to implement FCMP_UNE.
16858 if (User->getOpcode() == ISD::BR) {
16859 SDValue FalseBB = User->getOperand(1);
16861 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16862 assert(NewBR == User);
16865 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16866 Cond.getOperand(0), Cond.getOperand(1));
16867 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16868 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16869 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16870 Chain, Dest, CC, Cmp);
16871 CC = DAG.getConstant(X86::COND_NP, MVT::i8);
16881 // Look past the truncate if the high bits are known zero.
16882 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16883 Cond = Cond.getOperand(0);
16885 // We know the result of AND is compared against zero. Try to match
16887 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16888 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
16889 if (NewSetCC.getNode()) {
16890 CC = NewSetCC.getOperand(0);
16891 Cond = NewSetCC.getOperand(1);
16898 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
16899 CC = DAG.getConstant(X86Cond, MVT::i8);
16900 Cond = EmitTest(Cond, X86Cond, dl, DAG);
16902 Cond = ConvertCmpIfNecessary(Cond, DAG);
16903 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16904 Chain, Dest, CC, Cond);
16907 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
16908 // Calls to _alloca are needed to probe the stack when allocating more than 4k
16909 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
16910 // that the guard pages used by the OS virtual memory manager are allocated in
16911 // correct sequence.
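// Illustrative note (not from the source): for example, a single 16 KB
// allocation becomes a call to the probe routine, which touches the stack
// once per 4 KB page so that each guard page is faulted in order before the
// stack pointer is moved past it.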
16913 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
16914 SelectionDAG &DAG) const {
16915 MachineFunction &MF = DAG.getMachineFunction();
16916 bool SplitStack = MF.shouldSplitStack();
16917 bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMachO()) ||
16922 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16923 SDNode* Node = Op.getNode();
16925 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
16926 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
16927 " not tell us which reg is the stack pointer!");
16928 EVT VT = Node->getValueType(0);
16929 SDValue Tmp1 = SDValue(Node, 0);
16930 SDValue Tmp2 = SDValue(Node, 1);
16931 SDValue Tmp3 = Node->getOperand(2);
16932 SDValue Chain = Tmp1.getOperand(0);
16934 // Chain the dynamic stack allocation so that it doesn't modify the stack
16935 // pointer when other instructions are using the stack.
16936 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true),
16939 SDValue Size = Tmp2.getOperand(1);
16940 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
16941 Chain = SP.getValue(1);
16942 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
16943 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
16944 unsigned StackAlign = TFI.getStackAlignment();
16945 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
16946 if (Align > StackAlign)
16947 Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
16948 DAG.getConstant(-(uint64_t)Align, VT));
16949 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
16951 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
16952 DAG.getIntPtrConstant(0, true), SDValue(),
16955 SDValue Ops[2] = { Tmp1, Tmp2 };
16956 return DAG.getMergeValues(Ops, dl);
16960 SDValue Chain = Op.getOperand(0);
16961 SDValue Size = Op.getOperand(1);
16962 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
16963 EVT VT = Op.getNode()->getValueType(0);
16965 bool Is64Bit = Subtarget->is64Bit();
16966 EVT SPTy = getPointerTy();
16969 MachineRegisterInfo &MRI = MF.getRegInfo();
16972 // The 64-bit implementation of segmented stacks needs to clobber both r10 and
16973 // r11. This makes it impossible to use it along with nested parameters.
16974 const Function *F = MF.getFunction();
16976 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
16978 if (I->hasNestAttr())
16979 report_fatal_error("Cannot use segmented stacks with functions that "
16980 "have nested arguments.");
16983 const TargetRegisterClass *AddrRegClass =
16984 getRegClassFor(getPointerTy());
16985 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
16986 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
16987 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
16988 DAG.getRegister(Vreg, SPTy));
16989 SDValue Ops1[2] = { Value, Chain };
16990 return DAG.getMergeValues(Ops1, dl);
16993 const unsigned Reg = (Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX);
16995 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
16996 Flag = Chain.getValue(1);
16997 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
16999 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
17001 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
17002 unsigned SPReg = RegInfo->getStackRegister();
17003 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
17004 Chain = SP.getValue(1);
17007 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
17008 DAG.getConstant(-(uint64_t)Align, VT));
17009 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
17012 SDValue Ops1[2] = { SP, Chain };
17013 return DAG.getMergeValues(Ops1, dl);
17017 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
17018 MachineFunction &MF = DAG.getMachineFunction();
17019 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
17021 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
17024 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
17025 // vastart just stores the address of the VarArgsFrameIndex slot into the
17026 // memory location argument.
17027 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
17029 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
17030 MachinePointerInfo(SV), false, false, 0);
17034 // gp_offset (0 - 6 * 8)
17035 // fp_offset (48 - 48 + 8 * 16)
17036 // overflow_arg_area (points to parameters coming in memory).
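// For reference (the same layout is spelled out in LowerVACOPY below), the
// x86-64 va_list is
//   struct { i32 gp_offset; i32 fp_offset; i8* overflow_arg_area;
//            i8* reg_save_area; }
// and the four stores below fill it in at offsets 0, 4, 8 and 16.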
17038 SmallVector<SDValue, 8> MemOps;
17039 SDValue FIN = Op.getOperand(1);
17041 SDValue Store = DAG.getStore(Op.getOperand(0), DL,
17042 DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
17044 FIN, MachinePointerInfo(SV), false, false, 0);
17045 MemOps.push_back(Store);
17048 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17049 FIN, DAG.getIntPtrConstant(4));
17050 Store = DAG.getStore(Op.getOperand(0), DL,
17051 DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
17053 FIN, MachinePointerInfo(SV, 4), false, false, 0);
17054 MemOps.push_back(Store);
17056 // Store ptr to overflow_arg_area
17057 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17058 FIN, DAG.getIntPtrConstant(4));
17059 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
17061 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
17062 MachinePointerInfo(SV, 8),
17064 MemOps.push_back(Store);
17066 // Store ptr to reg_save_area.
17067 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17068 FIN, DAG.getIntPtrConstant(8));
17069 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
17071 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN,
17072 MachinePointerInfo(SV, 16), false, false, 0);
17073 MemOps.push_back(Store);
17074 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
17077 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
17078 assert(Subtarget->is64Bit() &&
17079 "LowerVAARG only handles 64-bit va_arg!");
17080 assert((Subtarget->isTargetLinux() ||
17081 Subtarget->isTargetDarwin()) &&
17082 "Unhandled target in LowerVAARG");
17083 assert(Op.getNode()->getNumOperands() == 4);
17084 SDValue Chain = Op.getOperand(0);
17085 SDValue SrcPtr = Op.getOperand(1);
17086 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
17087 unsigned Align = Op.getConstantOperandVal(3);
17090 EVT ArgVT = Op.getNode()->getValueType(0);
17091 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
17092 uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy);
17095 // Decide which area this value should be read from.
17096 // TODO: Implement the AMD64 ABI in its entirety. This simple
17097 // selection mechanism works only for the basic types.
17098 if (ArgVT == MVT::f80) {
17099 llvm_unreachable("va_arg for f80 not yet implemented");
17100 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
17101 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
17102 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
17103 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
17105 llvm_unreachable("Unhandled argument type in LowerVAARG");
17108 if (ArgMode == 2) {
17109 // Sanity Check: Make sure using fp_offset makes sense.
17110 assert(!DAG.getTarget().Options.UseSoftFloat &&
17111 !(DAG.getMachineFunction().getFunction()->hasFnAttribute(
17112 Attribute::NoImplicitFloat)) &&
17113 Subtarget->hasSSE1());
17116 // Insert VAARG_64 node into the DAG
17117 // VAARG_64 returns two values: Variable Argument Address, Chain
17118 SmallVector<SDValue, 11> InstOps;
17119 InstOps.push_back(Chain);
17120 InstOps.push_back(SrcPtr);
17121 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32));
17122 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8));
17123 InstOps.push_back(DAG.getConstant(Align, MVT::i32));
17124 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
17125 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
17126 VTs, InstOps, MVT::i64,
17127 MachinePointerInfo(SV),
17129 /*Volatile=*/false,
17131 /*WriteMem=*/true);
17132 Chain = VAARG.getValue(1);
17134 // Load the next argument and return it
17135 return DAG.getLoad(ArgVT, dl,
17138 MachinePointerInfo(),
17139 false, false, false, 0);
17142 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
17143 SelectionDAG &DAG) {
17144 // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
17145 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
17146 SDValue Chain = Op.getOperand(0);
17147 SDValue DstPtr = Op.getOperand(1);
17148 SDValue SrcPtr = Op.getOperand(2);
17149 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
17150 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
17153 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
17154 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
17156 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
17159 // getTargetVShiftByConstNode - Handle vector element shifts where the shift
17160 // amount is a constant. Takes immediate version of shift as input.
17161 static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
17162 SDValue SrcOp, uint64_t ShiftAmt,
17163 SelectionDAG &DAG) {
17164 MVT ElementType = VT.getVectorElementType();
// Fold this packed shift into its first operand if ShiftAmt is 0.
if (ShiftAmt == 0)
  return SrcOp;

// Check for ShiftAmt >= element width
if (ShiftAmt >= ElementType.getSizeInBits()) {
  if (Opc == X86ISD::VSRAI)
    ShiftAmt = ElementType.getSizeInBits() - 1;
  else
    return DAG.getConstant(0, VT);
}
17178 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
17179 && "Unknown target vector shift-by-constant node");
17181 // Fold this packed vector shift into a build vector if SrcOp is a
// vector of Constants or UNDEFs and SrcOp's value type is the same as VT.
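// For example (illustrative only), shifting the constant vector
// <i32 2, i32 4, i32 undef, i32 8> left by 1 with VSHLI folds directly to the
// build_vector <i32 4, i32 8, i32 undef, i32 16>, so no shift is emitted.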
17183 if (VT == SrcOp.getSimpleValueType() &&
17184 ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
17185 SmallVector<SDValue, 8> Elts;
17186 unsigned NumElts = SrcOp->getNumOperands();
17187 ConstantSDNode *ND;
17190 default: llvm_unreachable(nullptr);
17191 case X86ISD::VSHLI:
17192 for (unsigned i=0; i!=NumElts; ++i) {
17193 SDValue CurrentOp = SrcOp->getOperand(i);
17194 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17195 Elts.push_back(CurrentOp);
17198 ND = cast<ConstantSDNode>(CurrentOp);
17199 const APInt &C = ND->getAPIntValue();
17200 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), ElementType));
17203 case X86ISD::VSRLI:
17204 for (unsigned i=0; i!=NumElts; ++i) {
17205 SDValue CurrentOp = SrcOp->getOperand(i);
17206 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17207 Elts.push_back(CurrentOp);
17210 ND = cast<ConstantSDNode>(CurrentOp);
17211 const APInt &C = ND->getAPIntValue();
17212 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), ElementType));
17215 case X86ISD::VSRAI:
17216 for (unsigned i=0; i!=NumElts; ++i) {
17217 SDValue CurrentOp = SrcOp->getOperand(i);
17218 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17219 Elts.push_back(CurrentOp);
17222 ND = cast<ConstantSDNode>(CurrentOp);
17223 const APInt &C = ND->getAPIntValue();
17224 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), ElementType));
17229 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
17232 return DAG.getNode(Opc, dl, VT, SrcOp, DAG.getConstant(ShiftAmt, MVT::i8));
17235 // getTargetVShiftNode - Handle vector element shifts where the shift amount
17236 // may or may not be a constant. Takes immediate version of shift as input.
17237 static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
17238 SDValue SrcOp, SDValue ShAmt,
17239 SelectionDAG &DAG) {
17240 MVT SVT = ShAmt.getSimpleValueType();
17241 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
17243 // Catch shift-by-constant.
17244 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
17245 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
17246 CShAmt->getZExtValue(), DAG);
17248 // Change opcode to non-immediate version
17250 default: llvm_unreachable("Unknown target vector shift node");
17251 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
17252 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
17253 case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
17256 const X86Subtarget &Subtarget =
17257 static_cast<const X86Subtarget &>(DAG.getSubtarget());
17258 if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
17259 ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
17260 // Let the shuffle legalizer expand this shift amount node.
17261 SDValue Op0 = ShAmt.getOperand(0);
17262 Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
17263 ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, &Subtarget, DAG);
17265 // Need to build a vector containing shift amount.
// SSE/AVX packed shifts only use the lower 64 bits of the shift count.
17267 SmallVector<SDValue, 4> ShOps;
17268 ShOps.push_back(ShAmt);
17269 if (SVT == MVT::i32) {
17270 ShOps.push_back(DAG.getConstant(0, SVT));
17271 ShOps.push_back(DAG.getUNDEF(SVT));
17273 ShOps.push_back(DAG.getUNDEF(SVT));
17275 MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
17276 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
17279 // The return type has to be a 128-bit type with the same element
17280 // type as the input type.
17281 MVT EltVT = VT.getVectorElementType();
17282 EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());
17284 ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt);
17285 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
17288 /// \brief Return (and \p Op, \p Mask) for compare instructions or
17289 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
17290 /// necessary casting for \p Mask when lowering masking intrinsics.
17291 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
17292 SDValue PreservedSrc,
17293 const X86Subtarget *Subtarget,
17294 SelectionDAG &DAG) {
17295 EVT VT = Op.getValueType();
17296 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(),
17297 MVT::i1, VT.getVectorNumElements());
17298 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17299 Mask.getValueType().getSizeInBits());
17302 assert(MaskVT.isSimple() && "invalid mask type");
17304 if (isAllOnes(Mask))
// When MaskVT is v2i1 or v4i1, the low 2 or 4 elements
// are extracted by EXTRACT_SUBVECTOR.
17309 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17310 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17311 DAG.getIntPtrConstant(0));
17313 switch (Op.getOpcode()) {
17315 case X86ISD::PCMPEQM:
17316 case X86ISD::PCMPGTM:
17318 case X86ISD::CMPMU:
17319 return DAG.getNode(ISD::AND, dl, VT, Op, VMask);
17321 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17322 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17323 return DAG.getNode(ISD::VSELECT, dl, VT, VMask, Op, PreservedSrc);
17326 /// \brief Creates an SDNode for a predicated scalar operation.
17327 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
/// The mask comes in as MVT::i8 and should be truncated
/// to MVT::i1 while lowering masking intrinsics.
/// The main difference between ScalarMaskingNode and VectorMaskingNode is that
/// the former uses "X86select" instead of "vselect": we simply cannot create a
/// "vselect" node for a scalar instruction.
17333 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
17334 SDValue PreservedSrc,
17335 const X86Subtarget *Subtarget,
17336 SelectionDAG &DAG) {
17337 if (isAllOnes(Mask))
17340 EVT VT = Op.getValueType();
17342 // The mask should be of type MVT::i1
17343 SDValue IMask = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Mask);
17345 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17346 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17347 return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc);
17350 static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17351 SelectionDAG &DAG) {
17353 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17354 EVT VT = Op.getValueType();
17355 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
17357 switch(IntrData->Type) {
17358 case INTR_TYPE_1OP:
17359 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
17360 case INTR_TYPE_2OP:
17361 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17363 case INTR_TYPE_3OP:
17364 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17365 Op.getOperand(2), Op.getOperand(3));
17366 case INTR_TYPE_1OP_MASK_RM: {
17367 SDValue Src = Op.getOperand(1);
17368 SDValue Src0 = Op.getOperand(2);
17369 SDValue Mask = Op.getOperand(3);
17370 SDValue RoundingMode = Op.getOperand(4);
17371 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
17373 Mask, Src0, Subtarget, DAG);
17375 case INTR_TYPE_SCALAR_MASK_RM: {
17376 SDValue Src1 = Op.getOperand(1);
17377 SDValue Src2 = Op.getOperand(2);
17378 SDValue Src0 = Op.getOperand(3);
17379 SDValue Mask = Op.getOperand(4);
17380 SDValue RoundingMode = Op.getOperand(5);
17381 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
17383 Mask, Src0, Subtarget, DAG);
17385 case INTR_TYPE_2OP_MASK: {
17386 SDValue Mask = Op.getOperand(4);
17387 SDValue PassThru = Op.getOperand(3);
17388 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17389 if (IntrWithRoundingModeOpcode != 0) {
17390 unsigned Round = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
17391 if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION) {
17392 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17393 dl, Op.getValueType(),
17394 Op.getOperand(1), Op.getOperand(2),
17395 Op.getOperand(3), Op.getOperand(5)),
17396 Mask, PassThru, Subtarget, DAG);
17399 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
17402 Mask, PassThru, Subtarget, DAG);
17404 case FMA_OP_MASK: {
17405 SDValue Src1 = Op.getOperand(1);
17406 SDValue Src2 = Op.getOperand(2);
17407 SDValue Src3 = Op.getOperand(3);
17408 SDValue Mask = Op.getOperand(4);
17409 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17410 if (IntrWithRoundingModeOpcode != 0) {
17411 SDValue Rnd = Op.getOperand(5);
17412 if (cast<ConstantSDNode>(Rnd)->getZExtValue() !=
17413 X86::STATIC_ROUNDING::CUR_DIRECTION)
17414 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17415 dl, Op.getValueType(),
17416 Src1, Src2, Src3, Rnd),
17417 Mask, Src1, Subtarget, DAG);
17419 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
17420 dl, Op.getValueType(),
17422 Mask, Src1, Subtarget, DAG);
17425 case CMP_MASK_CC: {
17426 // Comparison intrinsics with masks.
17427 // Example of transformation:
17428 // (i8 (int_x86_avx512_mask_pcmpeq_q_128
17429 // (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
17431 // (v8i1 (insert_subvector undef,
17432 // (v2i1 (and (PCMPEQM %a, %b),
17433 // (extract_subvector
17434 // (v8i1 (bitcast %mask)), 0))), 0))))
17435 EVT VT = Op.getOperand(1).getValueType();
17436 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17437 VT.getVectorNumElements());
17438 SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3);
17439 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17440 Mask.getValueType().getSizeInBits());
17442 if (IntrData->Type == CMP_MASK_CC) {
17443 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17444 Op.getOperand(2), Op.getOperand(3));
17446 assert(IntrData->Type == CMP_MASK && "Unexpected intrinsic type!");
17447 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17450 SDValue CmpMask = getVectorMaskingNode(Cmp, Mask,
17451 DAG.getTargetConstant(0, MaskVT),
17453 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
17454 DAG.getUNDEF(BitcastVT), CmpMask,
17455 DAG.getIntPtrConstant(0));
17456 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
17458 case COMI: { // Comparison intrinsics
17459 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
17460 SDValue LHS = Op.getOperand(1);
17461 SDValue RHS = Op.getOperand(2);
17462 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
17463 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
17464 SDValue Cond = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
17465 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17466 DAG.getConstant(X86CC, MVT::i8), Cond);
17467 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17470 return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
17471 Op.getOperand(1), Op.getOperand(2), DAG);
17473 return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl,
17474 Op.getSimpleValueType(),
17476 Op.getOperand(2), DAG),
17477 Op.getOperand(4), Op.getOperand(3), Subtarget,
17479 case COMPRESS_EXPAND_IN_REG: {
17480 SDValue Mask = Op.getOperand(3);
17481 SDValue DataToCompress = Op.getOperand(1);
17482 SDValue PassThru = Op.getOperand(2);
17483 if (isAllOnes(Mask)) // return data as is
17484 return Op.getOperand(1);
17485 EVT VT = Op.getValueType();
17486 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17487 VT.getVectorNumElements());
17488 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17489 Mask.getValueType().getSizeInBits());
17491 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17492 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17493 DAG.getIntPtrConstant(0));
17495 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToCompress,
17499 SDValue Mask = Op.getOperand(3);
17500 EVT VT = Op.getValueType();
17501 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17502 VT.getVectorNumElements());
17503 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17504 Mask.getValueType().getSizeInBits());
17506 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17507 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17508 DAG.getIntPtrConstant(0));
17509 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1),
17518 default: return SDValue(); // Don't custom lower most intrinsics.
17520 case Intrinsic::x86_avx512_mask_valign_q_512:
17521 case Intrinsic::x86_avx512_mask_valign_d_512:
17522 // Vector source operands are swapped.
17523 return getVectorMaskingNode(DAG.getNode(X86ISD::VALIGN, dl,
17524 Op.getValueType(), Op.getOperand(2),
17527 Op.getOperand(5), Op.getOperand(4),
// ptest and testp intrinsics. The intrinsics these come from are designed to
// return an integer value rather than just set flags, so lower them to the
// ptest or testp pattern plus a setcc for the result.
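// For example, @llvm.x86.sse41.ptestz(a, b) is lowered (roughly) to
//   (zext i32 (setcc COND_E, (X86ISD::PTEST a, b)))
// with the condition code and PTEST/TESTP opcode chosen below.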
17533 case Intrinsic::x86_sse41_ptestz:
17534 case Intrinsic::x86_sse41_ptestc:
17535 case Intrinsic::x86_sse41_ptestnzc:
17536 case Intrinsic::x86_avx_ptestz_256:
17537 case Intrinsic::x86_avx_ptestc_256:
17538 case Intrinsic::x86_avx_ptestnzc_256:
17539 case Intrinsic::x86_avx_vtestz_ps:
17540 case Intrinsic::x86_avx_vtestc_ps:
17541 case Intrinsic::x86_avx_vtestnzc_ps:
17542 case Intrinsic::x86_avx_vtestz_pd:
17543 case Intrinsic::x86_avx_vtestc_pd:
17544 case Intrinsic::x86_avx_vtestnzc_pd:
17545 case Intrinsic::x86_avx_vtestz_ps_256:
17546 case Intrinsic::x86_avx_vtestc_ps_256:
17547 case Intrinsic::x86_avx_vtestnzc_ps_256:
17548 case Intrinsic::x86_avx_vtestz_pd_256:
17549 case Intrinsic::x86_avx_vtestc_pd_256:
17550 case Intrinsic::x86_avx_vtestnzc_pd_256: {
17551 bool IsTestPacked = false;
17554 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
17555 case Intrinsic::x86_avx_vtestz_ps:
17556 case Intrinsic::x86_avx_vtestz_pd:
17557 case Intrinsic::x86_avx_vtestz_ps_256:
17558 case Intrinsic::x86_avx_vtestz_pd_256:
17559 IsTestPacked = true; // Fallthrough
17560 case Intrinsic::x86_sse41_ptestz:
17561 case Intrinsic::x86_avx_ptestz_256:
17563 X86CC = X86::COND_E;
17565 case Intrinsic::x86_avx_vtestc_ps:
17566 case Intrinsic::x86_avx_vtestc_pd:
17567 case Intrinsic::x86_avx_vtestc_ps_256:
17568 case Intrinsic::x86_avx_vtestc_pd_256:
17569 IsTestPacked = true; // Fallthrough
17570 case Intrinsic::x86_sse41_ptestc:
17571 case Intrinsic::x86_avx_ptestc_256:
17573 X86CC = X86::COND_B;
17575 case Intrinsic::x86_avx_vtestnzc_ps:
17576 case Intrinsic::x86_avx_vtestnzc_pd:
17577 case Intrinsic::x86_avx_vtestnzc_ps_256:
17578 case Intrinsic::x86_avx_vtestnzc_pd_256:
17579 IsTestPacked = true; // Fallthrough
17580 case Intrinsic::x86_sse41_ptestnzc:
17581 case Intrinsic::x86_avx_ptestnzc_256:
17583 X86CC = X86::COND_A;
17587 SDValue LHS = Op.getOperand(1);
17588 SDValue RHS = Op.getOperand(2);
17589 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
17590 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
17591 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17592 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
17593 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17595 case Intrinsic::x86_avx512_kortestz_w:
17596 case Intrinsic::x86_avx512_kortestc_w: {
17597 unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz_w)? X86::COND_E: X86::COND_B;
17598 SDValue LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(1));
17599 SDValue RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(2));
17600 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17601 SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
17602 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test);
17603 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17606 case Intrinsic::x86_sse42_pcmpistria128:
17607 case Intrinsic::x86_sse42_pcmpestria128:
17608 case Intrinsic::x86_sse42_pcmpistric128:
17609 case Intrinsic::x86_sse42_pcmpestric128:
17610 case Intrinsic::x86_sse42_pcmpistrio128:
17611 case Intrinsic::x86_sse42_pcmpestrio128:
17612 case Intrinsic::x86_sse42_pcmpistris128:
17613 case Intrinsic::x86_sse42_pcmpestris128:
17614 case Intrinsic::x86_sse42_pcmpistriz128:
17615 case Intrinsic::x86_sse42_pcmpestriz128: {
17619 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
17620 case Intrinsic::x86_sse42_pcmpistria128:
17621 Opcode = X86ISD::PCMPISTRI;
17622 X86CC = X86::COND_A;
17624 case Intrinsic::x86_sse42_pcmpestria128:
17625 Opcode = X86ISD::PCMPESTRI;
17626 X86CC = X86::COND_A;
17628 case Intrinsic::x86_sse42_pcmpistric128:
17629 Opcode = X86ISD::PCMPISTRI;
17630 X86CC = X86::COND_B;
17632 case Intrinsic::x86_sse42_pcmpestric128:
17633 Opcode = X86ISD::PCMPESTRI;
17634 X86CC = X86::COND_B;
17636 case Intrinsic::x86_sse42_pcmpistrio128:
17637 Opcode = X86ISD::PCMPISTRI;
17638 X86CC = X86::COND_O;
17640 case Intrinsic::x86_sse42_pcmpestrio128:
17641 Opcode = X86ISD::PCMPESTRI;
17642 X86CC = X86::COND_O;
17644 case Intrinsic::x86_sse42_pcmpistris128:
17645 Opcode = X86ISD::PCMPISTRI;
17646 X86CC = X86::COND_S;
17648 case Intrinsic::x86_sse42_pcmpestris128:
17649 Opcode = X86ISD::PCMPESTRI;
17650 X86CC = X86::COND_S;
17652 case Intrinsic::x86_sse42_pcmpistriz128:
17653 Opcode = X86ISD::PCMPISTRI;
17654 X86CC = X86::COND_E;
17656 case Intrinsic::x86_sse42_pcmpestriz128:
17657 Opcode = X86ISD::PCMPESTRI;
17658 X86CC = X86::COND_E;
17661 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17662 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17663 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps);
17664 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17665 DAG.getConstant(X86CC, MVT::i8),
17666 SDValue(PCMP.getNode(), 1));
17667 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17670 case Intrinsic::x86_sse42_pcmpistri128:
17671 case Intrinsic::x86_sse42_pcmpestri128: {
17673 if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
17674 Opcode = X86ISD::PCMPISTRI;
17676 Opcode = X86ISD::PCMPESTRI;
17678 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17679 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17680 return DAG.getNode(Opcode, dl, VTs, NewOps);
17685 static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17686 SDValue Src, SDValue Mask, SDValue Base,
17687 SDValue Index, SDValue ScaleOp, SDValue Chain,
17688 const X86Subtarget * Subtarget) {
17690 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17691 assert(C && "Invalid scale type");
17692 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17693 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17694 Index.getSimpleValueType().getVectorNumElements());
17696 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17698 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17700 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17701 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
17702 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17703 SDValue Segment = DAG.getRegister(0, MVT::i32);
17704 if (Src.getOpcode() == ISD::UNDEF)
17705 Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
17706 SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17707 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17708 SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
17709 return DAG.getMergeValues(RetOps, dl);
17712 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17713 SDValue Src, SDValue Mask, SDValue Base,
17714 SDValue Index, SDValue ScaleOp, SDValue Chain) {
17716 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17717 assert(C && "Invalid scale type");
17718 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17719 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17720 SDValue Segment = DAG.getRegister(0, MVT::i32);
17721 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17722 Index.getSimpleValueType().getVectorNumElements());
17724 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17726 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17728 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17729 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
17730 SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
17731 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17732 return SDValue(Res, 1);
17735 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17736 SDValue Mask, SDValue Base, SDValue Index,
17737 SDValue ScaleOp, SDValue Chain) {
17739 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17740 assert(C && "Invalid scale type");
17741 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17742 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17743 SDValue Segment = DAG.getRegister(0, MVT::i32);
17745 MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
17747 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17749 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17751 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17752 //SDVTList VTs = DAG.getVTList(MVT::Other);
17753 SDValue Ops[] = {MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17754 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
17755 return SDValue(Res, 0);
17758 // getReadPerformanceCounter - Handles the lowering of builtin intrinsics that
17759 // read performance monitor counters (x86_rdpmc).
17760 static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
17761 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17762 SmallVectorImpl<SDValue> &Results) {
17763 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17764 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
// The ECX register is used to select the index of the performance counter to read.
17769 SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
17771 SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);
17773 // Reads the content of a 64-bit performance counter and returns it in the
17774 // registers EDX:EAX.
17775 if (Subtarget->is64Bit()) {
17776 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17777 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17780 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17781 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17784 Chain = HI.getValue(1);
17786 if (Subtarget->is64Bit()) {
17787 // The EAX register is loaded with the low-order 32 bits. The EDX register
17788 // is loaded with the supported high-order bits of the counter.
17789 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17790 DAG.getConstant(32, MVT::i8));
17791 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17792 Results.push_back(Chain);
17796 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17797 SDValue Ops[] = { LO, HI };
17798 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17799 Results.push_back(Pair);
17800 Results.push_back(Chain);
17803 // getReadTimeStampCounter - Handles the lowering of builtin intrinsics that
17804 // read the time stamp counter (x86_rdtsc and x86_rdtscp). This function is
17805 // also used to custom lower READCYCLECOUNTER nodes.
17806 static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
17807 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17808 SmallVectorImpl<SDValue> &Results) {
17809 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
17810 SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
17813 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
17814 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
17815 // and the EAX register is loaded with the low-order 32 bits.
17816 if (Subtarget->is64Bit()) {
17817 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17818 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17821 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17822 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17825 SDValue Chain = HI.getValue(1);
17827 if (Opcode == X86ISD::RDTSCP_DAG) {
17828 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17830 // Instruction RDTSCP loads the IA32:TSC_AUX_MSR (address C000_0103H) into
17831 // the ECX register. Add 'ecx' explicitly to the chain.
17832 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
// Explicitly store the content of ECX at the location passed as input
// to the 'rdtscp' intrinsic.
17836 Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
17837 MachinePointerInfo(), false, false, 0);
17840 if (Subtarget->is64Bit()) {
17841 // The EDX register is loaded with the high-order 32 bits of the MSR, and
17842 // the EAX register is loaded with the low-order 32 bits.
17843 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17844 DAG.getConstant(32, MVT::i8));
17845 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17846 Results.push_back(Chain);
17850 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17851 SDValue Ops[] = { LO, HI };
17852 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17853 Results.push_back(Pair);
17854 Results.push_back(Chain);
17857 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
17858 SelectionDAG &DAG) {
17859 SmallVector<SDValue, 2> Results;
17861 getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
17863 return DAG.getMergeValues(Results, DL);
17867 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17868 SelectionDAG &DAG) {
17869 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
17871 const IntrinsicData* IntrData = getIntrinsicWithChain(IntNo);
17876 switch(IntrData->Type) {
17878 llvm_unreachable("Unknown Intrinsic Type");
17882 // Emit the node with the right value type.
17883 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
17884 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
17886 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
// Otherwise return the value from Rand, which is always 0, cast to i32.
17888 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
17889 DAG.getConstant(1, Op->getValueType(1)),
17890 DAG.getConstant(X86::COND_B, MVT::i32),
17891 SDValue(Result.getNode(), 1) };
17892 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
17893 DAG.getVTList(Op->getValueType(1), MVT::Glue),
17896 // Return { result, isValid, chain }.
17897 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
17898 SDValue(Result.getNode(), 2));
// gather(v1, mask, index, base, scale);
17902 SDValue Chain = Op.getOperand(0);
17903 SDValue Src = Op.getOperand(2);
17904 SDValue Base = Op.getOperand(3);
17905 SDValue Index = Op.getOperand(4);
17906 SDValue Mask = Op.getOperand(5);
17907 SDValue Scale = Op.getOperand(6);
17908 return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain,
// scatter(base, mask, index, v1, scale);
17913 SDValue Chain = Op.getOperand(0);
17914 SDValue Base = Op.getOperand(2);
17915 SDValue Mask = Op.getOperand(3);
17916 SDValue Index = Op.getOperand(4);
17917 SDValue Src = Op.getOperand(5);
17918 SDValue Scale = Op.getOperand(6);
17919 return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain);
17922 SDValue Hint = Op.getOperand(6);
const auto *HintConst = dyn_cast<ConstantSDNode>(Hint);
if (!HintConst || (HintVal = HintConst->getZExtValue()) > 1)
17926 llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
17927 unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
17928 SDValue Chain = Op.getOperand(0);
17929 SDValue Mask = Op.getOperand(2);
17930 SDValue Index = Op.getOperand(3);
17931 SDValue Base = Op.getOperand(4);
17932 SDValue Scale = Op.getOperand(5);
17933 return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain);
17935 // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
17937 SmallVector<SDValue, 2> Results;
17938 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget, Results);
17939 return DAG.getMergeValues(Results, dl);
17941 // Read Performance Monitoring Counters.
17943 SmallVector<SDValue, 2> Results;
17944 getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
17945 return DAG.getMergeValues(Results, dl);
17947 // XTEST intrinsics.
17949 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
17950 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
17951 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17952 DAG.getConstant(X86::COND_NE, MVT::i8),
17954 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
17955 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
17956 Ret, SDValue(InTrans.getNode(), 1));
17960 SmallVector<SDValue, 2> Results;
17961 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
17962 SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
17963 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
17964 DAG.getConstant(-1, MVT::i8));
17965 SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
17966 Op.getOperand(4), GenCF.getValue(1));
17967 SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
17968 Op.getOperand(5), MachinePointerInfo(),
17970 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17971 DAG.getConstant(X86::COND_B, MVT::i8),
17973 Results.push_back(SetCC);
17974 Results.push_back(Store);
17975 return DAG.getMergeValues(Results, dl);
17977 case COMPRESS_TO_MEM: {
17979 SDValue Mask = Op.getOperand(4);
17980 SDValue DataToCompress = Op.getOperand(3);
17981 SDValue Addr = Op.getOperand(2);
17982 SDValue Chain = Op.getOperand(0);
17984 if (isAllOnes(Mask)) // return just a store
17985 return DAG.getStore(Chain, dl, DataToCompress, Addr,
17986 MachinePointerInfo(), false, false, 0);
17988 EVT VT = DataToCompress.getValueType();
17989 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17990 VT.getVectorNumElements());
17991 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17992 Mask.getValueType().getSizeInBits());
17993 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17994 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17995 DAG.getIntPtrConstant(0));
17997 SDValue Compressed = DAG.getNode(IntrData->Opc0, dl, VT, VMask,
17998 DataToCompress, DAG.getUNDEF(VT));
17999 return DAG.getStore(Chain, dl, Compressed, Addr,
18000 MachinePointerInfo(), false, false, 0);
18002 case EXPAND_FROM_MEM: {
18004 SDValue Mask = Op.getOperand(4);
18005 SDValue PathThru = Op.getOperand(3);
18006 SDValue Addr = Op.getOperand(2);
18007 SDValue Chain = Op.getOperand(0);
18008 EVT VT = Op.getValueType();
18010 if (isAllOnes(Mask)) // return just a load
18011 return DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(), false, false,
18013 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18014 VT.getVectorNumElements());
18015 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18016 Mask.getValueType().getSizeInBits());
18017 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
18018 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
18019 DAG.getIntPtrConstant(0));
18021 SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(),
18022 false, false, false, 0);
18024 SmallVector<SDValue, 2> Results;
18025 Results.push_back(DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToExpand,
18027 Results.push_back(Chain);
18028 return DAG.getMergeValues(Results, dl);
18033 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
18034 SelectionDAG &DAG) const {
18035 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
18036 MFI->setReturnAddressIsTaken(true);
18038 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
18041 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
18043 EVT PtrVT = getPointerTy();
18046 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
18047 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18048 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
18049 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
18050 DAG.getNode(ISD::ADD, dl, PtrVT,
18051 FrameAddr, Offset),
18052 MachinePointerInfo(), false, false, false, 0);
18055 // Just load the return address.
18056 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
18057 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
18058 RetAddrFI, MachinePointerInfo(), false, false, false, 0);
18061 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
18062 MachineFunction &MF = DAG.getMachineFunction();
18063 MachineFrameInfo *MFI = MF.getFrameInfo();
18064 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
18065 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18066 EVT VT = Op.getValueType();
18068 MFI->setFrameAddressIsTaken(true);
18070 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
18071 // Depth > 0 makes no sense on targets which use Windows unwind codes. It
// is not possible to crawl up the stack without looking at the unwind codes.
18074 int FrameAddrIndex = FuncInfo->getFAIndex();
18075 if (!FrameAddrIndex) {
18076 // Set up a frame object for the return address.
18077 unsigned SlotSize = RegInfo->getSlotSize();
18078 FrameAddrIndex = MF.getFrameInfo()->CreateFixedObject(
18079 SlotSize, /*Offset=*/INT64_MIN, /*IsImmutable=*/false);
18080 FuncInfo->setFAIndex(FrameAddrIndex);
18082 return DAG.getFrameIndex(FrameAddrIndex, VT);
18085 unsigned FrameReg =
18086 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
18087 SDLoc dl(Op); // FIXME probably not meaningful
18088 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
18089 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
18090 (FrameReg == X86::EBP && VT == MVT::i32)) &&
18091 "Invalid Frame Register!");
18092 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
18094 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
18095 MachinePointerInfo(),
18096 false, false, false, 0);
18100 // FIXME? Maybe this could be a TableGen attribute on some registers and
18101 // this table could be generated automatically from RegInfo.
18102 unsigned X86TargetLowering::getRegisterByName(const char* RegName,
18104 unsigned Reg = StringSwitch<unsigned>(RegName)
18105 .Case("esp", X86::ESP)
18106 .Case("rsp", X86::RSP)
18110 report_fatal_error("Invalid register name global variable");
18113 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
18114 SelectionDAG &DAG) const {
18115 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18116 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
18119 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
18120 SDValue Chain = Op.getOperand(0);
18121 SDValue Offset = Op.getOperand(1);
18122 SDValue Handler = Op.getOperand(2);
18125 EVT PtrVT = getPointerTy();
18126 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18127 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
18128 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
18129 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
18130 "Invalid Frame Register!");
18131 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
18132 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
18134 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
18135 DAG.getIntPtrConstant(RegInfo->getSlotSize()));
18136 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
18137 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
18139 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
18141 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
18142 DAG.getRegister(StoreAddrReg, PtrVT));
18145 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
18146 SelectionDAG &DAG) const {
18148 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
18149 DAG.getVTList(MVT::i32, MVT::Other),
18150 Op.getOperand(0), Op.getOperand(1));
18153 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
18154 SelectionDAG &DAG) const {
18156 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
18157 Op.getOperand(0), Op.getOperand(1));
18160 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
18161 return Op.getOperand(0);
18164 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
18165 SelectionDAG &DAG) const {
18166 SDValue Root = Op.getOperand(0);
18167 SDValue Trmp = Op.getOperand(1); // trampoline
18168 SDValue FPtr = Op.getOperand(2); // nested function
18169 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
18172 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
18173 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
18175 if (Subtarget->is64Bit()) {
18176 SDValue OutChains[6];
18178 // Large code-model.
18179 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
18180 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
18182 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
18183 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
18185 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
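// The trampoline emitted below is, in effect (sketch; byte offsets match the
// stores that follow):
//   movabsq $<nested function>, %r11   ; bytes  0..9
//   movabsq $<nest parameter>,  %r10   ; bytes 10..19
//   jmpq    *%r11                      ; bytes 20..22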
18187 // Load the pointer to the nested function into R11.
18188 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
18189 SDValue Addr = Trmp;
18190 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18191 Addr, MachinePointerInfo(TrmpAddr),
18194 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18195 DAG.getConstant(2, MVT::i64));
18196 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
18197 MachinePointerInfo(TrmpAddr, 2),
18200 // Load the 'nest' parameter value into R10.
18201 // R10 is specified in X86CallingConv.td
18202 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
18203 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18204 DAG.getConstant(10, MVT::i64));
18205 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18206 Addr, MachinePointerInfo(TrmpAddr, 10),
18209 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18210 DAG.getConstant(12, MVT::i64));
18211 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
18212 MachinePointerInfo(TrmpAddr, 12),
18215 // Jump to the nested function.
18216 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
18217 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18218 DAG.getConstant(20, MVT::i64));
18219 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18220 Addr, MachinePointerInfo(TrmpAddr, 20),
18223 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
18224 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18225 DAG.getConstant(22, MVT::i64));
18226 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
18227 MachinePointerInfo(TrmpAddr, 22),
18230 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18232 const Function *Func =
18233 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
18234 CallingConv::ID CC = Func->getCallingConv();
18239 llvm_unreachable("Unsupported calling convention");
18240 case CallingConv::C:
18241 case CallingConv::X86_StdCall: {
18242 // Pass 'nest' parameter in ECX.
18243 // Must be kept in sync with X86CallingConv.td
18244 NestReg = X86::ECX;
18246 // Check that ECX wasn't needed by an 'inreg' parameter.
18247 FunctionType *FTy = Func->getFunctionType();
18248 const AttributeSet &Attrs = Func->getAttributes();
18250 if (!Attrs.isEmpty() && !Func->isVarArg()) {
18251 unsigned InRegCount = 0;
18254 for (FunctionType::param_iterator I = FTy->param_begin(),
18255 E = FTy->param_end(); I != E; ++I, ++Idx)
18256 if (Attrs.hasAttribute(Idx, Attribute::InReg))
18257 // FIXME: should only count parameters that are lowered to integers.
18258 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
18260 if (InRegCount > 2) {
report_fatal_error("Nest register in use - reduce number of inreg"
                   " parameters!");
}
18267 case CallingConv::X86_FastCall:
18268 case CallingConv::X86_ThisCall:
18269 case CallingConv::Fast:
18270 // Pass 'nest' parameter in EAX.
18271 // Must be kept in sync with X86CallingConv.td
18272 NestReg = X86::EAX;
18276 SDValue OutChains[4];
18277 SDValue Addr, Disp;
18279 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18280 DAG.getConstant(10, MVT::i32));
18281 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
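// The 32-bit trampoline emitted below is, roughly:
//   movl $<nest parameter>, %ecx|%eax   ; bytes 0..4 (0xB8+reg, imm32)
//   jmp  <nested function>              ; bytes 5..9 (0xE9, rel32)
// Disp is the jump displacement, taken relative to the end of the jmp
// instruction (Trmp + 10).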
18283 // This is storing the opcode for MOV32ri.
18284 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
18285 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
18286 OutChains[0] = DAG.getStore(Root, dl,
18287 DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
18288 Trmp, MachinePointerInfo(TrmpAddr),
18291 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18292 DAG.getConstant(1, MVT::i32));
18293 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
18294 MachinePointerInfo(TrmpAddr, 1),
18297 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
18298 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18299 DAG.getConstant(5, MVT::i32));
18300 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
18301 MachinePointerInfo(TrmpAddr, 5),
18304 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18305 DAG.getConstant(6, MVT::i32));
18306 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
18307 MachinePointerInfo(TrmpAddr, 6),
18310 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18314 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
18315 SelectionDAG &DAG) const {
The rounding mode is in bits 11:10 of FPSR, and has the following
settings:
  00 Round to nearest
  01 Round to -inf
  10 Round to +inf
  11 Round to zero

FLT_ROUNDS, on the other hand, expects the following:
  -1 Undefined
   0 Round to zero
   1 Round to nearest
   2 Round to +inf
   3 Round to -inf

To perform the conversion, we do:
  (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
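Worked example: with RC = 01 (round toward -inf), bit 11 is 0 and bit 10 is 1,
so the expression evaluates to ((0 | 2) + 1) & 3 == 3, which is exactly
FLT_ROUNDS' encoding of "round to -inf".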
18335 MachineFunction &MF = DAG.getMachineFunction();
18336 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
18337 unsigned StackAlignment = TFI.getStackAlignment();
18338 MVT VT = Op.getSimpleValueType();
18341 // Save FP Control Word to stack slot
18342 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
18343 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
18345 MachineMemOperand *MMO =
18346 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
18347 MachineMemOperand::MOStore, 2, 2);
18349 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
18350 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
18351 DAG.getVTList(MVT::Other),
18352 Ops, MVT::i16, MMO);
18354 // Load FP Control Word from stack slot
18355 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
18356 MachinePointerInfo(), false, false, false, 0);
18358 // Transform as necessary
18360 DAG.getNode(ISD::SRL, DL, MVT::i16,
18361 DAG.getNode(ISD::AND, DL, MVT::i16,
18362 CWD, DAG.getConstant(0x800, MVT::i16)),
18363 DAG.getConstant(11, MVT::i8));
18365 DAG.getNode(ISD::SRL, DL, MVT::i16,
18366 DAG.getNode(ISD::AND, DL, MVT::i16,
18367 CWD, DAG.getConstant(0x400, MVT::i16)),
18368 DAG.getConstant(9, MVT::i8));
18371 DAG.getNode(ISD::AND, DL, MVT::i16,
18372 DAG.getNode(ISD::ADD, DL, MVT::i16,
18373 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
18374 DAG.getConstant(1, MVT::i16)),
18375 DAG.getConstant(3, MVT::i16));
18377 return DAG.getNode((VT.getSizeInBits() < 16 ?
18378 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
18381 static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
18382 MVT VT = Op.getSimpleValueType();
18384 unsigned NumBits = VT.getSizeInBits();
18387 Op = Op.getOperand(0);
18388 if (VT == MVT::i8) {
// Zero-extend to i32 since there is no i8 bsr.
18391 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18394 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
18395 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18396 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18398 // If src is zero (i.e. bsr sets ZF), returns NumBits.
18401 DAG.getConstant(NumBits+NumBits-1, OpVT),
18402 DAG.getConstant(X86::COND_E, MVT::i8),
18405 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
18407 // Finally xor with NumBits-1.
18408 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
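// For i32, for example, bsr returns the index of the highest set bit, so
// ctlz(x) == 31 ^ bsr(x) == 31 - bsr(x). When x == 0 the CMOV above
// substitutes 2*NumBits-1 (63), and 63 ^ 31 == 32, the expected CTLZ of 0.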
18411 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18415 static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
18416 MVT VT = Op.getSimpleValueType();
18418 unsigned NumBits = VT.getSizeInBits();
18421 Op = Op.getOperand(0);
18422 if (VT == MVT::i8) {
// Zero-extend to i32 since there is no i8 bsr.
18425 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18428 // Issue a bsr (scan bits in reverse).
18429 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18430 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18432 // And xor with NumBits-1.
18433 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
18436 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18440 static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
18441 MVT VT = Op.getSimpleValueType();
18442 unsigned NumBits = VT.getSizeInBits();
18444 Op = Op.getOperand(0);
18446 // Issue a bsf (scan bits forward) which also sets EFLAGS.
18447 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
18448 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);
18450 // If src is zero (i.e. bsf sets ZF), returns NumBits.
18453 DAG.getConstant(NumBits, VT),
18454 DAG.getConstant(X86::COND_E, MVT::i8),
18457 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
18460 // Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
18461 // ones, and then concatenate the result back.
18462 static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
18463 MVT VT = Op.getSimpleValueType();
18465 assert(VT.is256BitVector() && VT.isInteger() &&
18466 "Unsupported value type for operation");
18468 unsigned NumElems = VT.getVectorNumElements();
18471 // Extract the LHS vectors
18472 SDValue LHS = Op.getOperand(0);
18473 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
18474 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
18476 // Extract the RHS vectors
18477 SDValue RHS = Op.getOperand(1);
18478 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
18479 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
18481 MVT EltVT = VT.getVectorElementType();
18482 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
18484 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
18485 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
18486 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
18489 static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) {
18490 assert(Op.getSimpleValueType().is256BitVector() &&
18491 Op.getSimpleValueType().isInteger() &&
18492 "Only handle AVX 256-bit vector integer operation");
18493 return Lower256IntArith(Op, DAG);
18496 static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
18497 assert(Op.getSimpleValueType().is256BitVector() &&
18498 Op.getSimpleValueType().isInteger() &&
18499 "Only handle AVX 256-bit vector integer operation");
18500 return Lower256IntArith(Op, DAG);
18503 static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
18504 SelectionDAG &DAG) {
18506 MVT VT = Op.getSimpleValueType();
18508 // Decompose 256-bit ops into smaller 128-bit ops.
18509 if (VT.is256BitVector() && !Subtarget->hasInt256())
18510 return Lower256IntArith(Op, DAG);
18512 SDValue A = Op.getOperand(0);
18513 SDValue B = Op.getOperand(1);
18515 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
18516 if (VT == MVT::v4i32) {
18517 assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() &&
18518 "Should not custom lower when pmuldq is available!");
18520 // Extract the odd parts.
18521 static const int UnpackMask[] = { 1, -1, 3, -1 };
18522 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
18523 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
18525 // Multiply the even parts.
18526 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B);
18527 // Now multiply odd parts.
18528 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds);
18530 Evens = DAG.getNode(ISD::BITCAST, dl, VT, Evens);
18531 Odds = DAG.getNode(ISD::BITCAST, dl, VT, Odds);
18533 // Merge the two vectors back together with a shuffle. This expands into 2
18535 static const int ShufMask[] = { 0, 4, 2, 6 };
18536 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
18539 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
18540 "Only know how to lower V2I64/V4I64/V8I64 multiply");
18542 // Ahi = psrlqi(a, 32);
18543 // Bhi = psrlqi(b, 32);
18545 // AloBlo = pmuludq(a, b);
18546 // AloBhi = pmuludq(a, Bhi);
18547 // AhiBlo = pmuludq(Ahi, b);
18549 // AloBhi = psllqi(AloBhi, 32);
18550 // AhiBlo = psllqi(AhiBlo, 32);
18551 // return AloBlo + AloBhi + AhiBlo;
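// This is simply long multiplication on 32-bit halves:
//   a * b = (Alo + (Ahi << 32)) * (Blo + (Bhi << 32))
//         = Alo*Blo + ((Alo*Bhi + Ahi*Blo) << 32)   (mod 2^64)
// The Ahi*Bhi term is shifted by 64 and therefore drops out of the low
// 64 bits entirely.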
18553 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
18554 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
18556 // Bit cast to 32-bit vectors for MULUDQ
18557 EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 :
18558 (VT == MVT::v4i64) ? MVT::v8i32 : MVT::v16i32;
18559 A = DAG.getNode(ISD::BITCAST, dl, MulVT, A);
18560 B = DAG.getNode(ISD::BITCAST, dl, MulVT, B);
18561 Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi);
18562 Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi);
18564 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
18565 SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
18566 SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
18568 AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG);
18569 AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG);
18571 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
18572 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
18575 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
18576 assert(Subtarget->isTargetWin64() && "Unexpected target");
18577 EVT VT = Op.getValueType();
18578 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
18579 "Unexpected return type for lowering");
18583 switch (Op->getOpcode()) {
18584 default: llvm_unreachable("Unexpected request for libcall!");
18585 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
18586 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
18587 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
18588 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
18589 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
18590 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
18594 SDValue InChain = DAG.getEntryNode();
18596 TargetLowering::ArgListTy Args;
18597 TargetLowering::ArgListEntry Entry;
18598 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
18599 EVT ArgVT = Op->getOperand(i).getValueType();
18600 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
18601 "Unexpected argument type for lowering");
18602 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
18603 Entry.Node = StackPtr;
18604 InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MachinePointerInfo(),
18606 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
18607 Entry.Ty = PointerType::get(ArgTy,0);
18608 Entry.isSExt = false;
18609 Entry.isZExt = false;
18610 Args.push_back(Entry);
18613 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
18616 TargetLowering::CallLoweringInfo CLI(DAG);
18617 CLI.setDebugLoc(dl).setChain(InChain)
18618 .setCallee(getLibcallCallingConv(LC),
18619 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
18620 Callee, std::move(Args), 0)
18621 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
18623 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
18624 return DAG.getNode(ISD::BITCAST, dl, VT, CallInfo.first);
18627 static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
18628 SelectionDAG &DAG) {
18629 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
18630 EVT VT = Op0.getValueType();
18633 assert((VT == MVT::v4i32 && Subtarget->hasSSE2()) ||
18634 (VT == MVT::v8i32 && Subtarget->hasInt256()));
// PMULxD operations multiply each even-indexed value (starting at 0) of LHS
// with the corresponding value of RHS and produce a widened result.
// E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
// => <2 x i64> <ae|cg>
//
// In other words, to get all the results we need to perform two PMULxD:
// 1. one with the even values.
// 2. one with the odd values.
// To achieve #2, we need to place the odd values at an even position.
18646 // Place the odd value at an even position (basically, shift all values 1
18647 // step to the left):
18648 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
18649 // <a|b|c|d> => <b|undef|d|undef>
18650 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0, Mask);
18651 // <e|f|g|h> => <f|undef|h|undef>
18652 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1, Mask);
18654 // Emit two multiplies, one for the lower 2 ints and one for the higher 2
18656 MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
18657 bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
18659 (!IsSigned || !Subtarget->hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
18660 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18661 // => <2 x i64> <ae|cg>
18662 SDValue Mul1 = DAG.getNode(ISD::BITCAST, dl, VT,
18663 DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
18664 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
18665 // => <2 x i64> <bf|dh>
18666 SDValue Mul2 = DAG.getNode(ISD::BITCAST, dl, VT,
18667 DAG.getNode(Opcode, dl, MulVT, Odd0, Odd1));
18669 // Shuffle it back into the right order.
18670 SDValue Highs, Lows;
18671 if (VT == MVT::v8i32) {
18672 const int HighMask[] = {1, 9, 3, 11, 5, 13, 7, 15};
18673 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18674 const int LowMask[] = {0, 8, 2, 10, 4, 12, 6, 14};
18675 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18677 const int HighMask[] = {1, 5, 3, 7};
18678 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18679 const int LowMask[] = {0, 4, 2, 6};
18680 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18683 // If we have a signed multiply but no PMULDQ, fix up the high parts of an
18684 // unsigned multiply.
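// Sketch of the correction (added for clarity): for signed lanes,
//   (a *s b)_hi = (a *u b)_hi - (a < 0 ? b : 0) - (b < 0 ? a : 0).
// T1 and T2 below build the two conditional terms with an arithmetic shift by
// 31 (an all-ones mask when the operand is negative) ANDed with the other
// operand, and their sum is subtracted from the unsigned high halves.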
18685 if (IsSigned && !Subtarget->hasSSE41()) {
18687 SDValue ShAmt = DAG.getConstant(31, DAG.getTargetLoweringInfo().getShiftAmountTy(VT));
18688 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
18689 DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
18690 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
18691 DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0);
18693 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
18694 Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup);
18697 // The first result of MUL_LOHI is actually the low value, followed by the high one.
18699 SDValue Ops[] = {Lows, Highs};
18700 return DAG.getMergeValues(Ops, dl);
18703 static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
18704 const X86Subtarget *Subtarget) {
18705 MVT VT = Op.getSimpleValueType();
18707 SDValue R = Op.getOperand(0);
18708 SDValue Amt = Op.getOperand(1);
18710 // Optimize shl/srl/sra with constant shift amount.
18711 if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
18712 if (auto *ShiftConst = BVAmt->getConstantSplatNode()) {
18713 uint64_t ShiftAmt = ShiftConst->getZExtValue();
18715 if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
18716 (Subtarget->hasInt256() &&
18717 (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18718 (Subtarget->hasAVX512() &&
18719 (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18720 if (Op.getOpcode() == ISD::SHL)
18721 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
18723 if (Op.getOpcode() == ISD::SRL)
18724 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
18726 if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64)
18727 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
18731 if (VT == MVT::v16i8) {
18732 if (Op.getOpcode() == ISD::SHL) {
18733 // Make a large shift.
18734 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18735 MVT::v8i16, R, ShiftAmt,
18737 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18738 // Zero out the rightmost bits.
18739 SmallVector<SDValue, 16> V(16,
18740 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18742 return DAG.getNode(ISD::AND, dl, VT, SHL,
18743 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18745 if (Op.getOpcode() == ISD::SRL) {
18746 // Make a large shift.
18747 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18748 MVT::v8i16, R, ShiftAmt,
18750 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18751 // Zero out the leftmost bits.
18752 SmallVector<SDValue, 16> V(16,
18753 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18755 return DAG.getNode(ISD::AND, dl, VT, SRL,
18756 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18758 if (Op.getOpcode() == ISD::SRA) {
18759 if (ShiftAmt == 7) {
18760 // R s>> 7 === R s< 0
18761 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18762 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18765 // R s>> a === ((R u>> a) ^ m) - m
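// Illustrative i8 example with a = 2: m = 128 >> 2 = 0x20. For R = 0xF0 (-16),
// R u>> 2 = 0x3C, 0x3C ^ 0x20 = 0x1C, and 0x1C - 0x20 = 0xFC (-4), which is
// exactly -16 s>> 2; the XOR/SUB pair restores the sign bits the logical
// shift discarded.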
18766 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18767 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt,
18769 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18770 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18771 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18774 llvm_unreachable("Unknown shift opcode.");
18777 if (Subtarget->hasInt256() && VT == MVT::v32i8) {
18778 if (Op.getOpcode() == ISD::SHL) {
18779 // Make a large shift.
18780 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18781 MVT::v16i16, R, ShiftAmt,
18783 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18784 // Zero out the rightmost bits.
18785 SmallVector<SDValue, 32> V(32,
18786 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18788 return DAG.getNode(ISD::AND, dl, VT, SHL,
18789 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18791 if (Op.getOpcode() == ISD::SRL) {
18792 // Make a large shift.
18793 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18794 MVT::v16i16, R, ShiftAmt,
18796 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18797 // Zero out the leftmost bits.
18798 SmallVector<SDValue, 32> V(32,
18799 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18801 return DAG.getNode(ISD::AND, dl, VT, SRL,
18802 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18804 if (Op.getOpcode() == ISD::SRA) {
18805 if (ShiftAmt == 7) {
18806 // R s>> 7 === R s< 0
18807 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18808 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18811 // R s>> a === ((R u>> a) ^ m) - m
18812 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18813 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt,
18815 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18816 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18817 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18820 llvm_unreachable("Unknown shift opcode.");
18825 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
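// Illustrative note: on a 32-bit target a v2i64 shift amount typically appears
// as (v2i64 (bitcast (v4i32 build_vector <lo0, hi0, lo1, hi1>))); the code
// below stitches each lo/hi pair back into a single 64-bit constant and only
// proceeds if every element requests the same shift amount.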
18826 if (!Subtarget->is64Bit() &&
18827 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) &&
18828 Amt.getOpcode() == ISD::BITCAST &&
18829 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
18830 Amt = Amt.getOperand(0);
18831 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
18832 VT.getVectorNumElements();
18833 unsigned RatioInLog2 = Log2_32_Ceil(Ratio);
18834 uint64_t ShiftAmt = 0;
18835 for (unsigned i = 0; i != Ratio; ++i) {
18836 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i));
18840 ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2)));
18842 // Check remaining shift amounts.
18843 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
18844 uint64_t ShAmt = 0;
18845 for (unsigned j = 0; j != Ratio; ++j) {
18846 ConstantSDNode *C =
18847 dyn_cast<ConstantSDNode>(Amt.getOperand(i + j));
18851 ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2)));
18853 if (ShAmt != ShiftAmt)
18856 switch (Op.getOpcode()) {
18858 llvm_unreachable("Unknown shift opcode!");
18860 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
18863 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
18866 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
18874 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
18875 const X86Subtarget* Subtarget) {
18876 MVT VT = Op.getSimpleValueType();
18878 SDValue R = Op.getOperand(0);
18879 SDValue Amt = Op.getOperand(1);
18881 if ((VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) ||
18882 VT == MVT::v4i32 || VT == MVT::v8i16 ||
18883 (Subtarget->hasInt256() &&
18884 ((VT == MVT::v4i64 && Op.getOpcode() != ISD::SRA) ||
18885 VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18886 (Subtarget->hasAVX512() && (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18888 EVT EltVT = VT.getVectorElementType();
18890 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Amt)) {
18891 // Check if this build_vector node is doing a splat.
18892 // If so, then set BaseShAmt equal to the splat value.
18893 BaseShAmt = BV->getSplatValue();
18894 if (BaseShAmt && BaseShAmt.getOpcode() == ISD::UNDEF)
18895 BaseShAmt = SDValue();
18897 if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
18898 Amt = Amt.getOperand(0);
18900 ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt);
18901 if (SVN && SVN->isSplat()) {
18902 unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
18903 SDValue InVec = Amt.getOperand(0);
18904 if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
18905 assert((SplatIdx < InVec.getValueType().getVectorNumElements()) &&
18906 "Unexpected shuffle index found!");
18907 BaseShAmt = InVec.getOperand(SplatIdx);
18908 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
18909 if (ConstantSDNode *C =
18910 dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
18911 if (C->getZExtValue() == SplatIdx)
18912 BaseShAmt = InVec.getOperand(1);
18917 // Avoid introducing an extract element from a shuffle.
18918 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec,
18919 DAG.getIntPtrConstant(SplatIdx));
18923 if (BaseShAmt.getNode()) {
18924 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
18925 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
18926 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
18927 else if (EltVT.bitsLT(MVT::i32))
18928 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
18930 switch (Op.getOpcode()) {
18932 llvm_unreachable("Unknown shift opcode!");
18934 switch (VT.SimpleTy) {
18935 default: return SDValue();
18944 return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG);
18947 switch (VT.SimpleTy) {
18948 default: return SDValue();
18955 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG);
18958 switch (VT.SimpleTy) {
18959 default: return SDValue();
18968 return getTargetVShiftNode(X86ISD::VSRLI, dl, VT, R, BaseShAmt, DAG);
18974 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
18975 if (!Subtarget->is64Bit() &&
18976 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64) ||
18977 (Subtarget->hasAVX512() && VT == MVT::v8i64)) &&
18978 Amt.getOpcode() == ISD::BITCAST &&
18979 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
18980 Amt = Amt.getOperand(0);
18981 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
18982 VT.getVectorNumElements();
18983 std::vector<SDValue> Vals(Ratio);
18984 for (unsigned i = 0; i != Ratio; ++i)
18985 Vals[i] = Amt.getOperand(i);
18986 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
18987 for (unsigned j = 0; j != Ratio; ++j)
18988 if (Vals[j] != Amt.getOperand(i + j))
18991 switch (Op.getOpcode()) {
18993 llvm_unreachable("Unknown shift opcode!");
18995 return DAG.getNode(X86ISD::VSHL, dl, VT, R, Op.getOperand(1));
18997 return DAG.getNode(X86ISD::VSRL, dl, VT, R, Op.getOperand(1));
18999 return DAG.getNode(X86ISD::VSRA, dl, VT, R, Op.getOperand(1));
19006 static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
19007 SelectionDAG &DAG) {
19008 MVT VT = Op.getSimpleValueType();
19010 SDValue R = Op.getOperand(0);
19011 SDValue Amt = Op.getOperand(1);
19014 assert(VT.isVector() && "Custom lowering only for vector shifts!");
19015 assert(Subtarget->hasSSE2() && "Only custom lower when we have SSE2!");
19017 V = LowerScalarImmediateShift(Op, DAG, Subtarget);
19021 V = LowerScalarVariableShift(Op, DAG, Subtarget);
19025 if (Subtarget->hasAVX512() && (VT == MVT::v16i32 || VT == MVT::v8i64))
19027 // AVX2 has VPSLLV/VPSRAV/VPSRLV.
19028 if (Subtarget->hasInt256()) {
19029 if (Op.getOpcode() == ISD::SRL &&
19030 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
19031 VT == MVT::v4i64 || VT == MVT::v8i32))
19033 if (Op.getOpcode() == ISD::SHL &&
19034 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
19035 VT == MVT::v4i64 || VT == MVT::v8i32))
19037 if (Op.getOpcode() == ISD::SRA && (VT == MVT::v4i32 || VT == MVT::v8i32))
19041 // If possible, lower this packed shift into a vector multiply instead of
19042 // expanding it into a sequence of scalar shifts.
19043 // Do this only if the vector shift count is a constant build_vector.
19044 if (Op.getOpcode() == ISD::SHL &&
19045 (VT == MVT::v8i16 || VT == MVT::v4i32 ||
19046 (Subtarget->hasInt256() && VT == MVT::v16i16)) &&
19047 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
19048 SmallVector<SDValue, 8> Elts;
19049 EVT SVT = VT.getScalarType();
19050 unsigned SVTBits = SVT.getSizeInBits();
19051 const APInt &One = APInt(SVTBits, 1);
19052 unsigned NumElems = VT.getVectorNumElements();
19054 for (unsigned i=0; i !=NumElems; ++i) {
19055 SDValue Op = Amt->getOperand(i);
19056 if (Op->getOpcode() == ISD::UNDEF) {
19057 Elts.push_back(Op);
19061 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
19062 const APInt &C = APInt(SVTBits, ND->getAPIntValue().getZExtValue());
19063 uint64_t ShAmt = C.getZExtValue();
19064 if (ShAmt >= SVTBits) {
19065 Elts.push_back(DAG.getUNDEF(SVT));
19068 Elts.push_back(DAG.getConstant(One.shl(ShAmt), SVT));
19070 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
19071 return DAG.getNode(ISD::MUL, dl, VT, R, BV);
19074 // Lower SHL with variable shift amount.
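// Explanatory note on the v4i32 path below: shifting the amount into the
// exponent field (bit 23) and adding the bias 0x3f800000 (the encoding of
// 1.0f) forms the float 2^Amt in each lane; FP_TO_SINT turns that back into
// an integer power of two, so the variable shift becomes a vector multiply.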
19075 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
19076 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT));
19078 Op = DAG.getNode(ISD::ADD, dl, VT, Op, DAG.getConstant(0x3f800000U, VT));
19079 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
19080 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
19081 return DAG.getNode(ISD::MUL, dl, VT, Op, R);
19084 // If possible, lower this shift as a sequence of two shifts by
19085 // constant plus a MOVSS/MOVSD instead of scalarizing it.
19087 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
19089 // Could be rewritten as:
19090 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
19092 // The advantage is that the two shifts from the example would be
19093 // lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
19094 // the vector shift into four scalar shifts plus four pairs of vector insert/extract.
19096 if ((VT == MVT::v8i16 || VT == MVT::v4i32) &&
19097 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
19098 unsigned TargetOpcode = X86ISD::MOVSS;
19099 bool CanBeSimplified;
19100 // The splat value for the first packed shift (the 'X' from the example).
19101 SDValue Amt1 = Amt->getOperand(0);
19102 // The splat value for the second packed shift (the 'Y' from the example).
19103 SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) :
19104 Amt->getOperand(2);
19106 // See if it is possible to replace this node with a sequence of
19107 // two shifts followed by a MOVSS/MOVSD
19108 if (VT == MVT::v4i32) {
19109 // Check if it is legal to use a MOVSS.
19110 CanBeSimplified = Amt2 == Amt->getOperand(2) &&
19111 Amt2 == Amt->getOperand(3);
19112 if (!CanBeSimplified) {
19113 // Otherwise, check if we can still simplify this node using a MOVSD.
19114 CanBeSimplified = Amt1 == Amt->getOperand(1) &&
19115 Amt->getOperand(2) == Amt->getOperand(3);
19116 TargetOpcode = X86ISD::MOVSD;
19117 Amt2 = Amt->getOperand(2);
19120 // Do similar checks for the case where the machine value type is MVT::v8i16.
19122 CanBeSimplified = Amt1 == Amt->getOperand(1);
19123 for (unsigned i=3; i != 8 && CanBeSimplified; ++i)
19124 CanBeSimplified = Amt2 == Amt->getOperand(i);
19126 if (!CanBeSimplified) {
19127 TargetOpcode = X86ISD::MOVSD;
19128 CanBeSimplified = true;
19129 Amt2 = Amt->getOperand(4);
19130 for (unsigned i=0; i != 4 && CanBeSimplified; ++i)
19131 CanBeSimplified = Amt1 == Amt->getOperand(i);
19132 for (unsigned j=4; j != 8 && CanBeSimplified; ++j)
19133 CanBeSimplified = Amt2 == Amt->getOperand(j);
19137 if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
19138 isa<ConstantSDNode>(Amt2)) {
19139 // Replace this node with two shifts followed by a MOVSS/MOVSD.
19140 EVT CastVT = MVT::v4i32;
19142 DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), VT);
19143 SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
19145 DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), VT);
19146 SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
19147 if (TargetOpcode == X86ISD::MOVSD)
19148 CastVT = MVT::v2i64;
19149 SDValue BitCast1 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift1);
19150 SDValue BitCast2 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift2);
19151 SDValue Result = getTargetShuffleNode(TargetOpcode, dl, CastVT, BitCast2,
19153 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
19157 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
19158 assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq.");
19161 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, VT));
19162 Op = DAG.getNode(ISD::BITCAST, dl, VT, Op);
19164 // Turn 'a' into a mask suitable for VSELECT
19165 SDValue VSelM = DAG.getConstant(0x80, VT);
19166 SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19167 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19169 SDValue CM1 = DAG.getConstant(0x0f, VT);
19170 SDValue CM2 = DAG.getConstant(0x3f, VT);
19172 // r = VSELECT(r, psllw(r & (char16)15, 4), a);
19173 SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1);
19174 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 4, DAG);
19175 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19176 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19179 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19180 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19181 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19183 // r = VSELECT(r, psllw(r & (char16)63, 2), a);
19184 M = DAG.getNode(ISD::AND, dl, VT, R, CM2);
19185 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 2, DAG);
19186 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19187 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19190 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19191 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19192 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19194 // return VSELECT(r, r+r, a);
19195 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel,
19196 DAG.getNode(ISD::ADD, dl, VT, R, R), R);
19200 // It's worth extending once and using the v8i32 shifts for 16-bit types, but
19201 // the extra overheads to get from v16i8 to v8i32 make the existing SSE
19202 // solution better.
19203 if (Subtarget->hasInt256() && VT == MVT::v8i16) {
19204 MVT NewVT = VT == MVT::v8i16 ? MVT::v8i32 : MVT::v16i16;
19206 Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
19207 R = DAG.getNode(ExtOpc, dl, NewVT, R);
19208 Amt = DAG.getNode(ISD::ANY_EXTEND, dl, NewVT, Amt);
19209 return DAG.getNode(ISD::TRUNCATE, dl, VT,
19210 DAG.getNode(Op.getOpcode(), dl, NewVT, R, Amt));
19213 // Decompose 256-bit shifts into smaller 128-bit shifts.
19214 if (VT.is256BitVector()) {
19215 unsigned NumElems = VT.getVectorNumElements();
19216 MVT EltVT = VT.getVectorElementType();
19217 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19219 // Extract the two vectors
19220 SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
19221 SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);
19223 // Recreate the shift amount vectors
19224 SDValue Amt1, Amt2;
19225 if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
19226 // Constant shift amount
19227 SmallVector<SDValue, 4> Amt1Csts;
19228 SmallVector<SDValue, 4> Amt2Csts;
19229 for (unsigned i = 0; i != NumElems/2; ++i)
19230 Amt1Csts.push_back(Amt->getOperand(i));
19231 for (unsigned i = NumElems/2; i != NumElems; ++i)
19232 Amt2Csts.push_back(Amt->getOperand(i));
19234 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
19235 Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt2Csts);
19237 // Variable shift amount
19238 Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
19239 Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
19242 // Issue new vector shifts for the smaller types
19243 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
19244 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);
19246 // Concatenate the result back
19247 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
19253 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
19254 // Lower the "add/sub/mul with overflow" instruction into a regular arithmetic instruction plus
19255 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
19256 // looks for this combo and may remove the "setcc" instruction if the "setcc"
19257 // has only one use.
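// For instance (a rough sketch of the resulting DAG): an i32 uadd.with.overflow
// becomes an X86ISD::ADD producing {sum, EFLAGS} plus an X86ISD::SETCC that
// reads COND_B (carry); the signed variants test COND_O (overflow) instead.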
19258 SDNode *N = Op.getNode();
19259 SDValue LHS = N->getOperand(0);
19260 SDValue RHS = N->getOperand(1);
19261 unsigned BaseOp = 0;
19264 switch (Op.getOpcode()) {
19265 default: llvm_unreachable("Unknown ovf instruction!");
19267 // An add of one will be selected as an INC. Note that INC doesn't
19268 // set CF, so we can't do this for UADDO.
19269 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19271 BaseOp = X86ISD::INC;
19272 Cond = X86::COND_O;
19275 BaseOp = X86ISD::ADD;
19276 Cond = X86::COND_O;
19279 BaseOp = X86ISD::ADD;
19280 Cond = X86::COND_B;
19283 // A subtract of one will be selected as a DEC. Note that DEC doesn't
19284 // set CF, so we can't do this for USUBO.
19285 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19287 BaseOp = X86ISD::DEC;
19288 Cond = X86::COND_O;
19291 BaseOp = X86ISD::SUB;
19292 Cond = X86::COND_O;
19295 BaseOp = X86ISD::SUB;
19296 Cond = X86::COND_B;
19299 BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL;
19300 Cond = X86::COND_O;
19302 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
19303 if (N->getValueType(0) == MVT::i8) {
19304 BaseOp = X86ISD::UMUL8;
19305 Cond = X86::COND_O;
19308 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
19310 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);
19313 DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
19314 DAG.getConstant(X86::COND_O, MVT::i32),
19315 SDValue(Sum.getNode(), 2));
19317 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19321 // Also sets EFLAGS.
19322 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
19323 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
19326 DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
19327 DAG.getConstant(Cond, MVT::i32),
19328 SDValue(Sum.getNode(), 1));
19330 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19333 // Sign extension of the low part of vector elements. This may be used either
19334 // when sign extend instructions are not available or if the vector element
19335 // sizes already match the sign-extended size. If the vector elements are in
19336 // their pre-extended size and sign extend instructions are available, that will
19337 // be handled by LowerSIGN_EXTEND.
19338 SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
19339 SelectionDAG &DAG) const {
19341 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
19342 MVT VT = Op.getSimpleValueType();
19344 if (!Subtarget->hasSSE2() || !VT.isVector())
19347 unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
19348 ExtraVT.getScalarType().getSizeInBits();
19350 switch (VT.SimpleTy) {
19351 default: return SDValue();
19354 if (!Subtarget->hasFp256())
19356 if (!Subtarget->hasInt256()) {
19357 // needs to be split
19358 unsigned NumElems = VT.getVectorNumElements();
19360 // Extract the LHS vectors
19361 SDValue LHS = Op.getOperand(0);
19362 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
19363 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
19365 MVT EltVT = VT.getVectorElementType();
19366 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19368 EVT ExtraEltVT = ExtraVT.getVectorElementType();
19369 unsigned ExtraNumElems = ExtraVT.getVectorNumElements();
19370 ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT,
19372 SDValue Extra = DAG.getValueType(ExtraVT);
19374 LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra);
19375 LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra);
19377 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);
19382 SDValue Op0 = Op.getOperand(0);
19384 // This is a sign extension of some low part of vector elements without
19385 // changing the size of the vector elements themselves:
19386 // Shift-Left + Shift-Right-Algebraic.
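// Illustrative example: sign-extending the low 8 bits of each i32 lane uses
// BitsDiff = 24, so a lane holding 0x00000080 becomes 0x80000000 after the
// VSHLI and 0xFFFFFF80 after the VSRAI, i.e. the i8 value -128 widened to i32.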
19387 SDValue Shl = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Op0,
19389 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Shl, BitsDiff,
19395 /// Returns true if the operand type is exactly twice the native width, and
19396 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
19397 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
19398 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
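/// For example (illustrative): an i64 atomic on a 32-bit target needs
/// CMPXCHG8B, and an i128 atomic on a 64-bit target needs CMPXCHG16B when the
/// subtarget provides it; operations wider than that become library calls.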
19399 bool X86TargetLowering::needsCmpXchgNb(const Type *MemType) const {
19400 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
19403 return !Subtarget->is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
19404 else if (OpWidth == 128)
19405 return Subtarget->hasCmpxchg16b();
19410 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
19411 return needsCmpXchgNb(SI->getValueOperand()->getType());
19414 // Note: this turns large loads into lock cmpxchg8b/16b.
19415 // FIXME: On 32 bits x86, fild/movq might be faster than lock cmpxchg8b.
19416 bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
19417 auto PTy = cast<PointerType>(LI->getPointerOperand()->getType());
19418 return needsCmpXchgNb(PTy->getElementType());
19421 bool X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
19422 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19423 const Type *MemType = AI->getType();
19425 // If the operand is too big, we must see if cmpxchg8/16b is available
19426 // and default to library calls otherwise.
19427 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19428 return needsCmpXchgNb(MemType);
19430 AtomicRMWInst::BinOp Op = AI->getOperation();
19433 llvm_unreachable("Unknown atomic operation");
19434 case AtomicRMWInst::Xchg:
19435 case AtomicRMWInst::Add:
19436 case AtomicRMWInst::Sub:
19437 // It's better to use xadd, xsub or xchg for these in all cases.
19439 case AtomicRMWInst::Or:
19440 case AtomicRMWInst::And:
19441 case AtomicRMWInst::Xor:
19442 // If the atomicrmw's result isn't actually used, we can just add a "lock"
19443 // prefix to a normal instruction for these operations.
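// E.g. (sketch): an 'atomicrmw or' whose result is unused can be selected as a
// single 'lock or' against memory, whereas a used result forces a CMPXCHG loop
// to recover the old value.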
19444 return !AI->use_empty();
19445 case AtomicRMWInst::Nand:
19446 case AtomicRMWInst::Max:
19447 case AtomicRMWInst::Min:
19448 case AtomicRMWInst::UMax:
19449 case AtomicRMWInst::UMin:
19450 // These always require a non-trivial set of data operations on x86. We must
19451 // use a cmpxchg loop.
19456 static bool hasMFENCE(const X86Subtarget& Subtarget) {
19457 // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
19458 // no-sse2). There isn't any reason to disable it if the target processor supports it.
19460 return Subtarget.hasSSE2() || Subtarget.is64Bit();
19464 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
19465 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19466 const Type *MemType = AI->getType();
19467 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
19468 // there is no benefit in turning such RMWs into loads, and it is actually
19469 // harmful as it introduces a mfence.
19470 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19473 auto Builder = IRBuilder<>(AI);
19474 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
19475 auto SynchScope = AI->getSynchScope();
19476 // We must restrict the ordering to avoid generating loads with Release or
19477 // ReleaseAcquire orderings.
19478 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
19479 auto Ptr = AI->getPointerOperand();
19481 // Before the load we need a fence. Here is an example lifted from
19482 // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
19483 // is required:
19484 // Thread 0:
19485 //   x.store(1, relaxed);
19486 //   r1 = y.fetch_add(0, release);
19487 // Thread 1:
19488 //   y.fetch_add(42, acquire);
19489 //   r2 = x.load(relaxed);
19490 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
19491 // lowered to just a load without a fence. A mfence flushes the store buffer,
19492 // making the optimization clearly correct.
19493 // FIXME: it is required if isAtLeastRelease(Order) but it is not clear
19494 // otherwise, we might be able to be more aggressive on relaxed idempotent
19495 // rmw. In practice, they do not look useful, so we don't try to be
19496 // especially clever.
19497 if (SynchScope == SingleThread) {
19498 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
19499 // the IR level, so we must wrap it in an intrinsic.
19501 } else if (hasMFENCE(*Subtarget)) {
19502 Function *MFence = llvm::Intrinsic::getDeclaration(M,
19503 Intrinsic::x86_sse2_mfence);
19504 Builder.CreateCall(MFence);
19506 // FIXME: it might make sense to use a locked operation here but on a
19507 // different cache-line to prevent cache-line bouncing. In practice it
19508 // is probably a small win, and x86 processors without mfence are rare
19509 // enough that we do not bother.
19513 // Finally we can emit the atomic load.
19514 LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
19515 AI->getType()->getPrimitiveSizeInBits());
19516 Loaded->setAtomic(Order, SynchScope);
19517 AI->replaceAllUsesWith(Loaded);
19518 AI->eraseFromParent();
19522 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
19523 SelectionDAG &DAG) {
19525 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
19526 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
19527 SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
19528 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
19530 // The only fence that needs an instruction is a sequentially-consistent
19531 // cross-thread fence.
19532 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
19533 if (hasMFENCE(*Subtarget))
19534 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
19536 SDValue Chain = Op.getOperand(0);
19537 SDValue Zero = DAG.getConstant(0, MVT::i32);
19539 DAG.getRegister(X86::ESP, MVT::i32), // Base
19540 DAG.getTargetConstant(1, MVT::i8), // Scale
19541 DAG.getRegister(0, MVT::i32), // Index
19542 DAG.getTargetConstant(0, MVT::i32), // Disp
19543 DAG.getRegister(0, MVT::i32), // Segment.
19547 SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
19548 return SDValue(Res, 0);
19551 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
19552 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
19555 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
19556 SelectionDAG &DAG) {
19557 MVT T = Op.getSimpleValueType();
19561 switch(T.SimpleTy) {
19562 default: llvm_unreachable("Invalid value type!");
19563 case MVT::i8: Reg = X86::AL; size = 1; break;
19564 case MVT::i16: Reg = X86::AX; size = 2; break;
19565 case MVT::i32: Reg = X86::EAX; size = 4; break;
19567 assert(Subtarget->is64Bit() && "Node not type legal!");
19568 Reg = X86::RAX; size = 8;
19571 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
19572 Op.getOperand(2), SDValue());
19573 SDValue Ops[] = { cpIn.getValue(0),
19576 DAG.getTargetConstant(size, MVT::i8),
19577 cpIn.getValue(1) };
19578 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
19579 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
19580 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
19584 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
19585 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
19586 MVT::i32, cpOut.getValue(2));
19587 SDValue Success = DAG.getNode(X86ISD::SETCC, DL, Op->getValueType(1),
19588 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
19590 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
19591 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
19592 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
19596 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
19597 SelectionDAG &DAG) {
19598 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
19599 MVT DstVT = Op.getSimpleValueType();
19601 if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) {
19602 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
19603 if (DstVT != MVT::f64)
19604 // This conversion needs to be expanded.
19607 SDValue InVec = Op->getOperand(0);
19609 unsigned NumElts = SrcVT.getVectorNumElements();
19610 EVT SVT = SrcVT.getVectorElementType();
19612 // Widen the input vector in the case of MVT::v2i32.
19613 // Example: from MVT::v2i32 to MVT::v4i32.
19614 SmallVector<SDValue, 16> Elts;
19615 for (unsigned i = 0, e = NumElts; i != e; ++i)
19616 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
19617 DAG.getIntPtrConstant(i)));
19619 // Explicitly mark the extra elements as Undef.
19620 SDValue Undef = DAG.getUNDEF(SVT);
19621 for (unsigned i = NumElts, e = NumElts * 2; i != e; ++i)
19622 Elts.push_back(Undef);
19624 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
19625 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
19626 SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
19627 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
19628 DAG.getIntPtrConstant(0));
19631 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
19632 Subtarget->hasMMX() && "Unexpected custom BITCAST");
19633 assert((DstVT == MVT::i64 ||
19634 (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
19635 "Unexpected custom BITCAST");
19636 // i64 <=> MMX conversions are Legal.
19637 if (SrcVT==MVT::i64 && DstVT.isVector())
19639 if (DstVT==MVT::i64 && SrcVT.isVector())
19641 // MMX <=> MMX conversions are Legal.
19642 if (SrcVT.isVector() && DstVT.isVector())
19644 // All other conversions need to be expanded.
19648 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
19649 SelectionDAG &DAG) {
19650 SDNode *Node = Op.getNode();
19653 Op = Op.getOperand(0);
19654 EVT VT = Op.getValueType();
19655 assert((VT.is128BitVector() || VT.is256BitVector()) &&
19656 "CTPOP lowering only implemented for 128/256-bit wide vector types");
19658 unsigned NumElts = VT.getVectorNumElements();
19659 EVT EltVT = VT.getVectorElementType();
19660 unsigned Len = EltVT.getSizeInBits();
19662 // This is the vectorized version of the "best" algorithm from
19663 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
19664 // with a minor tweak to use a series of adds + shifts instead of vector
19665 // multiplications. Implemented for the v2i64, v4i64, v4i32, v8i32 types:
19667 // v2i64, v4i64, v4i32 => Only profitable w/ popcnt disabled
19668 // v8i32 => Always profitable
19670 // FIXME: There are a couple of possible improvements:
19672 // 1) Support for i8 and i16 vectors (needs measurements if popcnt enabled).
19673 // 2) Use strategies from http://wm.ite.pl/articles/sse-popcount.html
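// Single-lane sanity check of the identity (illustrative), v = 0xF0F0F0F0:
//   v - ((v >> 1) & 0x55...)              -> 0xA0A0A0A0
//   (v & 0x33...) + ((v >> 2) & 0x33...)  -> 0x40404040
//   (v + (v >> 4)) & 0x0F...              -> 0x04040404   (4 set bits per byte)
// and the final shifted adds accumulate the per-byte counts into 16, the
// population count of the original value.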
19675 assert(EltVT.isInteger() && (Len == 32 || Len == 64) && Len % 8 == 0 &&
19676 "CTPOP not implemented for this vector element type.");
19678 // X86 canonicalizes ANDs to vXi64; generate the appropriate bitcasts to avoid
19679 // extra legalization.
19680 bool NeedsBitcast = EltVT == MVT::i32;
19681 MVT BitcastVT = VT.is256BitVector() ? MVT::v4i64 : MVT::v2i64;
19683 SDValue Cst55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), EltVT);
19684 SDValue Cst33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), EltVT);
19685 SDValue Cst0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), EltVT);
19687 // v = v - ((v >> 1) & 0x55555555...)
19688 SmallVector<SDValue, 8> Ones(NumElts, DAG.getConstant(1, EltVT));
19689 SDValue OnesV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ones);
19690 SDValue Srl = DAG.getNode(ISD::SRL, dl, VT, Op, OnesV);
19692 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19694 SmallVector<SDValue, 8> Mask55(NumElts, Cst55);
19695 SDValue M55 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask55);
19697 M55 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M55);
19699 SDValue And = DAG.getNode(ISD::AND, dl, Srl.getValueType(), Srl, M55);
19700 if (VT != And.getValueType())
19701 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19702 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Op, And);
19704 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
19705 SmallVector<SDValue, 8> Mask33(NumElts, Cst33);
19706 SDValue M33 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask33);
19707 SmallVector<SDValue, 8> Twos(NumElts, DAG.getConstant(2, EltVT));
19708 SDValue TwosV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Twos);
19710 Srl = DAG.getNode(ISD::SRL, dl, VT, Sub, TwosV);
19711 if (NeedsBitcast) {
19712 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19713 M33 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M33);
19714 Sub = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Sub);
19717 SDValue AndRHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Srl, M33);
19718 SDValue AndLHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Sub, M33);
19719 if (VT != AndRHS.getValueType()) {
19720 AndRHS = DAG.getNode(ISD::BITCAST, dl, VT, AndRHS);
19721 AndLHS = DAG.getNode(ISD::BITCAST, dl, VT, AndLHS);
19723 SDValue Add = DAG.getNode(ISD::ADD, dl, VT, AndLHS, AndRHS);
19725 // v = (v + (v >> 4)) & 0x0F0F0F0F...
19726 SmallVector<SDValue, 8> Fours(NumElts, DAG.getConstant(4, EltVT));
19727 SDValue FoursV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Fours);
19728 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, FoursV);
19729 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19731 SmallVector<SDValue, 8> Mask0F(NumElts, Cst0F);
19732 SDValue M0F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask0F);
19733 if (NeedsBitcast) {
19734 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19735 M0F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M0F);
19737 And = DAG.getNode(ISD::AND, dl, M0F.getValueType(), Add, M0F);
19738 if (VT != And.getValueType())
19739 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19741 // The algorithm mentioned above uses:
19742 // v = (v * 0x01010101...) >> (Len - 8)
19744 // Change it to use vector adds + vector shifts which yield faster results on
19745 // Haswell than using vector integer multiplication.
19747 // For i32 elements:
19748 // v = v + (v >> 8)
19749 // v = v + (v >> 16)
19751 // For i64 elements:
19752 // v = v + (v >> 8)
19753 // v = v + (v >> 16)
19754 // v = v + (v >> 32)
19757 SmallVector<SDValue, 8> Csts;
19758 for (unsigned i = 8; i <= Len/2; i *= 2) {
19759 Csts.assign(NumElts, DAG.getConstant(i, EltVT));
19760 SDValue CstsV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Csts);
19761 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, CstsV);
19762 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19766 // The result is in the least significant 6 bits for i32 and 7 bits for i64.
19767 SDValue Cst3F = DAG.getConstant(APInt(Len, Len == 32 ? 0x3F : 0x7F), EltVT);
19768 SmallVector<SDValue, 8> Cst3FV(NumElts, Cst3F);
19769 SDValue M3F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Cst3FV);
19770 if (NeedsBitcast) {
19771 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19772 M3F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M3F);
19774 And = DAG.getNode(ISD::AND, dl, M3F.getValueType(), Add, M3F);
19775 if (VT != And.getValueType())
19776 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19781 static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
19782 SDNode *Node = Op.getNode();
19784 EVT T = Node->getValueType(0);
19785 SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
19786 DAG.getConstant(0, T), Node->getOperand(2));
19787 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
19788 cast<AtomicSDNode>(Node)->getMemoryVT(),
19789 Node->getOperand(0),
19790 Node->getOperand(1), negOp,
19791 cast<AtomicSDNode>(Node)->getMemOperand(),
19792 cast<AtomicSDNode>(Node)->getOrdering(),
19793 cast<AtomicSDNode>(Node)->getSynchScope());
19796 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
19797 SDNode *Node = Op.getNode();
19799 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
19801 // Convert seq_cst store -> xchg
19802 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
19803 // FIXME: On 32-bit, store -> fist or movq would be more efficient
19804 // (The only way to get a 16-byte store is cmpxchg16b)
19805 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
19806 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
19807 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
19808 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
19809 cast<AtomicSDNode>(Node)->getMemoryVT(),
19810 Node->getOperand(0),
19811 Node->getOperand(1), Node->getOperand(2),
19812 cast<AtomicSDNode>(Node)->getMemOperand(),
19813 cast<AtomicSDNode>(Node)->getOrdering(),
19814 cast<AtomicSDNode>(Node)->getSynchScope());
19815 return Swap.getValue(1);
19817 // Other atomic stores have a simple pattern.
19821 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
19822 EVT VT = Op.getNode()->getSimpleValueType(0);
19824 // Let legalize expand this if it isn't a legal type yet.
19825 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
19828 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
19831 bool ExtraOp = false;
19832 switch (Op.getOpcode()) {
19833 default: llvm_unreachable("Invalid code");
19834 case ISD::ADDC: Opc = X86ISD::ADD; break;
19835 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
19836 case ISD::SUBC: Opc = X86ISD::SUB; break;
19837 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
19841 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19843 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19844 Op.getOperand(1), Op.getOperand(2));
19847 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
19848 SelectionDAG &DAG) {
19849 assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit());
19851 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
19852 // which returns the values as { float, float } (in XMM0) or
19853 // { double, double } (which is returned in XMM0, XMM1).
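// Clarifying note: for f32 both results come back packed in xmm0 (sin in
// element 0, cos in element 1), which is why the f32 path at the end of this
// function extracts elements 0 and 1 instead of reading two registers.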
19855 SDValue Arg = Op.getOperand(0);
19856 EVT ArgVT = Arg.getValueType();
19857 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
19859 TargetLowering::ArgListTy Args;
19860 TargetLowering::ArgListEntry Entry;
19864 Entry.isSExt = false;
19865 Entry.isZExt = false;
19866 Args.push_back(Entry);
19868 bool isF64 = ArgVT == MVT::f64;
19869 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
19870 // the small struct {f32, f32} is returned in (eax, edx). For f64,
19871 // the results are returned via SRet in memory.
19872 const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret";
19873 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19874 SDValue Callee = DAG.getExternalSymbol(LibcallName, TLI.getPointerTy());
19876 Type *RetTy = isF64
19877 ? (Type*)StructType::get(ArgTy, ArgTy, nullptr)
19878 : (Type*)VectorType::get(ArgTy, 4);
19880 TargetLowering::CallLoweringInfo CLI(DAG);
19881 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
19882 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);
19884 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
19887 // Returned in xmm0 and xmm1.
19888 return CallResult.first;
19890 // Returned in bits 0:31 and 32:63 of xmm0.
19891 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19892 CallResult.first, DAG.getIntPtrConstant(0));
19893 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19894 CallResult.first, DAG.getIntPtrConstant(1));
19895 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
19896 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
19899 /// LowerOperation - Provide custom lowering hooks for some operations.
19901 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
19902 switch (Op.getOpcode()) {
19903 default: llvm_unreachable("Should not custom lower this!");
19904 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG);
19905 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
19906 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
19907 return LowerCMP_SWAP(Op, Subtarget, DAG);
19908 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
19909 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
19910 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG);
19911 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
19912 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
19913 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
19914 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
19915 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
19916 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
19917 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
19918 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
19919 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
19920 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
19921 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
19922 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
19923 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
19924 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
19925 case ISD::SHL_PARTS:
19926 case ISD::SRA_PARTS:
19927 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
19928 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
19929 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
19930 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
19931 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
19932 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
19933 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
19934 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
19935 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
19936 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
19937 case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG);
19939 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
19940 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
19941 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
19942 case ISD::SETCC: return LowerSETCC(Op, DAG);
19943 case ISD::SELECT: return LowerSELECT(Op, DAG);
19944 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
19945 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
19946 case ISD::VASTART: return LowerVASTART(Op, DAG);
19947 case ISD::VAARG: return LowerVAARG(Op, DAG);
19948 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
19949 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, Subtarget, DAG);
19950 case ISD::INTRINSIC_VOID:
19951 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
19952 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
19953 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
19954 case ISD::FRAME_TO_ARGS_OFFSET:
19955 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
19956 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
19957 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
19958 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
19959 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
19960 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
19961 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
19962 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
19963 case ISD::CTLZ: return LowerCTLZ(Op, DAG);
19964 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG);
19965 case ISD::CTTZ: return LowerCTTZ(Op, DAG);
19966 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
19967 case ISD::UMUL_LOHI:
19968 case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
19971 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
19977 case ISD::UMULO: return LowerXALUO(Op, DAG);
19978 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
19979 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
19983 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
19984 case ISD::ADD: return LowerADD(Op, DAG);
19985 case ISD::SUB: return LowerSUB(Op, DAG);
19986 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
19990 /// ReplaceNodeResults - Replace a node with an illegal result type
19991 /// with a new node built out of custom code.
19992 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
19993 SmallVectorImpl<SDValue>&Results,
19994 SelectionDAG &DAG) const {
19996 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19997 switch (N->getOpcode()) {
19999 llvm_unreachable("Do not know how to custom type legalize this operation!");
20000 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
20001 case X86ISD::FMINC:
20003 case X86ISD::FMAXC:
20004 case X86ISD::FMAX: {
20005 EVT VT = N->getValueType(0);
20006 if (VT != MVT::v2f32)
20007 llvm_unreachable("Unexpected type (!= v2f32) on FMIN/FMAX.");
20008 SDValue UNDEF = DAG.getUNDEF(VT);
20009 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
20010 N->getOperand(0), UNDEF);
20011 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
20012 N->getOperand(1), UNDEF);
20013 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
20016 case ISD::SIGN_EXTEND_INREG:
20021 // We don't want to expand or promote these.
20028 case ISD::UDIVREM: {
20029 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
20030 Results.push_back(V);
20033 case ISD::FP_TO_SINT:
20034 case ISD::FP_TO_UINT: {
20035 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
20037 if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType()))
20040 std::pair<SDValue,SDValue> Vals =
20041 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
20042 SDValue FIST = Vals.first, StackSlot = Vals.second;
20043 if (FIST.getNode()) {
20044 EVT VT = N->getValueType(0);
20045 // Return a load from the stack slot.
20046 if (StackSlot.getNode())
20047 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
20048 MachinePointerInfo(),
20049 false, false, false, 0));
20051 Results.push_back(FIST);
20055 case ISD::UINT_TO_FP: {
20056 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
20057 if (N->getOperand(0).getValueType() != MVT::v2i32 ||
20058 N->getValueType(0) != MVT::v2f32)
20060 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
20062 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
20064 SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias);
20065 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
20066 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias));
20067 Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or);
20068 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
20069 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
20072 case ISD::FP_ROUND: {
20073 if (!TLI.isTypeLegal(N->getOperand(0).getValueType()))
20075 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
20076 Results.push_back(V);
20079 case ISD::INTRINSIC_W_CHAIN: {
20080 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
20082 default : llvm_unreachable("Do not know how to custom type "
20083 "legalize this intrinsic operation!");
20084 case Intrinsic::x86_rdtsc:
20085 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
20087 case Intrinsic::x86_rdtscp:
20088 return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
20090 case Intrinsic::x86_rdpmc:
20091 return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
20094 case ISD::READCYCLECOUNTER: {
20095 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
20098 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
20099 EVT T = N->getValueType(0);
20100 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
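// Note: CMPXCHG8B/16B expect the comparand in EDX:EAX (resp. RDX:RAX) and the
// replacement value in ECX:EBX (resp. RCX:RBX); the CopyToReg chain below
// marshals the two halves of each i64/i128 operand into those registers.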
20101 bool Regs64bit = T == MVT::i128;
20102 EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
20103 SDValue cpInL, cpInH;
20104 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
20105 DAG.getConstant(0, HalfT));
20106 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
20107 DAG.getConstant(1, HalfT));
20108 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
20109 Regs64bit ? X86::RAX : X86::EAX,
20111 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
20112 Regs64bit ? X86::RDX : X86::EDX,
20113 cpInH, cpInL.getValue(1));
20114 SDValue swapInL, swapInH;
20115 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
20116 DAG.getConstant(0, HalfT));
20117 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
20118 DAG.getConstant(1, HalfT));
20119 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl,
20120 Regs64bit ? X86::RBX : X86::EBX,
20121 swapInL, cpInH.getValue(1));
20122 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl,
20123 Regs64bit ? X86::RCX : X86::ECX,
20124 swapInH, swapInL.getValue(1));
20125 SDValue Ops[] = { swapInH.getValue(0),
20127 swapInH.getValue(1) };
20128 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
20129 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
20130 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG :
20131 X86ISD::LCMPXCHG8_DAG;
20132 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
20133 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
20134 Regs64bit ? X86::RAX : X86::EAX,
20135 HalfT, Result.getValue(1));
20136 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
20137 Regs64bit ? X86::RDX : X86::EDX,
20138 HalfT, cpOutL.getValue(2));
20139 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
20141 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
20142 MVT::i32, cpOutH.getValue(2));
20144 DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
20145 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
20146 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
20148 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
20149 Results.push_back(Success);
20150 Results.push_back(EFLAGS.getValue(1));
20153 case ISD::ATOMIC_SWAP:
20154 case ISD::ATOMIC_LOAD_ADD:
20155 case ISD::ATOMIC_LOAD_SUB:
20156 case ISD::ATOMIC_LOAD_AND:
20157 case ISD::ATOMIC_LOAD_OR:
20158 case ISD::ATOMIC_LOAD_XOR:
20159 case ISD::ATOMIC_LOAD_NAND:
20160 case ISD::ATOMIC_LOAD_MIN:
20161 case ISD::ATOMIC_LOAD_MAX:
20162 case ISD::ATOMIC_LOAD_UMIN:
20163 case ISD::ATOMIC_LOAD_UMAX:
20164 case ISD::ATOMIC_LOAD: {
20165 // Delegate to generic TypeLegalization. Situations we can really handle
20166 // should have already been dealt with by AtomicExpandPass.cpp.
20169 case ISD::BITCAST: {
20170 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
20171 EVT DstVT = N->getValueType(0);
20172 EVT SrcVT = N->getOperand(0)->getValueType(0);
20174 if (SrcVT != MVT::f64 ||
20175 (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
20178 unsigned NumElts = DstVT.getVectorNumElements();
20179 EVT SVT = DstVT.getVectorElementType();
20180 EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
20181 SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
20182 MVT::v2f64, N->getOperand(0));
20183 SDValue ToVecInt = DAG.getNode(ISD::BITCAST, dl, WiderVT, Expanded);
20185 if (ExperimentalVectorWideningLegalization) {
20186 // If we are legalizing vectors by widening, we already have the desired
20187 // legal vector type, just return it.
20188 Results.push_back(ToVecInt);
20192 SmallVector<SDValue, 8> Elts;
20193 for (unsigned i = 0, e = NumElts; i != e; ++i)
20194 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
20195 ToVecInt, DAG.getIntPtrConstant(i)));
20197 Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
20202 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
20204 default: return nullptr;
20205 case X86ISD::BSF: return "X86ISD::BSF";
20206 case X86ISD::BSR: return "X86ISD::BSR";
20207 case X86ISD::SHLD: return "X86ISD::SHLD";
20208 case X86ISD::SHRD: return "X86ISD::SHRD";
20209 case X86ISD::FAND: return "X86ISD::FAND";
20210 case X86ISD::FANDN: return "X86ISD::FANDN";
20211 case X86ISD::FOR: return "X86ISD::FOR";
20212 case X86ISD::FXOR: return "X86ISD::FXOR";
20213 case X86ISD::FSRL: return "X86ISD::FSRL";
20214 case X86ISD::FILD: return "X86ISD::FILD";
20215 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
20216 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
20217 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
20218 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
20219 case X86ISD::FLD: return "X86ISD::FLD";
20220 case X86ISD::FST: return "X86ISD::FST";
20221 case X86ISD::CALL: return "X86ISD::CALL";
20222 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
20223 case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
20224 case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
20225 case X86ISD::BT: return "X86ISD::BT";
20226 case X86ISD::CMP: return "X86ISD::CMP";
20227 case X86ISD::COMI: return "X86ISD::COMI";
20228 case X86ISD::UCOMI: return "X86ISD::UCOMI";
20229 case X86ISD::CMPM: return "X86ISD::CMPM";
20230 case X86ISD::CMPMU: return "X86ISD::CMPMU";
20231 case X86ISD::SETCC: return "X86ISD::SETCC";
20232 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
20233 case X86ISD::FSETCC: return "X86ISD::FSETCC";
20234 case X86ISD::CMOV: return "X86ISD::CMOV";
20235 case X86ISD::BRCOND: return "X86ISD::BRCOND";
20236 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
20237 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
20238 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
20239 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
20240 case X86ISD::Wrapper: return "X86ISD::Wrapper";
20241 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
20242 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
20243 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
20244 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
20245 case X86ISD::PINSRB: return "X86ISD::PINSRB";
20246 case X86ISD::PINSRW: return "X86ISD::PINSRW";
20247 case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
20248 case X86ISD::ANDNP: return "X86ISD::ANDNP";
20249 case X86ISD::PSIGN: return "X86ISD::PSIGN";
20250 case X86ISD::BLENDI: return "X86ISD::BLENDI";
20251 case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
20252 case X86ISD::SUBUS: return "X86ISD::SUBUS";
20253 case X86ISD::HADD: return "X86ISD::HADD";
20254 case X86ISD::HSUB: return "X86ISD::HSUB";
20255 case X86ISD::FHADD: return "X86ISD::FHADD";
20256 case X86ISD::FHSUB: return "X86ISD::FHSUB";
20257 case X86ISD::UMAX: return "X86ISD::UMAX";
20258 case X86ISD::UMIN: return "X86ISD::UMIN";
20259 case X86ISD::SMAX: return "X86ISD::SMAX";
20260 case X86ISD::SMIN: return "X86ISD::SMIN";
20261 case X86ISD::FMAX: return "X86ISD::FMAX";
20262 case X86ISD::FMIN: return "X86ISD::FMIN";
20263 case X86ISD::FMAXC: return "X86ISD::FMAXC";
20264 case X86ISD::FMINC: return "X86ISD::FMINC";
20265 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
20266 case X86ISD::FRCP: return "X86ISD::FRCP";
20267 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
20268 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
20269 case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
20270 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
20271 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
20272 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
20273 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
20274 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
20275 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
20276 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
20277 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
20278 case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
20279 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
20280 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
20281 case X86ISD::VZEXT: return "X86ISD::VZEXT";
20282 case X86ISD::VSEXT: return "X86ISD::VSEXT";
20283 case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
20284 case X86ISD::VTRUNCM: return "X86ISD::VTRUNCM";
20285 case X86ISD::VINSERT: return "X86ISD::VINSERT";
20286 case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
20287 case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
20288 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
20289 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
20290 case X86ISD::VSHL: return "X86ISD::VSHL";
20291 case X86ISD::VSRL: return "X86ISD::VSRL";
20292 case X86ISD::VSRA: return "X86ISD::VSRA";
20293 case X86ISD::VSHLI: return "X86ISD::VSHLI";
20294 case X86ISD::VSRLI: return "X86ISD::VSRLI";
20295 case X86ISD::VSRAI: return "X86ISD::VSRAI";
20296 case X86ISD::CMPP: return "X86ISD::CMPP";
20297 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
20298 case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
20299 case X86ISD::PCMPEQM: return "X86ISD::PCMPEQM";
20300 case X86ISD::PCMPGTM: return "X86ISD::PCMPGTM";
20301 case X86ISD::ADD: return "X86ISD::ADD";
20302 case X86ISD::SUB: return "X86ISD::SUB";
20303 case X86ISD::ADC: return "X86ISD::ADC";
20304 case X86ISD::SBB: return "X86ISD::SBB";
20305 case X86ISD::SMUL: return "X86ISD::SMUL";
20306 case X86ISD::UMUL: return "X86ISD::UMUL";
20307 case X86ISD::SMUL8: return "X86ISD::SMUL8";
20308 case X86ISD::UMUL8: return "X86ISD::UMUL8";
20309 case X86ISD::SDIVREM8_SEXT_HREG: return "X86ISD::SDIVREM8_SEXT_HREG";
20310 case X86ISD::UDIVREM8_ZEXT_HREG: return "X86ISD::UDIVREM8_ZEXT_HREG";
20311 case X86ISD::INC: return "X86ISD::INC";
20312 case X86ISD::DEC: return "X86ISD::DEC";
20313 case X86ISD::OR: return "X86ISD::OR";
20314 case X86ISD::XOR: return "X86ISD::XOR";
20315 case X86ISD::AND: return "X86ISD::AND";
20316 case X86ISD::BEXTR: return "X86ISD::BEXTR";
20317 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
20318 case X86ISD::PTEST: return "X86ISD::PTEST";
20319 case X86ISD::TESTP: return "X86ISD::TESTP";
20320 case X86ISD::TESTM: return "X86ISD::TESTM";
20321 case X86ISD::TESTNM: return "X86ISD::TESTNM";
20322 case X86ISD::KORTEST: return "X86ISD::KORTEST";
20323 case X86ISD::PACKSS: return "X86ISD::PACKSS";
20324 case X86ISD::PACKUS: return "X86ISD::PACKUS";
20325 case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
20326 case X86ISD::VALIGN: return "X86ISD::VALIGN";
20327 case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
20328 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
20329 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
20330 case X86ISD::SHUFP: return "X86ISD::SHUFP";
20331 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
20332 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
20333 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
20334 case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
20335 case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
20336 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
20337 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
20338 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
20339 case X86ISD::MOVSD: return "X86ISD::MOVSD";
20340 case X86ISD::MOVSS: return "X86ISD::MOVSS";
20341 case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
20342 case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
20343 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
20344 case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
20345 case X86ISD::VEXTRACT: return "X86ISD::VEXTRACT";
20346 case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
20347 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
20348 case X86ISD::VPERMV: return "X86ISD::VPERMV";
20349 case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
20350 case X86ISD::VPERMIV3: return "X86ISD::VPERMIV3";
20351 case X86ISD::VPERMI: return "X86ISD::VPERMI";
20352 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
20353 case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
20354 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
20355 case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
20356 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
20357 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
20358 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
20359 case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL";
20360 case X86ISD::SAHF: return "X86ISD::SAHF";
20361 case X86ISD::RDRAND: return "X86ISD::RDRAND";
20362 case X86ISD::RDSEED: return "X86ISD::RDSEED";
20363 case X86ISD::FMADD: return "X86ISD::FMADD";
20364 case X86ISD::FMSUB: return "X86ISD::FMSUB";
20365 case X86ISD::FNMADD: return "X86ISD::FNMADD";
20366 case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
20367 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
20368 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
20369 case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI";
20370 case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI";
20371 case X86ISD::XTEST: return "X86ISD::XTEST";
20372 case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
20373 case X86ISD::EXPAND: return "X86ISD::EXPAND";
20374 case X86ISD::SELECT: return "X86ISD::SELECT";
20375 case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
20376 case X86ISD::RCP28: return "X86ISD::RCP28";
case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
}
}

// isLegalAddressingMode - Return true if the addressing mode represented
20382 // by AM is legal for this target, for a load/store of the specified type.
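// As an illustration, x86 addressing allows "base + scale*index + disp32" in
// a single memory operand (e.g. movl 42(%rdi,%rcx,4), %eax), which is why the
// checks below only reject the handful of combinations that cannot be encoded
// (unsupported scales, PIC-base conflicts, out-of-range displacements).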
bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
Type *Ty) const {
20385 // X86 supports extremely general addressing modes.
20386 CodeModel::Model M = getTargetMachine().getCodeModel();
20387 Reloc::Model R = getTargetMachine().getRelocationModel();
20389 // X86 allows a sign-extended 32-bit immediate field as a displacement.
if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
return false;

if (AM.BaseGV) {
unsigned GVFlags =
20395 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());
20397 // If a reference to this global requires an extra load, we can't fold it.
if (isGlobalStubReference(GVFlags))
return false;
20401 // If BaseGV requires a register for the PIC base, we cannot also have a
20402 // BaseReg specified.
if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
return false;
20406 // If lower 4G is not available, then we must use rip-relative addressing.
20407 if ((M != CodeModel::Small || R != Reloc::Static) &&
Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
return false;
}
switch (AM.Scale) {
case 0:
case 1:
case 2:
case 4:
case 8:
// These scales always work.
break;
case 3:
case 5:
case 9:
// These scales are formed with basereg+scalereg. Only accept if there is
// no basereg yet.
if (AM.HasBaseReg)
return false;
break;
default: // Other stuff never works.
return false;
}

return true;
}
20435 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
20436 unsigned Bits = Ty->getScalarSizeInBits();
// 8-bit shifts are always expensive, and versions with a scalar amount aren't
// noticeably cheaper than those without.
if (Bits == 8)
return false;
20443 // On AVX2 there are new vpsllv[dq] instructions (and other shifts), that make
20444 // variable shifts just as cheap as scalar ones.
20445 if (Subtarget->hasInt256() && (Bits == 32 || Bits == 64))
20448 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
20449 // fully general vector.
20453 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
20454 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20456 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
20457 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
20458 return NumBits1 > NumBits2;
20461 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
20462 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20465 if (!isTypeLegal(EVT::getEVT(Ty1)))
20468 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
20470 // Assuming the caller doesn't have a zeroext or signext return parameter,
20471 // truncation all the way down to i1 is valid.
20475 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
20476 return isInt<32>(Imm);
20479 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
20480 // Can also use sub to handle negated immediates.
20481 return isInt<32>(Imm);
20484 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
20485 if (!VT1.isInteger() || !VT2.isInteger())
20487 unsigned NumBits1 = VT1.getSizeInBits();
20488 unsigned NumBits2 = VT2.getSizeInBits();
20489 return NumBits1 > NumBits2;
20492 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
20493 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20494 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
20497 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
20498 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20499 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
20502 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
20503 EVT VT1 = Val.getValueType();
20504 if (isZExtFree(VT1, VT2))
20507 if (Val.getOpcode() != ISD::LOAD)
20510 if (!VT1.isSimple() || !VT1.isInteger() ||
20511 !VT2.isSimple() || !VT2.isInteger())
20514 switch (VT1.getSimpleVT().SimpleTy) {
20519 // X86 has 8, 16, and 32-bit zero-extending loads.
20526 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue) const { return true; }
20529 X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
20530 if (!(Subtarget->hasFMA() || Subtarget->hasFMA4()))
20533 VT = VT.getScalarType();
20535 if (!VT.isSimple())
20538 switch (VT.getSimpleVT().SimpleTy) {
20549 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
20550 // i16 instructions are longer (0x66 prefix) and potentially slower.
20551 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
20554 /// isShuffleMaskLegal - Targets can use this to indicate that they only
20555 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
20556 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
20557 /// are assumed to be legal.
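/// For example, for v4i32 the mask <0,4,1,5> is the UNPCKL interleave pattern
/// and <3,2,1,0> is a single PSHUFD, so both are cheap to lower; the checks
/// below approximate which masks map onto such single instructions.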
20559 X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
20561 if (!VT.isSimple())
20564 MVT SVT = VT.getSimpleVT();
20566 // Very little shuffling can be done for 64-bit vectors right now.
20567 if (VT.getSizeInBits() == 64)
20570 // This is an experimental legality test that is tailored to match the
20571 // legality test of the experimental lowering more closely. They are gated
20572 // separately to ease testing of performance differences.
20573 if (ExperimentalVectorShuffleLegality)
20574 // We only care that the types being shuffled are legal. The lowering can
20575 // handle any possible shuffle mask that results.
20576 return isTypeLegal(SVT);
20578 // If this is a single-input shuffle with no 128 bit lane crossings we can
20579 // lower it into pshufb.
20580 if ((SVT.is128BitVector() && Subtarget->hasSSSE3()) ||
20581 (SVT.is256BitVector() && Subtarget->hasInt256())) {
20582 bool isLegal = true;
20583 for (unsigned I = 0, E = M.size(); I != E; ++I) {
20584 if (M[I] >= (int)SVT.getVectorNumElements() ||
20585 ShuffleCrosses128bitLane(SVT, I, M[I])) {
20594 // FIXME: blends, shifts.
20595 return (SVT.getVectorNumElements() == 2 ||
20596 ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
20597 isMOVLMask(M, SVT) ||
20598 isCommutedMOVLMask(M, SVT) ||
20599 isMOVHLPSMask(M, SVT) ||
20600 isSHUFPMask(M, SVT) ||
20601 isSHUFPMask(M, SVT, /* Commuted */ true) ||
20602 isPSHUFDMask(M, SVT) ||
20603 isPSHUFDMask(M, SVT, /* SecondOperand */ true) ||
20604 isPSHUFHWMask(M, SVT, Subtarget->hasInt256()) ||
20605 isPSHUFLWMask(M, SVT, Subtarget->hasInt256()) ||
20606 isPALIGNRMask(M, SVT, Subtarget) ||
20607 isUNPCKLMask(M, SVT, Subtarget->hasInt256()) ||
20608 isUNPCKHMask(M, SVT, Subtarget->hasInt256()) ||
20609 isUNPCKL_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20610 isUNPCKH_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20611 isBlendMask(M, SVT, Subtarget->hasSSE41(), Subtarget->hasInt256()) ||
20612 (Subtarget->hasSSE41() && isINSERTPSMask(M, SVT)));
20616 X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
20618 if (!VT.isSimple())
20621 MVT SVT = VT.getSimpleVT();
20623 // This is an experimental legality test that is tailored to match the
20624 // legality test of the experimental lowering more closely. They are gated
20625 // separately to ease testing of performance differences.
20626 if (ExperimentalVectorShuffleLegality)
20627 // The new vector shuffle lowering is very good at managing zero-inputs.
20628 return isShuffleMaskLegal(Mask, VT);
20630 unsigned NumElts = SVT.getVectorNumElements();
20631 // FIXME: This collection of masks seems suspect.
20634 if (NumElts == 4 && SVT.is128BitVector()) {
20635 return (isMOVLMask(Mask, SVT) ||
20636 isCommutedMOVLMask(Mask, SVT, true) ||
20637 isSHUFPMask(Mask, SVT) ||
20638 isSHUFPMask(Mask, SVT, /* Commuted */ true) ||
20639 isBlendMask(Mask, SVT, Subtarget->hasSSE41(),
20640 Subtarget->hasInt256()));
20645 //===----------------------------------------------------------------------===//
20646 // X86 Scheduler Hooks
20647 //===----------------------------------------------------------------------===//
20649 /// Utility function to emit xbegin specifying the start of an RTM region.
20650 static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
20651 const TargetInstrInfo *TII) {
20652 DebugLoc DL = MI->getDebugLoc();
20654 const BasicBlock *BB = MBB->getBasicBlock();
20655 MachineFunction::iterator I = MBB;
20658 // For the v = xbegin(), we generate
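//
// thisMBB:
//  xbegin sinkMBB        (an abort branches to sinkMBB)
//
// mainMBB:
//  eax = -1              (transaction started successfully)
//
// sinkMBB:
//  v = eax               (-1 on success, abort status after an abort)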
20669 MachineBasicBlock *thisMBB = MBB;
20670 MachineFunction *MF = MBB->getParent();
20671 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
20672 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
20673 MF->insert(I, mainMBB);
20674 MF->insert(I, sinkMBB);
20676 // Transfer the remainder of BB and its successor edges to sinkMBB.
20677 sinkMBB->splice(sinkMBB->begin(), MBB,
20678 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
20679 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
20683 // # fallthrough to mainMBB
// # abort path to sinkMBB
20685 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB);
20686 thisMBB->addSuccessor(mainMBB);
20687 thisMBB->addSuccessor(sinkMBB);
20691 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1);
20692 mainMBB->addSuccessor(sinkMBB);
20695 // EAX is live into the sinkMBB
20696 sinkMBB->addLiveIn(X86::EAX);
20697 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
20698 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20701 MI->eraseFromParent();
20705 // FIXME: When we get size specific XMM0 registers, i.e. XMM0_V16I8
// or XMM0_V32I8 in AVX, all of this code can be replaced with that
20707 // in the .td file.
20708 static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
20709 const TargetInstrInfo *TII) {
20711 switch (MI->getOpcode()) {
20712 default: llvm_unreachable("illegal opcode!");
20713 case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break;
20714 case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
20715 case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break;
20716 case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break;
20717 case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break;
20718 case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break;
20719 case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break;
20720 case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
20723 DebugLoc dl = MI->getDebugLoc();
20724 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20726 unsigned NumArgs = MI->getNumOperands();
20727 for (unsigned i = 1; i < NumArgs; ++i) {
20728 MachineOperand &Op = MI->getOperand(i);
20729 if (!(Op.isReg() && Op.isImplicit()))
20730 MIB.addOperand(Op);
20732 if (MI->hasOneMemOperand())
20733 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20735 BuildMI(*BB, MI, dl,
20736 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20737 .addReg(X86::XMM0);
20739 MI->eraseFromParent();
20743 // FIXME: Custom handling because TableGen doesn't support multiple implicit
20744 // defs in an instruction pattern
20745 static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
20746 const TargetInstrInfo *TII) {
20748 switch (MI->getOpcode()) {
20749 default: llvm_unreachable("illegal opcode!");
20750 case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break;
20751 case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
20752 case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break;
20753 case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break;
20754 case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break;
20755 case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break;
20756 case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break;
20757 case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
20760 DebugLoc dl = MI->getDebugLoc();
20761 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20763 unsigned NumArgs = MI->getNumOperands(); // remove the results
20764 for (unsigned i = 1; i < NumArgs; ++i) {
20765 MachineOperand &Op = MI->getOperand(i);
20766 if (!(Op.isReg() && Op.isImplicit()))
20767 MIB.addOperand(Op);
20769 if (MI->hasOneMemOperand())
20770 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20772 BuildMI(*BB, MI, dl,
20773 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20776 MI->eraseFromParent();
20780 static MachineBasicBlock *EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
20781 const X86Subtarget *Subtarget) {
20782 DebugLoc dl = MI->getDebugLoc();
20783 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20784 // Address into RAX/EAX, other two args into ECX, EDX.
20785 unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
20786 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
20787 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
20788 for (int i = 0; i < X86::AddrNumOperands; ++i)
20789 MIB.addOperand(MI->getOperand(i));
20791 unsigned ValOps = X86::AddrNumOperands;
20792 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
20793 .addReg(MI->getOperand(ValOps).getReg());
20794 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
20795 .addReg(MI->getOperand(ValOps+1).getReg());
20797 // The instruction doesn't actually take any operands though.
20798 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));
20800 MI->eraseFromParent(); // The pseudo is gone now.
20804 MachineBasicBlock *
20805 X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
20806 MachineBasicBlock *MBB) const {
20807 // Emit va_arg instruction on X86-64.
20809 // Operands to this pseudo-instruction:
20810 // 0 ) Output : destination address (reg)
20811 // 1-5) Input : va_list address (addr, i64mem)
20812 // 6 ) ArgSize : Size (in bytes) of vararg type
20813 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
20814 // 8 ) Align : Alignment of type
20815 // 9 ) EFLAGS (implicit-def)
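// For example, a va_arg of type double would typically be emitted with
// ArgSize = 8, ArgMode = 2 (use fp_offset), and Align = 8.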
20817 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
20818 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands");
20820 unsigned DestReg = MI->getOperand(0).getReg();
20821 MachineOperand &Base = MI->getOperand(1);
20822 MachineOperand &Scale = MI->getOperand(2);
20823 MachineOperand &Index = MI->getOperand(3);
20824 MachineOperand &Disp = MI->getOperand(4);
20825 MachineOperand &Segment = MI->getOperand(5);
20826 unsigned ArgSize = MI->getOperand(6).getImm();
20827 unsigned ArgMode = MI->getOperand(7).getImm();
20828 unsigned Align = MI->getOperand(8).getImm();
20830 // Memory Reference
20831 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
20832 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
20833 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
20835 // Machine Information
20836 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20837 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
20838 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
20839 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
20840 DebugLoc DL = MI->getDebugLoc();
// struct va_list {
//   i32   gp_offset
//   i32   fp_offset
//   i64   overflow_area (address)
//   i64   reg_save_area (address)
// }
// sizeof(va_list) = 24
20849 // alignment(va_list) = 8
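//
// gp_offset and fp_offset (the first two fields) hold the byte offset of the
// next unused slot in reg_save_area: gp_offset advances in 8-byte steps up to
// 48 (six integer registers), fp_offset in 16-byte steps from 48 up to 176
// (eight XMM registers). ArgMode selects which of the two is consulted below.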
20851 unsigned TotalNumIntRegs = 6;
20852 unsigned TotalNumXMMRegs = 8;
20853 bool UseGPOffset = (ArgMode == 1);
20854 bool UseFPOffset = (ArgMode == 2);
20855 unsigned MaxOffset = TotalNumIntRegs * 8 +
20856 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
20858 /* Align ArgSize to a multiple of 8 */
20859 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
20860 bool NeedsAlign = (Align > 8);
20862 MachineBasicBlock *thisMBB = MBB;
20863 MachineBasicBlock *overflowMBB;
20864 MachineBasicBlock *offsetMBB;
20865 MachineBasicBlock *endMBB;
20867 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
20868 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
20869 unsigned OffsetReg = 0;
20871 if (!UseGPOffset && !UseFPOffset) {
20872 // If we only pull from the overflow region, we don't create a branch.
20873 // We don't need to alter control flow.
20874 OffsetDestReg = 0; // unused
20875 OverflowDestReg = DestReg;
20877 offsetMBB = nullptr;
20878 overflowMBB = thisMBB;
20881 // First emit code to check if gp_offset (or fp_offset) is below the bound.
20882 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
20883 // If not, pull from overflow_area. (branch to overflowMBB)
//
//        thisMBB
//        /     \
//  offsetMBB  overflowMBB
//        \     /
//        endMBB
//
20893 // Registers for the PHI in endMBB
20894 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
20895 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
20897 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
20898 MachineFunction *MF = MBB->getParent();
20899 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20900 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20901 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20903 MachineFunction::iterator MBBIter = MBB;
20906 // Insert the new basic blocks
20907 MF->insert(MBBIter, offsetMBB);
20908 MF->insert(MBBIter, overflowMBB);
20909 MF->insert(MBBIter, endMBB);
20911 // Transfer the remainder of MBB and its successor edges to endMBB.
20912 endMBB->splice(endMBB->begin(), thisMBB,
20913 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
20914 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
20916 // Make offsetMBB and overflowMBB successors of thisMBB
20917 thisMBB->addSuccessor(offsetMBB);
20918 thisMBB->addSuccessor(overflowMBB);
20920 // endMBB is a successor of both offsetMBB and overflowMBB
20921 offsetMBB->addSuccessor(endMBB);
20922 overflowMBB->addSuccessor(endMBB);
20924 // Load the offset value into a register
20925 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
20926 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
20930 .addDisp(Disp, UseFPOffset ? 4 : 0)
20931 .addOperand(Segment)
20932 .setMemRefs(MMOBegin, MMOEnd);
20934 // Check if there is enough room left to pull this argument.
20935 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
20937 .addImm(MaxOffset + 8 - ArgSizeA8);
20939 // Branch to "overflowMBB" if offset >= max
20940 // Fall through to "offsetMBB" otherwise
20941 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
20942 .addMBB(overflowMBB);
20945 // In offsetMBB, emit code to use the reg_save_area.
20947 assert(OffsetReg != 0);
20949 // Read the reg_save_area address.
20950 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
20951 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
20956 .addOperand(Segment)
20957 .setMemRefs(MMOBegin, MMOEnd);
20959 // Zero-extend the offset
20960 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
20961 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
20964 .addImm(X86::sub_32bit);
20966 // Add the offset to the reg_save_area to get the final address.
20967 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
20968 .addReg(OffsetReg64)
20969 .addReg(RegSaveReg);
20971 // Compute the offset for the next argument
20972 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
20973 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
20975 .addImm(UseFPOffset ? 16 : 8);
20977 // Store it back into the va_list.
20978 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
20982 .addDisp(Disp, UseFPOffset ? 4 : 0)
20983 .addOperand(Segment)
20984 .addReg(NextOffsetReg)
20985 .setMemRefs(MMOBegin, MMOEnd);
20988 BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
20993 // Emit code to use overflow area
20996 // Load the overflow_area address into a register.
20997 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
20998 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
21003 .addOperand(Segment)
21004 .setMemRefs(MMOBegin, MMOEnd);
21006 // If we need to align it, do so. Otherwise, just copy the address
21007 // to OverflowDestReg.
21009 // Align the overflow address
21010 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
21011 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
21013 // aligned_addr = (addr + (align-1)) & ~(align-1)
21014 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
21015 .addReg(OverflowAddrReg)
21018 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
21020 .addImm(~(uint64_t)(Align-1));
21022 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
21023 .addReg(OverflowAddrReg);
21026 // Compute the next overflow address after this argument.
21027 // (the overflow address should be kept 8-byte aligned)
21028 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
21029 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
21030 .addReg(OverflowDestReg)
21031 .addImm(ArgSizeA8);
21033 // Store the new overflow address.
21034 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
21039 .addOperand(Segment)
21040 .addReg(NextAddrReg)
21041 .setMemRefs(MMOBegin, MMOEnd);
21043 // If we branched, emit the PHI to the front of endMBB.
21045 BuildMI(*endMBB, endMBB->begin(), DL,
21046 TII->get(X86::PHI), DestReg)
21047 .addReg(OffsetDestReg).addMBB(offsetMBB)
21048 .addReg(OverflowDestReg).addMBB(overflowMBB);
21051 // Erase the pseudo instruction
21052 MI->eraseFromParent();
21057 MachineBasicBlock *
21058 X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
21060 MachineBasicBlock *MBB) const {
21061 // Emit code to save XMM registers to the stack. The ABI says that the
21062 // number of registers to save is given in %al, so it's theoretically
// possible to do an indirect jump trick to avoid saving all of them;
// however, this code takes a simpler approach and just executes all
21065 // of the stores if %al is non-zero. It's less code, and it's probably
21066 // easier on the hardware branch predictor, and stores aren't all that
21067 // expensive anyway.
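//
// Roughly, for the non-Win64 case the emitted code looks like:
//
//   testb %al, %al
//   je    EndMBB
// XMMSaveMBB:
//   movaps %xmm<i>, offset_i(reg_save_frame)   ; one store per XMM arg reg
// EndMBB: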
21069 // Create the new basic blocks. One block contains all the XMM stores,
21070 // and one block is the final destination regardless of whether any
21071 // stores were performed.
21072 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
21073 MachineFunction *F = MBB->getParent();
21074 MachineFunction::iterator MBBIter = MBB;
21076 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
21077 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
21078 F->insert(MBBIter, XMMSaveMBB);
21079 F->insert(MBBIter, EndMBB);
21081 // Transfer the remainder of MBB and its successor edges to EndMBB.
21082 EndMBB->splice(EndMBB->begin(), MBB,
21083 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21084 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
21086 // The original block will now fall through to the XMM save block.
21087 MBB->addSuccessor(XMMSaveMBB);
21088 // The XMMSaveMBB will fall through to the end block.
21089 XMMSaveMBB->addSuccessor(EndMBB);
21091 // Now add the instructions.
21092 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21093 DebugLoc DL = MI->getDebugLoc();
21095 unsigned CountReg = MI->getOperand(0).getReg();
21096 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
21097 int64_t VarArgsFPOffset = MI->getOperand(2).getImm();
21099 if (!Subtarget->isTargetWin64()) {
21100 // If %al is 0, branch around the XMM save block.
21101 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
21102 BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
21103 MBB->addSuccessor(EndMBB);
21106 // Make sure the last operand is EFLAGS, which gets clobbered by the branch
21107 // that was just emitted, but clearly shouldn't be "saved".
21108 assert((MI->getNumOperands() <= 3 ||
21109 !MI->getOperand(MI->getNumOperands() - 1).isReg() ||
21110 MI->getOperand(MI->getNumOperands() - 1).getReg() == X86::EFLAGS)
21111 && "Expected last argument to be EFLAGS");
21112 unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
21113 // In the XMM save block, save all the XMM argument registers.
21114 for (int i = 3, e = MI->getNumOperands() - 1; i != e; ++i) {
21115 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
21116 MachineMemOperand *MMO =
21117 F->getMachineMemOperand(
21118 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset),
21119 MachineMemOperand::MOStore,
21120 /*Size=*/16, /*Align=*/16);
21121 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
21122 .addFrameIndex(RegSaveFrameIndex)
21123 .addImm(/*Scale=*/1)
21124 .addReg(/*IndexReg=*/0)
21125 .addImm(/*Disp=*/Offset)
21126 .addReg(/*Segment=*/0)
21127 .addReg(MI->getOperand(i).getReg())
21128 .addMemOperand(MMO);
21131 MI->eraseFromParent(); // The pseudo instruction is gone now.
21136 // The EFLAGS operand of SelectItr might be missing a kill marker
21137 // because there were multiple uses of EFLAGS, and ISel didn't know
21138 // which to mark. Figure out whether SelectItr should have had a
// kill marker, and set it if it should. Returns the correct kill marker value.
21141 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
21142 MachineBasicBlock* BB,
21143 const TargetRegisterInfo* TRI) {
21144 // Scan forward through BB for a use/def of EFLAGS.
21145 MachineBasicBlock::iterator miI(std::next(SelectItr));
21146 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
21147 const MachineInstr& mi = *miI;
21148 if (mi.readsRegister(X86::EFLAGS))
21150 if (mi.definesRegister(X86::EFLAGS))
21151 break; // Should have kill-flag - update below.
21154 // If we hit the end of the block, check whether EFLAGS is live into a
21156 if (miI == BB->end()) {
21157 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
21158 sEnd = BB->succ_end();
21159 sItr != sEnd; ++sItr) {
21160 MachineBasicBlock* succ = *sItr;
21161 if (succ->isLiveIn(X86::EFLAGS))
21166 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
21167 // out. SelectMI should have a kill flag on EFLAGS.
21168 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
21172 MachineBasicBlock *
21173 X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
21174 MachineBasicBlock *BB) const {
21175 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21176 DebugLoc DL = MI->getDebugLoc();
21178 // To "insert" a SELECT_CC instruction, we actually have to insert the
21179 // diamond control-flow pattern. The incoming instruction knows the
21180 // destination vreg to set, the condition code register to branch on, the
21181 // true/false values to select between, and a branch opcode to use.
21182 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21183 MachineFunction::iterator It = BB;
//  thisMBB:
//   ...
//   TrueVal = ...
//   cmpTY ccX, r1, r2
//   bCC sinkMBB
//   fallthrough --> copy0MBB
21192 MachineBasicBlock *thisMBB = BB;
21193 MachineFunction *F = BB->getParent();
21194 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
21195 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
21196 F->insert(It, copy0MBB);
21197 F->insert(It, sinkMBB);
21199 // If the EFLAGS register isn't dead in the terminator, then claim that it's
21200 // live into the sink and copy blocks.
21201 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
21202 if (!MI->killsRegister(X86::EFLAGS) &&
21203 !checkAndUpdateEFLAGSKill(MI, BB, TRI)) {
21204 copy0MBB->addLiveIn(X86::EFLAGS);
21205 sinkMBB->addLiveIn(X86::EFLAGS);
21208 // Transfer the remainder of BB and its successor edges to sinkMBB.
21209 sinkMBB->splice(sinkMBB->begin(), BB,
21210 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21211 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
21213 // Add the true and fallthrough blocks as its successors.
21214 BB->addSuccessor(copy0MBB);
21215 BB->addSuccessor(sinkMBB);
21217 // Create the conditional branch instruction.
21219 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
21220 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
21223 // %FalseValue = ...
21224 // # fallthrough to sinkMBB
21225 copy0MBB->addSuccessor(sinkMBB);
21228 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
21230 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21231 TII->get(X86::PHI), MI->getOperand(0).getReg())
21232 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
21233 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
21235 MI->eraseFromParent(); // The pseudo instruction is gone now.
21239 MachineBasicBlock *
21240 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
21241 MachineBasicBlock *BB) const {
21242 MachineFunction *MF = BB->getParent();
21243 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21244 DebugLoc DL = MI->getDebugLoc();
21245 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21247 assert(MF->shouldSplitStack());
21249 const bool Is64Bit = Subtarget->is64Bit();
21250 const bool IsLP64 = Subtarget->isTarget64BitLP64();
21252 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
21253 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
// BB:
//  ... [Till the alloca]
//  If stacklet is not large enough, jump to mallocMBB
//
// bumpMBB:
//  Allocate by subtracting from RSP
//  Jump to continueMBB
//
// mallocMBB:
//  Allocate by call to runtime
//
// continueMBB:
//  ...
//  [rest of original BB]
21271 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21272 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21273 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21275 MachineRegisterInfo &MRI = MF->getRegInfo();
21276 const TargetRegisterClass *AddrRegClass =
21277 getRegClassFor(getPointerTy());
21279 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21280 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21281 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
21282 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
21283 sizeVReg = MI->getOperand(1).getReg(),
21284 physSPReg = IsLP64 || Subtarget->isTargetNaCl64() ? X86::RSP : X86::ESP;
21286 MachineFunction::iterator MBBIter = BB;
21289 MF->insert(MBBIter, bumpMBB);
21290 MF->insert(MBBIter, mallocMBB);
21291 MF->insert(MBBIter, continueMBB);
21293 continueMBB->splice(continueMBB->begin(), BB,
21294 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21295 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
21297 // Add code to the main basic block to check if the stack limit has been hit,
21298 // and if so, jump to mallocMBB otherwise to bumpMBB.
21299 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
21300 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
21301 .addReg(tmpSPVReg).addReg(sizeVReg);
21302 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
21303 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
21304 .addReg(SPLimitVReg);
21305 BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);
21307 // bumpMBB simply decreases the stack pointer, since we know the current
21308 // stacklet has enough space.
21309 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
21310 .addReg(SPLimitVReg);
21311 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
21312 .addReg(SPLimitVReg);
21313 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21315 // Calls into a routine in libgcc to allocate more space from the heap.
21316 const uint32_t *RegMask =
21317 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
21319 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
21321 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21322 .addExternalSymbol("__morestack_allocate_stack_space")
21323 .addRegMask(RegMask)
21324 .addReg(X86::RDI, RegState::Implicit)
21325 .addReg(X86::RAX, RegState::ImplicitDefine);
21326 } else if (Is64Bit) {
21327 BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
21329 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21330 .addExternalSymbol("__morestack_allocate_stack_space")
21331 .addRegMask(RegMask)
21332 .addReg(X86::EDI, RegState::Implicit)
21333 .addReg(X86::EAX, RegState::ImplicitDefine);
21335 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
21337 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
21338 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
21339 .addExternalSymbol("__morestack_allocate_stack_space")
21340 .addRegMask(RegMask)
21341 .addReg(X86::EAX, RegState::ImplicitDefine);
21345 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
21348 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
21349 .addReg(IsLP64 ? X86::RAX : X86::EAX);
21350 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21352 // Set up the CFG correctly.
21353 BB->addSuccessor(bumpMBB);
21354 BB->addSuccessor(mallocMBB);
21355 mallocMBB->addSuccessor(continueMBB);
21356 bumpMBB->addSuccessor(continueMBB);
21358 // Take care of the PHI nodes.
21359 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
21360 MI->getOperand(0).getReg())
21361 .addReg(mallocPtrVReg).addMBB(mallocMBB)
21362 .addReg(bumpSPPtrVReg).addMBB(bumpMBB);
21364 // Delete the original pseudo instruction.
21365 MI->eraseFromParent();
21368 return continueMBB;
21371 MachineBasicBlock *
21372 X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
21373 MachineBasicBlock *BB) const {
21374 DebugLoc DL = MI->getDebugLoc();
21376 assert(!Subtarget->isTargetMachO());
21378 X86FrameLowering::emitStackProbeCall(*BB->getParent(), *BB, MI, DL);
21380 MI->eraseFromParent(); // The pseudo instruction is gone now.
21384 MachineBasicBlock *
21385 X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
21386 MachineBasicBlock *BB) const {
// This is pretty easy. We're taking the value that we loaded from the
// relocation, sticking it in either RDI (x86-64) or EAX (x86-32), and
// doing an indirect call. The return value will then
21390 // be in the normal return register.
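//
// On x86-64 Darwin the emitted sequence is roughly:
//
//   movq _var@TLVP(%rip), %rdi
//   callq *(%rdi)
//
// leaving the address of the thread-local variable in RAX.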
21391 MachineFunction *F = BB->getParent();
21392 const X86InstrInfo *TII = Subtarget->getInstrInfo();
21393 DebugLoc DL = MI->getDebugLoc();
21395 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
21396 assert(MI->getOperand(3).isGlobal() && "This should be a global");
21398 // Get a register mask for the lowered call.
21399 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
21400 // proper register mask.
21401 const uint32_t *RegMask =
21402 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
21403 if (Subtarget->is64Bit()) {
21404 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21405 TII->get(X86::MOV64rm), X86::RDI)
21407 .addImm(0).addReg(0)
21408 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21409 MI->getOperand(3).getTargetFlags())
21411 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
21412 addDirectMem(MIB, X86::RDI);
21413 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
21414 } else if (F->getTarget().getRelocationModel() != Reloc::PIC_) {
21415 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21416 TII->get(X86::MOV32rm), X86::EAX)
21418 .addImm(0).addReg(0)
21419 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21420 MI->getOperand(3).getTargetFlags())
21422 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21423 addDirectMem(MIB, X86::EAX);
21424 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21426 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21427 TII->get(X86::MOV32rm), X86::EAX)
21428 .addReg(TII->getGlobalBaseReg(F))
21429 .addImm(0).addReg(0)
21430 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21431 MI->getOperand(3).getTargetFlags())
21433 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21434 addDirectMem(MIB, X86::EAX);
21435 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21438 MI->eraseFromParent(); // The pseudo instruction is gone now.
21442 MachineBasicBlock *
21443 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
21444 MachineBasicBlock *MBB) const {
21445 DebugLoc DL = MI->getDebugLoc();
21446 MachineFunction *MF = MBB->getParent();
21447 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21448 MachineRegisterInfo &MRI = MF->getRegInfo();
21450 const BasicBlock *BB = MBB->getBasicBlock();
21451 MachineFunction::iterator I = MBB;
21454 // Memory Reference
21455 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21456 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21459 unsigned MemOpndSlot = 0;
21461 unsigned CurOp = 0;
21463 DstReg = MI->getOperand(CurOp++).getReg();
21464 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
21465 assert(RC->hasType(MVT::i32) && "Invalid destination!");
21466 unsigned mainDstReg = MRI.createVirtualRegister(RC);
21467 unsigned restoreDstReg = MRI.createVirtualRegister(RC);
21469 MemOpndSlot = CurOp;
21471 MVT PVT = getPointerTy();
21472 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21473 "Invalid Pointer Size!");
// For v = setjmp(buf), we generate
//
// thisMBB:
//  buf[LabelOffset] = restoreMBB
//  SjLjSetup restoreMBB
//  fallthrough to mainMBB
//
// mainMBB:
//  v_main = 0
//
// sinkMBB:
//  v = phi(main, restore)
//
// restoreMBB:
//  if base pointer being used, load it from frame
//  v_restore = 1
//  jmp sinkMBB
21491 MachineBasicBlock *thisMBB = MBB;
21492 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
21493 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
21494 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
21495 MF->insert(I, mainMBB);
21496 MF->insert(I, sinkMBB);
21497 MF->push_back(restoreMBB);
21499 MachineInstrBuilder MIB;
21501 // Transfer the remainder of BB and its successor edges to sinkMBB.
21502 sinkMBB->splice(sinkMBB->begin(), MBB,
21503 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21504 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
21507 unsigned PtrStoreOpc = 0;
21508 unsigned LabelReg = 0;
21509 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21510 Reloc::Model RM = MF->getTarget().getRelocationModel();
21511 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
21512 (RM == Reloc::Static || RM == Reloc::DynamicNoPIC);
21514 // Prepare IP either in reg or imm.
21515 if (!UseImmLabel) {
21516 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
21517 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
21518 LabelReg = MRI.createVirtualRegister(PtrRC);
21519 if (Subtarget->is64Bit()) {
21520 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
21524 .addMBB(restoreMBB)
21527 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
21528 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
21529 .addReg(XII->getGlobalBaseReg(MF))
21532 .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference())
21536 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
21538 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
21539 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21540 if (i == X86::AddrDisp)
21541 MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset);
21543 MIB.addOperand(MI->getOperand(MemOpndSlot + i));
21546 MIB.addReg(LabelReg);
21548 MIB.addMBB(restoreMBB);
21549 MIB.setMemRefs(MMOBegin, MMOEnd);
21551 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
21552 .addMBB(restoreMBB);
21554 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
21555 MIB.addRegMask(RegInfo->getNoPreservedMask());
21556 thisMBB->addSuccessor(mainMBB);
21557 thisMBB->addSuccessor(restoreMBB);
21561 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
21562 mainMBB->addSuccessor(sinkMBB);
21565 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21566 TII->get(X86::PHI), DstReg)
21567 .addReg(mainDstReg).addMBB(mainMBB)
21568 .addReg(restoreDstReg).addMBB(restoreMBB);
21571 if (RegInfo->hasBasePointer(*MF)) {
21572 const bool Uses64BitFramePtr =
21573 Subtarget->isTarget64BitLP64() || Subtarget->isTargetNaCl64();
21574 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
21575 X86FI->setRestoreBasePointer(MF);
21576 unsigned FramePtr = RegInfo->getFrameRegister(*MF);
21577 unsigned BasePtr = RegInfo->getBaseRegister();
21578 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
21579 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
21580 FramePtr, true, X86FI->getRestoreBasePointerOffset())
21581 .setMIFlag(MachineInstr::FrameSetup);
21583 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
21584 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
21585 restoreMBB->addSuccessor(sinkMBB);
21587 MI->eraseFromParent();
21591 MachineBasicBlock *
21592 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
21593 MachineBasicBlock *MBB) const {
21594 DebugLoc DL = MI->getDebugLoc();
21595 MachineFunction *MF = MBB->getParent();
21596 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21597 MachineRegisterInfo &MRI = MF->getRegInfo();
21599 // Memory Reference
21600 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21601 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21603 MVT PVT = getPointerTy();
21604 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21605 "Invalid Pointer Size!");
21607 const TargetRegisterClass *RC =
21608 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
21609 unsigned Tmp = MRI.createVirtualRegister(RC);
// Since FP is only updated here but NOT referenced, it's treated as a GPR.
21611 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
21612 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
21613 unsigned SP = RegInfo->getStackRegister();
21615 MachineInstrBuilder MIB;
21617 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21618 const int64_t SPOffset = 2 * PVT.getStoreSize();
21620 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
21621 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
21624 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
21625 for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
21626 MIB.addOperand(MI->getOperand(i));
21627 MIB.setMemRefs(MMOBegin, MMOEnd);
21629 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
21630 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21631 if (i == X86::AddrDisp)
21632 MIB.addDisp(MI->getOperand(i), LabelOffset);
21634 MIB.addOperand(MI->getOperand(i));
21636 MIB.setMemRefs(MMOBegin, MMOEnd);
21638 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
21639 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21640 if (i == X86::AddrDisp)
21641 MIB.addDisp(MI->getOperand(i), SPOffset);
21643 MIB.addOperand(MI->getOperand(i));
21645 MIB.setMemRefs(MMOBegin, MMOEnd);
21647 BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
21649 MI->eraseFromParent();
21653 // Replace 213-type (isel default) FMA3 instructions with 231-type for
21654 // accumulator loops. Writing back to the accumulator allows the coalescer
21655 // to remove extra copies in the loop.
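// As an illustration (Intel syntax), the two forms differ only in which source
// operand is tied to the destination:
//
//   vfmadd213ps xmm0, xmm1, xmm2   ; xmm0 = xmm1*xmm0 + xmm2  (multiplicand tied)
//   vfmadd231ps xmm0, xmm1, xmm2   ; xmm0 = xmm1*xmm2 + xmm0  (addend tied)
//
// With the 231 form the accumulator stays in the tied destination register
// across loop iterations, so the register coalescer can drop the extra copy.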
21656 MachineBasicBlock *
21657 X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
21658 MachineBasicBlock *MBB) const {
21659 MachineOperand &AddendOp = MI->getOperand(3);
21661 // Bail out early if the addend isn't a register - we can't switch these.
21662 if (!AddendOp.isReg())
21665 MachineFunction &MF = *MBB->getParent();
21666 MachineRegisterInfo &MRI = MF.getRegInfo();
21668 // Check whether the addend is defined by a PHI:
21669 assert(MRI.hasOneDef(AddendOp.getReg()) && "Multiple defs in SSA?");
21670 MachineInstr &AddendDef = *MRI.def_instr_begin(AddendOp.getReg());
21671 if (!AddendDef.isPHI())
// Look for the following pattern:
//
//   loop:
//     %addend = phi [%entry, 0], [%loop, %result]
//     ...
//     %result<tied1> = FMA213 %m2<tied0>, %m1, %addend
//
// and transform it into:
//
//   loop:
//     %addend = phi [%entry, 0], [%loop, %result]
//     ...
//     %result<tied1> = FMA231 %addend<tied0>, %m1, %m2
21686 for (unsigned i = 1, e = AddendDef.getNumOperands(); i < e; i += 2) {
21687 assert(AddendDef.getOperand(i).isReg());
21688 MachineOperand PHISrcOp = AddendDef.getOperand(i);
21689 MachineInstr &PHISrcInst = *MRI.def_instr_begin(PHISrcOp.getReg());
21690 if (&PHISrcInst == MI) {
21691 // Found a matching instruction.
21692 unsigned NewFMAOpc = 0;
21693 switch (MI->getOpcode()) {
21694 case X86::VFMADDPDr213r: NewFMAOpc = X86::VFMADDPDr231r; break;
21695 case X86::VFMADDPSr213r: NewFMAOpc = X86::VFMADDPSr231r; break;
21696 case X86::VFMADDSDr213r: NewFMAOpc = X86::VFMADDSDr231r; break;
21697 case X86::VFMADDSSr213r: NewFMAOpc = X86::VFMADDSSr231r; break;
21698 case X86::VFMSUBPDr213r: NewFMAOpc = X86::VFMSUBPDr231r; break;
21699 case X86::VFMSUBPSr213r: NewFMAOpc = X86::VFMSUBPSr231r; break;
21700 case X86::VFMSUBSDr213r: NewFMAOpc = X86::VFMSUBSDr231r; break;
21701 case X86::VFMSUBSSr213r: NewFMAOpc = X86::VFMSUBSSr231r; break;
21702 case X86::VFNMADDPDr213r: NewFMAOpc = X86::VFNMADDPDr231r; break;
21703 case X86::VFNMADDPSr213r: NewFMAOpc = X86::VFNMADDPSr231r; break;
21704 case X86::VFNMADDSDr213r: NewFMAOpc = X86::VFNMADDSDr231r; break;
21705 case X86::VFNMADDSSr213r: NewFMAOpc = X86::VFNMADDSSr231r; break;
21706 case X86::VFNMSUBPDr213r: NewFMAOpc = X86::VFNMSUBPDr231r; break;
21707 case X86::VFNMSUBPSr213r: NewFMAOpc = X86::VFNMSUBPSr231r; break;
21708 case X86::VFNMSUBSDr213r: NewFMAOpc = X86::VFNMSUBSDr231r; break;
21709 case X86::VFNMSUBSSr213r: NewFMAOpc = X86::VFNMSUBSSr231r; break;
21710 case X86::VFMADDSUBPDr213r: NewFMAOpc = X86::VFMADDSUBPDr231r; break;
21711 case X86::VFMADDSUBPSr213r: NewFMAOpc = X86::VFMADDSUBPSr231r; break;
21712 case X86::VFMSUBADDPDr213r: NewFMAOpc = X86::VFMSUBADDPDr231r; break;
21713 case X86::VFMSUBADDPSr213r: NewFMAOpc = X86::VFMSUBADDPSr231r; break;
21715 case X86::VFMADDPDr213rY: NewFMAOpc = X86::VFMADDPDr231rY; break;
21716 case X86::VFMADDPSr213rY: NewFMAOpc = X86::VFMADDPSr231rY; break;
21717 case X86::VFMSUBPDr213rY: NewFMAOpc = X86::VFMSUBPDr231rY; break;
21718 case X86::VFMSUBPSr213rY: NewFMAOpc = X86::VFMSUBPSr231rY; break;
21719 case X86::VFNMADDPDr213rY: NewFMAOpc = X86::VFNMADDPDr231rY; break;
21720 case X86::VFNMADDPSr213rY: NewFMAOpc = X86::VFNMADDPSr231rY; break;
21721 case X86::VFNMSUBPDr213rY: NewFMAOpc = X86::VFNMSUBPDr231rY; break;
21722 case X86::VFNMSUBPSr213rY: NewFMAOpc = X86::VFNMSUBPSr231rY; break;
21723 case X86::VFMADDSUBPDr213rY: NewFMAOpc = X86::VFMADDSUBPDr231rY; break;
21724 case X86::VFMADDSUBPSr213rY: NewFMAOpc = X86::VFMADDSUBPSr231rY; break;
21725 case X86::VFMSUBADDPDr213rY: NewFMAOpc = X86::VFMSUBADDPDr231rY; break;
21726 case X86::VFMSUBADDPSr213rY: NewFMAOpc = X86::VFMSUBADDPSr231rY; break;
21727 default: llvm_unreachable("Unrecognized FMA variant.");
21730 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
21731 MachineInstrBuilder MIB =
21732 BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
21733 .addOperand(MI->getOperand(0))
21734 .addOperand(MI->getOperand(3))
21735 .addOperand(MI->getOperand(2))
21736 .addOperand(MI->getOperand(1));
21737 MBB->insert(MachineBasicBlock::iterator(MI), MIB);
21738 MI->eraseFromParent();
21745 MachineBasicBlock *
21746 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
21747 MachineBasicBlock *BB) const {
21748 switch (MI->getOpcode()) {
21749 default: llvm_unreachable("Unexpected instr type to insert");
21750 case X86::TAILJMPd64:
21751 case X86::TAILJMPr64:
21752 case X86::TAILJMPm64:
21753 case X86::TAILJMPd64_REX:
21754 case X86::TAILJMPr64_REX:
21755 case X86::TAILJMPm64_REX:
21756 llvm_unreachable("TAILJMP64 would not be touched here.");
21757 case X86::TCRETURNdi64:
21758 case X86::TCRETURNri64:
21759 case X86::TCRETURNmi64:
21761 case X86::WIN_ALLOCA:
21762 return EmitLoweredWinAlloca(MI, BB);
21763 case X86::SEG_ALLOCA_32:
21764 case X86::SEG_ALLOCA_64:
21765 return EmitLoweredSegAlloca(MI, BB);
21766 case X86::TLSCall_32:
21767 case X86::TLSCall_64:
21768 return EmitLoweredTLSCall(MI, BB);
21769 case X86::CMOV_GR8:
21770 case X86::CMOV_FR32:
21771 case X86::CMOV_FR64:
21772 case X86::CMOV_V4F32:
21773 case X86::CMOV_V2F64:
21774 case X86::CMOV_V2I64:
21775 case X86::CMOV_V8F32:
21776 case X86::CMOV_V4F64:
21777 case X86::CMOV_V4I64:
21778 case X86::CMOV_V16F32:
21779 case X86::CMOV_V8F64:
21780 case X86::CMOV_V8I64:
21781 case X86::CMOV_GR16:
21782 case X86::CMOV_GR32:
21783 case X86::CMOV_RFP32:
21784 case X86::CMOV_RFP64:
21785 case X86::CMOV_RFP80:
21786 return EmitLoweredSelect(MI, BB);
21788 case X86::FP32_TO_INT16_IN_MEM:
21789 case X86::FP32_TO_INT32_IN_MEM:
21790 case X86::FP32_TO_INT64_IN_MEM:
21791 case X86::FP64_TO_INT16_IN_MEM:
21792 case X86::FP64_TO_INT32_IN_MEM:
21793 case X86::FP64_TO_INT64_IN_MEM:
21794 case X86::FP80_TO_INT16_IN_MEM:
21795 case X86::FP80_TO_INT32_IN_MEM:
21796 case X86::FP80_TO_INT64_IN_MEM: {
21797 MachineFunction *F = BB->getParent();
21798 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21799 DebugLoc DL = MI->getDebugLoc();
21801 // Change the floating point control register to use "round towards zero"
21802 // mode when truncating to an integer value.
21803 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
21804 addFrameReference(BuildMI(*BB, MI, DL,
21805 TII->get(X86::FNSTCW16m)), CWFrameIdx);
21807 // Load the old value of the high byte of the control word...
21808 unsigned OldCW =
21809 F->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
21810 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
21811 CWFrameIdx);
21813 // Set the high part to be round to zero...
21814 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
21815 .addImm(0xC7F);
21817 // Reload the modified control word now...
21818 addFrameReference(BuildMI(*BB, MI, DL,
21819 TII->get(X86::FLDCW16m)), CWFrameIdx);
21821 // Restore the memory image of control word to original value
21822 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
21823 .addReg(OldCW);
21825 // Get the X86 opcode to use.
21826 unsigned Opc;
21827 switch (MI->getOpcode()) {
21828 default: llvm_unreachable("illegal opcode!");
21829 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
21830 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
21831 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
21832 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
21833 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
21834 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
21835 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
21836 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
21837 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
21838 }
21840 X86AddressMode AM;
21841 MachineOperand &Op = MI->getOperand(0);
21842 if (Op.isReg()) {
21843 AM.BaseType = X86AddressMode::RegBase;
21844 AM.Base.Reg = Op.getReg();
21845 } else {
21846 AM.BaseType = X86AddressMode::FrameIndexBase;
21847 AM.Base.FrameIndex = Op.getIndex();
21848 }
21849 Op = MI->getOperand(1);
21850 if (Op.isImm())
21851 AM.Scale = Op.getImm();
21852 Op = MI->getOperand(2);
21853 if (Op.isImm())
21854 AM.IndexReg = Op.getImm();
21855 Op = MI->getOperand(3);
21856 if (Op.isGlobal()) {
21857 AM.GV = Op.getGlobal();
21858 } else {
21859 AM.Disp = Op.getImm();
21860 }
21861 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
21862 .addReg(MI->getOperand(X86::AddrNumOperands).getReg());
21864 // Reload the original control word now.
21865 addFrameReference(BuildMI(*BB, MI, DL,
21866 TII->get(X86::FLDCW16m)), CWFrameIdx);
21868 MI->eraseFromParent(); // The pseudo instruction is gone now.
21871 // String/text processing lowering.
21872 case X86::PCMPISTRM128REG:
21873 case X86::VPCMPISTRM128REG:
21874 case X86::PCMPISTRM128MEM:
21875 case X86::VPCMPISTRM128MEM:
21876 case X86::PCMPESTRM128REG:
21877 case X86::VPCMPESTRM128REG:
21878 case X86::PCMPESTRM128MEM:
21879 case X86::VPCMPESTRM128MEM:
21880 assert(Subtarget->hasSSE42() &&
21881 "Target must have SSE4.2 or AVX features enabled");
21882 return EmitPCMPSTRM(MI, BB, Subtarget->getInstrInfo());
21884 // String/text processing lowering.
21885 case X86::PCMPISTRIREG:
21886 case X86::VPCMPISTRIREG:
21887 case X86::PCMPISTRIMEM:
21888 case X86::VPCMPISTRIMEM:
21889 case X86::PCMPESTRIREG:
21890 case X86::VPCMPESTRIREG:
21891 case X86::PCMPESTRIMEM:
21892 case X86::VPCMPESTRIMEM:
21893 assert(Subtarget->hasSSE42() &&
21894 "Target must have SSE4.2 or AVX features enabled");
21895 return EmitPCMPSTRI(MI, BB, Subtarget->getInstrInfo());
21897 // Thread synchronization.
21898 case X86::MONITOR:
21899 return EmitMonitor(MI, BB, Subtarget);
21902 case X86::XBEGIN:
21903 return EmitXBegin(MI, BB, Subtarget->getInstrInfo());
21905 case X86::VASTART_SAVE_XMM_REGS:
21906 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
21908 case X86::VAARG_64:
21909 return EmitVAARG64WithCustomInserter(MI, BB);
21911 case X86::EH_SjLj_SetJmp32:
21912 case X86::EH_SjLj_SetJmp64:
21913 return emitEHSjLjSetJmp(MI, BB);
21915 case X86::EH_SjLj_LongJmp32:
21916 case X86::EH_SjLj_LongJmp64:
21917 return emitEHSjLjLongJmp(MI, BB);
21919 case TargetOpcode::STATEPOINT:
21920 // As an implementation detail, STATEPOINT shares the STACKMAP format at
21921 // this point in the process. We diverge later.
21922 return emitPatchPoint(MI, BB);
21924 case TargetOpcode::STACKMAP:
21925 case TargetOpcode::PATCHPOINT:
21926 return emitPatchPoint(MI, BB);
21928 case X86::VFMADDPDr213r:
21929 case X86::VFMADDPSr213r:
21930 case X86::VFMADDSDr213r:
21931 case X86::VFMADDSSr213r:
21932 case X86::VFMSUBPDr213r:
21933 case X86::VFMSUBPSr213r:
21934 case X86::VFMSUBSDr213r:
21935 case X86::VFMSUBSSr213r:
21936 case X86::VFNMADDPDr213r:
21937 case X86::VFNMADDPSr213r:
21938 case X86::VFNMADDSDr213r:
21939 case X86::VFNMADDSSr213r:
21940 case X86::VFNMSUBPDr213r:
21941 case X86::VFNMSUBPSr213r:
21942 case X86::VFNMSUBSDr213r:
21943 case X86::VFNMSUBSSr213r:
21944 case X86::VFMADDSUBPDr213r:
21945 case X86::VFMADDSUBPSr213r:
21946 case X86::VFMSUBADDPDr213r:
21947 case X86::VFMSUBADDPSr213r:
21948 case X86::VFMADDPDr213rY:
21949 case X86::VFMADDPSr213rY:
21950 case X86::VFMSUBPDr213rY:
21951 case X86::VFMSUBPSr213rY:
21952 case X86::VFNMADDPDr213rY:
21953 case X86::VFNMADDPSr213rY:
21954 case X86::VFNMSUBPDr213rY:
21955 case X86::VFNMSUBPSr213rY:
21956 case X86::VFMADDSUBPDr213rY:
21957 case X86::VFMADDSUBPSr213rY:
21958 case X86::VFMSUBADDPDr213rY:
21959 case X86::VFMSUBADDPSr213rY:
21960 return emitFMA3Instr(MI, BB);
21964 //===----------------------------------------------------------------------===//
21965 // X86 Optimization Hooks
21966 //===----------------------------------------------------------------------===//
21968 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
21971 const SelectionDAG &DAG,
21972 unsigned Depth) const {
21973 unsigned BitWidth = KnownZero.getBitWidth();
21974 unsigned Opc = Op.getOpcode();
21975 assert((Opc >= ISD::BUILTIN_OP_END ||
21976 Opc == ISD::INTRINSIC_WO_CHAIN ||
21977 Opc == ISD::INTRINSIC_W_CHAIN ||
21978 Opc == ISD::INTRINSIC_VOID) &&
21979 "Should use MaskedValueIsZero if you don't know whether Op"
21980 " is a target node!");
21982 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
21996 // These nodes' second result is a boolean.
21997 if (Op.getResNo() == 0)
22000 case X86ISD::SETCC:
22001 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
22002 break;
22003 case ISD::INTRINSIC_WO_CHAIN: {
22004 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
22005 unsigned NumLoBits = 0;
22008 case Intrinsic::x86_sse_movmsk_ps:
22009 case Intrinsic::x86_avx_movmsk_ps_256:
22010 case Intrinsic::x86_sse2_movmsk_pd:
22011 case Intrinsic::x86_avx_movmsk_pd_256:
22012 case Intrinsic::x86_mmx_pmovmskb:
22013 case Intrinsic::x86_sse2_pmovmskb_128:
22014 case Intrinsic::x86_avx2_pmovmskb: {
22015 // High bits of movmskp{s|d}, pmovmskb are known zero.
22017 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
22018 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break;
22019 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break;
22020 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break;
22021 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break;
22022 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break;
22023 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
22024 case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break;
22026 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
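// Worked example (added commentary): x86_sse_movmsk_ps packs the four sign
// bits of a v4f32 into bits [3:0] of an i32, so with NumLoBits == 4 and
// BitWidth == 32 the line above reports bits [31:4] as known zero.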
22035 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
22037 const SelectionDAG &,
22038 unsigned Depth) const {
22039 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
22040 if (Op.getOpcode() == X86ISD::SETCC_CARRY)
22041 return Op.getValueType().getScalarType().getSizeInBits();
22047 /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
22048 /// node is a GlobalAddress + offset.
22049 bool X86TargetLowering::isGAPlusOffset(SDNode *N,
22050 const GlobalValue* &GA,
22051 int64_t &Offset) const {
22052 if (N->getOpcode() == X86ISD::Wrapper) {
22053 if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
22054 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
22055 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
22059 return TargetLowering::isGAPlusOffset(N, GA, Offset);
22062 /// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the
22063 /// same as extracting the high 128-bit part of a 256-bit vector and then
22064 /// inserting the result into the low part of a new 256-bit vector
22065 static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) {
22066 EVT VT = SVOp->getValueType(0);
22067 unsigned NumElems = VT.getVectorNumElements();
22069 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
22070 for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j)
22071 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
22072 SVOp->getMaskElt(j) >= 0)
22078 /// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the
22079 /// same as extracting the low 128-bit part of a 256-bit vector and then
22080 /// inserting the result into the high part of a new 256-bit vector
22081 static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) {
22082 EVT VT = SVOp->getValueType(0);
22083 unsigned NumElems = VT.getVectorNumElements();
22085 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
22086 for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j)
22087 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
22088 SVOp->getMaskElt(j) >= 0)
22094 /// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
22095 static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
22096 TargetLowering::DAGCombinerInfo &DCI,
22097 const X86Subtarget* Subtarget) {
22099 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
22100 SDValue V1 = SVOp->getOperand(0);
22101 SDValue V2 = SVOp->getOperand(1);
22102 EVT VT = SVOp->getValueType(0);
22103 unsigned NumElems = VT.getVectorNumElements();
22105 if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
22106 V2.getOpcode() == ISD::CONCAT_VECTORS) {
22110 // V UNDEF BUILD_VECTOR UNDEF
22112 // CONCAT_VECTOR CONCAT_VECTOR
22115 // RESULT: V + zero extended
22117 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
22118 V2.getOperand(1).getOpcode() != ISD::UNDEF ||
22119 V1.getOperand(1).getOpcode() != ISD::UNDEF)
22122 if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
22125 // To match the shuffle mask, the first half of the mask should
22126 // be exactly the first vector, and all the rest a splat with the
22127 // first element of the second one.
22128 for (unsigned i = 0; i != NumElems/2; ++i)
22129 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) ||
22130 !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
22133 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
22134 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) {
22135 if (Ld->hasNUsesOfValue(1, 0)) {
22136 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
22137 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
22139 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
22141 Ld->getPointerInfo(),
22142 Ld->getAlignment(),
22143 false/*isVolatile*/, true/*ReadMem*/,
22144 false/*WriteMem*/);
22146 // Make sure the newly-created LOAD is in the same position as Ld in
22147 // terms of dependency. We create a TokenFactor for Ld and ResNode,
22148 // and update uses of Ld's output chain to use the TokenFactor.
22149 if (Ld->hasAnyUseOfValue(1)) {
22150 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
22151 SDValue(Ld, 1), SDValue(ResNode.getNode(), 1));
22152 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
22153 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1),
22154 SDValue(ResNode.getNode(), 1));
22157 return DAG.getNode(ISD::BITCAST, dl, VT, ResNode);
22161 // Emit a zeroed vector and insert the desired subvector on its
22163 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
22164 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
22165 return DCI.CombineTo(N, InsV);
22168 //===--------------------------------------------------------------------===//
22169 // Combine some shuffles into subvector extracts and inserts:
22172 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
22173 if (isShuffleHigh128VectorInsertLow(SVOp)) {
22174 SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl);
22175 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl);
22176 return DCI.CombineTo(N, InsV);
22179 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
22180 if (isShuffleLow128VectorInsertHigh(SVOp)) {
22181 SDValue V = Extract128BitVector(V1, 0, DAG, dl);
22182 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl);
22183 return DCI.CombineTo(N, InsV);
22189 /// \brief Combine an arbitrary chain of shuffles into a single instruction if
22190 /// possible.
22192 /// This is the leaf of the recursive combine below. When we have found some
22193 /// chain of single-use x86 shuffle instructions and accumulated the combined
22194 /// shuffle mask represented by them, this will try to pattern match that mask
22195 /// into either a single instruction if there is a special purpose instruction
22196 /// for this operation, or into a PSHUFB instruction which is a fully general
22197 /// instruction but should only be used to replace chains over a certain depth.
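/// Illustrative example (added commentary): a combined v8i16 mask such as
/// {0,0,1,1,2,2,3,3} is matched below to a single UNPCKL, while a deep chain
/// whose accumulated byte-level mask matches nothing special is emitted as one
/// PSHUFB when SSSE3 is available.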
22198 static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
22199 int Depth, bool HasPSHUFB, SelectionDAG &DAG,
22200 TargetLowering::DAGCombinerInfo &DCI,
22201 const X86Subtarget *Subtarget) {
22202 assert(!Mask.empty() && "Cannot combine an empty shuffle mask!");
22204 // Find the operand that enters the chain. Note that multiple uses are OK
22205 // here, we're not going to remove the operand we find.
22206 SDValue Input = Op.getOperand(0);
22207 while (Input.getOpcode() == ISD::BITCAST)
22208 Input = Input.getOperand(0);
22210 MVT VT = Input.getSimpleValueType();
22211 MVT RootVT = Root.getSimpleValueType();
22214 // Just remove no-op shuffle masks.
22215 if (Mask.size() == 1) {
22216 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Input),
22221 // Use the float domain if the operand type is a floating point type.
22222 bool FloatDomain = VT.isFloatingPoint();
22224 // For floating point shuffles, we don't have free copies in the shuffle
22225 // instructions or the ability to load as part of the instruction, so
22226 // canonicalize their shuffles to UNPCK or MOV variants.
22228 // Note that even with AVX we prefer the PSHUFD form of shuffle for integer
22229 // vectors because it can have a load folded into it that UNPCK cannot. This
22230 // doesn't preclude something switching to the shorter encoding post-RA.
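// Illustrative example (added commentary): for a v2f64 input, mask (0, 0)
// duplicates the low element and becomes MOVDDUP on SSE3 (which can also fold
// an unaligned load) or MOVLHPS otherwise, while mask (1, 1) becomes MOVHLPS.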
22232 if (Mask.equals(0, 0) || Mask.equals(1, 1)) {
22233 bool Lo = Mask.equals(0, 0);
22236 // Check if we have SSE3 which will let us use MOVDDUP. That instruction
22237 // is no slower than UNPCKLPD but has the option to fold the input operand
22238 // into even an unaligned memory load.
22239 if (Lo && Subtarget->hasSSE3()) {
22240 Shuffle = X86ISD::MOVDDUP;
22241 ShuffleVT = MVT::v2f64;
22243 // We have MOVLHPS and MOVHLPS throughout SSE and they encode smaller
22244 // than the UNPCK variants.
22245 Shuffle = Lo ? X86ISD::MOVLHPS : X86ISD::MOVHLPS;
22246 ShuffleVT = MVT::v4f32;
22248 if (Depth == 1 && Root->getOpcode() == Shuffle)
22249 return false; // Nothing to do!
22250 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22251 DCI.AddToWorklist(Op.getNode());
22252 if (Shuffle == X86ISD::MOVDDUP)
22253 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22255 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22256 DCI.AddToWorklist(Op.getNode());
22257 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22261 if (Subtarget->hasSSE3() &&
22262 (Mask.equals(0, 0, 2, 2) || Mask.equals(1, 1, 3, 3))) {
22263 bool Lo = Mask.equals(0, 0, 2, 2);
22264 unsigned Shuffle = Lo ? X86ISD::MOVSLDUP : X86ISD::MOVSHDUP;
22265 MVT ShuffleVT = MVT::v4f32;
22266 if (Depth == 1 && Root->getOpcode() == Shuffle)
22267 return false; // Nothing to do!
22268 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22269 DCI.AddToWorklist(Op.getNode());
22270 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22271 DCI.AddToWorklist(Op.getNode());
22272 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22276 if (Mask.equals(0, 0, 1, 1) || Mask.equals(2, 2, 3, 3)) {
22277 bool Lo = Mask.equals(0, 0, 1, 1);
22278 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22279 MVT ShuffleVT = MVT::v4f32;
22280 if (Depth == 1 && Root->getOpcode() == Shuffle)
22281 return false; // Nothing to do!
22282 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22283 DCI.AddToWorklist(Op.getNode());
22284 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22285 DCI.AddToWorklist(Op.getNode());
22286 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22292 // We always canonicalize the 8 x i16 and 16 x i8 shuffles into their UNPCK
22293 // variants as none of these have single-instruction variants that are
22294 // superior to the UNPCK formulation.
22295 if (!FloatDomain &&
22296 (Mask.equals(0, 0, 1, 1, 2, 2, 3, 3) ||
22297 Mask.equals(4, 4, 5, 5, 6, 6, 7, 7) ||
22298 Mask.equals(0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7) ||
22299 Mask.equals(8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
22301 bool Lo = Mask[0] == 0;
22302 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22303 if (Depth == 1 && Root->getOpcode() == Shuffle)
22304 return false; // Nothing to do!
22306 switch (Mask.size()) {
22308 ShuffleVT = MVT::v8i16;
22311 ShuffleVT = MVT::v16i8;
22314 llvm_unreachable("Impossible mask size!");
22316 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22317 DCI.AddToWorklist(Op.getNode());
22318 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22319 DCI.AddToWorklist(Op.getNode());
22320 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22325 // Don't try to re-form single instruction chains under any circumstances now
22326 // that we've done encoding canonicalization for them.
22330 // If we have 3 or more shuffle instructions or a chain involving PSHUFB, we
22331 // can replace them with a single PSHUFB instruction profitably. Intel's
22332 // manuals suggest only using PSHUFB if doing so replaces 5 instructions, but
22333 // in practice PSHUFB tends to be *very* fast so we're more aggressive.
22334 if ((Depth >= 3 || HasPSHUFB) && Subtarget->hasSSSE3()) {
22335 SmallVector<SDValue, 16> PSHUFBMask;
22336 assert(Mask.size() <= 16 && "Can't shuffle elements smaller than bytes!");
22337 int Ratio = 16 / Mask.size();
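// Worked example (added commentary): for a 4-element (v4i32-style) mask
// {2, 0, 1, 3}, Ratio is 16 / 4 = 4, so byte i of the PSHUFB control becomes
// 4 * Mask[i / 4] + i % 4, i.e. {8,9,10,11, 0,1,2,3, 4,5,6,7, 12,13,14,15}.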
22338 for (unsigned i = 0; i < 16; ++i) {
22339 if (Mask[i / Ratio] == SM_SentinelUndef) {
22340 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
22343 int M = Mask[i / Ratio] != SM_SentinelZero
22344 ? Ratio * Mask[i / Ratio] + i % Ratio
22346 PSHUFBMask.push_back(DAG.getConstant(M, MVT::i8));
22348 Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Input);
22349 DCI.AddToWorklist(Op.getNode());
22350 SDValue PSHUFBMaskOp =
22351 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, PSHUFBMask);
22352 DCI.AddToWorklist(PSHUFBMaskOp.getNode());
22353 Op = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, Op, PSHUFBMaskOp);
22354 DCI.AddToWorklist(Op.getNode());
22355 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22360 // Failed to find any combines.
22364 /// \brief Fully generic combining of x86 shuffle instructions.
22366 /// This should be the last combine run over the x86 shuffle instructions. Once
22367 /// they have been fully optimized, this will recursively consider all chains
22368 /// of single-use shuffle instructions, build a generic model of the cumulative
22369 /// shuffle operation, and check for simpler instructions which implement this
22370 /// operation. We use this primarily for two purposes:
22372 /// 1) Collapse generic shuffles to specialized single instructions when
22373 /// equivalent. In most cases, this is just an encoding size win, but
22374 /// sometimes we will collapse multiple generic shuffles into a single
22375 /// special-purpose shuffle.
22376 /// 2) Look for sequences of shuffle instructions with 3 or more total
22377 /// instructions, and replace them with the slightly more expensive SSSE3
22378 /// PSHUFB instruction if available. We do this as the last combining step
22379 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
22380 /// a suitable short sequence of other instructions. The PSHUFB will either
22381 /// use a register or have to read from memory and so is slightly (but only
22382 /// slightly) more expensive than the other shuffle instructions.
22384 /// Because this is inherently a quadratic operation (for each shuffle in
22385 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
22386 /// This should never be an issue in practice as the shuffle lowering doesn't
22387 /// produce sequences of more than 8 instructions.
22389 /// FIXME: We will currently miss some cases where the redundant shuffling
22390 /// would simplify under the threshold for PSHUFB formation because of
22391 /// combine-ordering. To fix this, we should do the redundant instruction
22392 /// combining in this recursive walk.
22393 static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
22394 ArrayRef<int> RootMask,
22395 int Depth, bool HasPSHUFB,
22397 TargetLowering::DAGCombinerInfo &DCI,
22398 const X86Subtarget *Subtarget) {
22399 // Bound the depth of our recursive combine because this is ultimately
22400 // quadratic in nature.
22404 // Directly rip through bitcasts to find the underlying operand.
22405 while (Op.getOpcode() == ISD::BITCAST && Op.getOperand(0).hasOneUse())
22406 Op = Op.getOperand(0);
22408 MVT VT = Op.getSimpleValueType();
22409 if (!VT.isVector())
22410 return false; // Bail if we hit a non-vector.
22411 // FIXME: This routine should be taught about 256-bit shuffles, or a 256-bit
22412 // version should be added.
22413 if (VT.getSizeInBits() != 128)
22416 assert(Root.getSimpleValueType().isVector() &&
22417 "Shuffles operate on vector types!");
22418 assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
22419 "Can only combine shuffles of the same vector register size.");
22421 if (!isTargetShuffle(Op.getOpcode()))
22423 SmallVector<int, 16> OpMask;
22425 bool HaveMask = getTargetShuffleMask(Op.getNode(), VT, OpMask, IsUnary);
22426 // We only can combine unary shuffles which we can decode the mask for.
22427 if (!HaveMask || !IsUnary)
22430 assert(VT.getVectorNumElements() == OpMask.size() &&
22431 "Different mask size from vector size!");
22432 assert(((RootMask.size() > OpMask.size() &&
22433 RootMask.size() % OpMask.size() == 0) ||
22434 (OpMask.size() > RootMask.size() &&
22435 OpMask.size() % RootMask.size() == 0) ||
22436 OpMask.size() == RootMask.size()) &&
22437 "The smaller number of elements must divide the larger.");
22438 int RootRatio = std::max<int>(1, OpMask.size() / RootMask.size());
22439 int OpRatio = std::max<int>(1, RootMask.size() / OpMask.size());
22440 assert(((RootRatio == 1 && OpRatio == 1) ||
22441 (RootRatio == 1) != (OpRatio == 1)) &&
22442 "Must not have a ratio for both incoming and op masks!");
22444 SmallVector<int, 16> Mask;
22445 Mask.reserve(std::max(OpMask.size(), RootMask.size()));
22447 // Merge this shuffle operation's mask into our accumulated mask. Note that
22448 // this shuffle's mask will be the first applied to the input, followed by the
22449 // root mask to get us all the way to the root value arrangement. The reason
22450 // for this order is that we are recursing up the operation chain.
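// Worked example (added commentary): when both masks have the same size (both
// ratios are 1) the merge below is simply Mask[i] = OpMask[RootMask[i]]; e.g.
// RootMask {2,3,0,1} composed over OpMask {1,0,3,2} yields {3,2,1,0}.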
22451 for (int i = 0, e = std::max(OpMask.size(), RootMask.size()); i < e; ++i) {
22452 int RootIdx = i / RootRatio;
22453 if (RootMask[RootIdx] < 0) {
22454 // This is a zero or undef lane, we're done.
22455 Mask.push_back(RootMask[RootIdx]);
22459 int RootMaskedIdx = RootMask[RootIdx] * RootRatio + i % RootRatio;
22460 int OpIdx = RootMaskedIdx / OpRatio;
22461 if (OpMask[OpIdx] < 0) {
22462 // The incoming lanes are zero or undef, it doesn't matter which ones we
22464 Mask.push_back(OpMask[OpIdx]);
22468 // Ok, we have non-zero lanes, map them through.
22469 Mask.push_back(OpMask[OpIdx] * OpRatio +
22470 RootMaskedIdx % OpRatio);
22473 // See if we can recurse into the operand to combine more things.
22474 switch (Op.getOpcode()) {
22475 case X86ISD::PSHUFB:
22477 case X86ISD::PSHUFD:
22478 case X86ISD::PSHUFHW:
22479 case X86ISD::PSHUFLW:
22480 if (Op.getOperand(0).hasOneUse() &&
22481 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22482 HasPSHUFB, DAG, DCI, Subtarget))
22486 case X86ISD::UNPCKL:
22487 case X86ISD::UNPCKH:
22488 assert(Op.getOperand(0) == Op.getOperand(1) && "We only combine unary shuffles!");
22489 // We can't check for single use, we have to check that this shuffle is the only user.
22490 if (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
22491 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22492 HasPSHUFB, DAG, DCI, Subtarget))
22497 // Minor canonicalization of the accumulated shuffle mask to make it easier
22498 // to match below. All this does is detect masks with sequential pairs of
22499 // elements, and shrink them to the half-width mask. It does this in a loop
22500 // so it will reduce the size of the mask to the minimal width mask which
22501 // performs an equivalent shuffle.
22502 SmallVector<int, 16> WidenedMask;
22503 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
22504 Mask = std::move(WidenedMask);
22505 WidenedMask.clear();
22508 return combineX86ShuffleChain(Op, Root, Mask, Depth, HasPSHUFB, DAG, DCI,
22512 /// \brief Get the PSHUF-style mask from PSHUF node.
22514 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
22515 /// PSHUF-style masks that can be reused with such instructions.
22516 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
22517 SmallVector<int, 4> Mask;
22519 bool HaveMask = getTargetShuffleMask(N.getNode(), N.getSimpleValueType(), Mask, IsUnary);
22523 switch (N.getOpcode()) {
22524 case X86ISD::PSHUFD:
22526 case X86ISD::PSHUFLW:
22529 case X86ISD::PSHUFHW:
22530 Mask.erase(Mask.begin(), Mask.begin() + 4);
22531 for (int &M : Mask)
22535 llvm_unreachable("No valid shuffle instruction found!");
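// Illustrative example (added commentary): getTargetShuffleMask returns an
// 8-element mask for PSHUFHW, e.g. {0,1,2,3, 6,5,4,7}; the identity low half
// is dropped and the high half rebased to {2,1,0,3}, so callers can treat all
// three PSHUF forms as 4-element masks.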
22539 /// \brief Search for a combinable shuffle across a chain ending in pshufd.
22541 /// We walk up the chain and look for a combinable shuffle, skipping over
22542 /// shuffles that we could hoist this shuffle's transformation past without
22543 /// altering anything.
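/// Illustrative example (added commentary): a PSHUFD with mask {1,0,3,2} whose
/// single-use operand is another PSHUFD with mask {2,3,0,1} is rewritten as one
/// PSHUFD with the composed mask {3,2,1,0}; compatible half-word shuffles
/// encountered along the way are re-applied around the combined node.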
22545 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
22547 TargetLowering::DAGCombinerInfo &DCI) {
22548 assert(N.getOpcode() == X86ISD::PSHUFD &&
22549 "Called with something other than an x86 128-bit half shuffle!");
22552 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
22553 // of the shuffles in the chain so that we can form a fresh chain to replace
22554 // this chain.
22555 SmallVector<SDValue, 8> Chain;
22556 SDValue V = N.getOperand(0);
22557 for (; V.hasOneUse(); V = V.getOperand(0)) {
22558 switch (V.getOpcode()) {
22560 return SDValue(); // Nothing combined!
22563 // Skip bitcasts as we always know the type for the target specific
22567 case X86ISD::PSHUFD:
22568 // Found another dword shuffle.
22571 case X86ISD::PSHUFLW:
22572 // Check that the low words (being shuffled) are the identity in the
22573 // dword shuffle, and the high words are self-contained.
22574 if (Mask[0] != 0 || Mask[1] != 1 ||
22575 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
22578 Chain.push_back(V);
22581 case X86ISD::PSHUFHW:
22582 // Check that the high words (being shuffled) are the identity in the
22583 // dword shuffle, and the low words are self-contained.
22584 if (Mask[2] != 2 || Mask[3] != 3 ||
22585 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
22588 Chain.push_back(V);
22591 case X86ISD::UNPCKL:
22592 case X86ISD::UNPCKH:
22593 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
22594 // shuffle into a preceding word shuffle.
22595 if (V.getValueType() != MVT::v16i8 && V.getValueType() != MVT::v8i16)
22598 // Search for a half-shuffle which we can combine with.
22599 unsigned CombineOp =
22600 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
22601 if (V.getOperand(0) != V.getOperand(1) ||
22602 !V->isOnlyUserOf(V.getOperand(0).getNode()))
22604 Chain.push_back(V);
22605 V = V.getOperand(0);
22607 switch (V.getOpcode()) {
22609 return SDValue(); // Nothing to combine.
22611 case X86ISD::PSHUFLW:
22612 case X86ISD::PSHUFHW:
22613 if (V.getOpcode() == CombineOp)
22616 Chain.push_back(V);
22620 V = V.getOperand(0);
22624 } while (V.hasOneUse());
22627 // Break out of the loop if we break out of the switch.
22631 if (!V.hasOneUse())
22632 // We fell out of the loop without finding a viable combining instruction.
22635 // Merge this node's mask and our incoming mask.
22636 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22637 for (int &M : Mask)
22639 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
22640 getV4X86ShuffleImm8ForMask(Mask, DAG));
22642 // Rebuild the chain around this new shuffle.
22643 while (!Chain.empty()) {
22644 SDValue W = Chain.pop_back_val();
22646 if (V.getValueType() != W.getOperand(0).getValueType())
22647 V = DAG.getNode(ISD::BITCAST, DL, W.getOperand(0).getValueType(), V);
22649 switch (W.getOpcode()) {
22651 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
22653 case X86ISD::UNPCKL:
22654 case X86ISD::UNPCKH:
22655 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
22658 case X86ISD::PSHUFD:
22659 case X86ISD::PSHUFLW:
22660 case X86ISD::PSHUFHW:
22661 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
22665 if (V.getValueType() != N.getValueType())
22666 V = DAG.getNode(ISD::BITCAST, DL, N.getValueType(), V);
22668 // Return the new chain to replace N.
22672 /// \brief Search for a combinable shuffle across a chain ending in pshuflw or pshufhw.
22674 /// We walk up the chain, skipping shuffles of the other half and looking
22675 /// through shuffles which switch halves trying to find a shuffle of the same
22676 /// pair of dwords.
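/// Illustrative example (added commentary): two PSHUFLW nodes separated only by
/// PSHUFHW nodes (which leave the low words untouched) are merged into the
/// earlier PSHUFLW with the composed mask, and the later one is deleted.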
22677 static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
22679 TargetLowering::DAGCombinerInfo &DCI) {
22680 assert(
22681 (N.getOpcode() == X86ISD::PSHUFLW || N.getOpcode() == X86ISD::PSHUFHW) &&
22682 "Called with something other than an x86 128-bit half shuffle!");
22684 unsigned CombineOpcode = N.getOpcode();
22686 // Walk up a single-use chain looking for a combinable shuffle.
22687 SDValue V = N.getOperand(0);
22688 for (; V.hasOneUse(); V = V.getOperand(0)) {
22689 switch (V.getOpcode()) {
22691 return false; // Nothing combined!
22694 // Skip bitcasts as we always know the type for the target specific
22698 case X86ISD::PSHUFLW:
22699 case X86ISD::PSHUFHW:
22700 if (V.getOpcode() == CombineOpcode)
22703 // Other-half shuffles are no-ops.
22706 // Break out of the loop if we break out of the switch.
22710 if (!V.hasOneUse())
22711 // We fell out of the loop without finding a viable combining instruction.
22714 // Combine away the bottom node as its shuffle will be accumulated into
22715 // a preceding shuffle.
22716 DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22718 // Record the old value.
22721 // Merge this node's mask and our incoming mask (adjusted to account for all
22722 // the pshufd instructions encountered).
22723 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22724 for (int &M : Mask)
22726 V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
22727 getV4X86ShuffleImm8ForMask(Mask, DAG));
22729 // Check that the shuffles didn't cancel each other out. If not, we need to
22730 // combine to the new one.
22732 // Replace the combinable shuffle with the combined one, updating all users
22733 // so that we re-evaluate the chain here.
22734 DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);
22739 /// \brief Try to combine x86 target specific shuffles.
22740 static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
22741 TargetLowering::DAGCombinerInfo &DCI,
22742 const X86Subtarget *Subtarget) {
22744 MVT VT = N.getSimpleValueType();
22745 SmallVector<int, 4> Mask;
22747 switch (N.getOpcode()) {
22748 case X86ISD::PSHUFD:
22749 case X86ISD::PSHUFLW:
22750 case X86ISD::PSHUFHW:
22751 Mask = getPSHUFShuffleMask(N);
22752 assert(Mask.size() == 4);
22758 // Nuke no-op shuffles that show up after combining.
22759 if (isNoopShuffleMask(Mask))
22760 return DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22762 // Look for simplifications involving one or two shuffle instructions.
22763 SDValue V = N.getOperand(0);
22764 switch (N.getOpcode()) {
22767 case X86ISD::PSHUFLW:
22768 case X86ISD::PSHUFHW:
22769 assert(VT == MVT::v8i16);
22772 if (combineRedundantHalfShuffle(N, Mask, DAG, DCI))
22773 return SDValue(); // We combined away this shuffle, so we're done.
22775 // See if this reduces to a PSHUFD which is no more expensive and can
22776 // combine with more operations. Note that it has to at least flip the
22777 // dwords as otherwise it would have been removed as a no-op.
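// Illustrative example (added commentary): a PSHUFLW with mask {2,3,0,1} swaps
// word pairs (0,1) and (2,3), which is the same as swapping dwords 0 and 1, so
// it is re-emitted below as a PSHUFD with dword mask {1,0,2,3}.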
22778 if (Mask[0] == 2 && Mask[1] == 3 && Mask[2] == 0 && Mask[3] == 1) {
22779 int DMask[] = {0, 1, 2, 3};
22780 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
22781 DMask[DOffset + 0] = DOffset + 1;
22782 DMask[DOffset + 1] = DOffset + 0;
22783 V = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V);
22784 DCI.AddToWorklist(V.getNode());
22785 V = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V,
22786 getV4X86ShuffleImm8ForMask(DMask, DAG));
22787 DCI.AddToWorklist(V.getNode());
22788 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
22791 // Look for shuffle patterns which can be implemented as a single unpack.
22792 // FIXME: This doesn't handle the location of the PSHUFD generically, and
22793 // only works when we have a PSHUFD followed by two half-shuffles.
22794 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
22795 (V.getOpcode() == X86ISD::PSHUFLW ||
22796 V.getOpcode() == X86ISD::PSHUFHW) &&
22797 V.getOpcode() != N.getOpcode() &&
22799 SDValue D = V.getOperand(0);
22800 while (D.getOpcode() == ISD::BITCAST && D.hasOneUse())
22801 D = D.getOperand(0);
22802 if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
22803 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22804 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
22805 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
22806 int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
22808 for (int i = 0; i < 4; ++i) {
22809 WordMask[i + NOffset] = Mask[i] + NOffset;
22810 WordMask[i + VOffset] = VMask[i] + VOffset;
22812 // Map the word mask through the DWord mask.
22814 for (int i = 0; i < 8; ++i)
22815 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
22816 const int UnpackLoMask[] = {0, 0, 1, 1, 2, 2, 3, 3};
22817 const int UnpackHiMask[] = {4, 4, 5, 5, 6, 6, 7, 7};
22818 if (std::equal(std::begin(MappedMask), std::end(MappedMask),
22819 std::begin(UnpackLoMask)) ||
22820 std::equal(std::begin(MappedMask), std::end(MappedMask),
22821 std::begin(UnpackHiMask))) {
22822 // We can replace all three shuffles with an unpack.
22823 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, D.getOperand(0));
22824 DCI.AddToWorklist(V.getNode());
22825 return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
22827 DL, MVT::v8i16, V, V);
22834 case X86ISD::PSHUFD:
22835 if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG, DCI))
22836 return NewN;
22844 /// \brief Try to combine a shuffle into a target-specific add-sub node.
22846 /// We combine this directly on the abstract vector shuffle nodes so it is
22847 /// easier to generically match. We also insert dummy vector shuffle nodes for
22848 /// the operands which explicitly discard the lanes which are unused by this
22849 /// operation, so that the fact that they are unused can flow through the
22850 /// rest of the combiner.
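/// Illustrative example (added commentary): for v4f32, a shuffle with mask
/// <0, 5, 2, 7> taking lanes 0 and 2 from (fsub A, B) and lanes 1 and 3 from
/// (fadd A, B) computes exactly ADDSUBPS A, B (subtract in the even lanes, add
/// in the odd lanes).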
22851 static SDValue combineShuffleToAddSub(SDNode *N, SelectionDAG &DAG) {
22853 EVT VT = N->getValueType(0);
22855 // We only handle target-independent shuffles.
22856 // FIXME: It would be easy and harmless to use the target shuffle mask
22857 // extraction tool to support more.
22858 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
22861 auto *SVN = cast<ShuffleVectorSDNode>(N);
22862 ArrayRef<int> Mask = SVN->getMask();
22863 SDValue V1 = N->getOperand(0);
22864 SDValue V2 = N->getOperand(1);
22866 // We require the first shuffle operand to be the SUB node, and the second to
22867 // be the ADD node.
22868 // FIXME: We should support the commuted patterns.
22869 if (V1->getOpcode() != ISD::FSUB || V2->getOpcode() != ISD::FADD)
22872 // If there are other uses of these operations we can't fold them.
22873 if (!V1->hasOneUse() || !V2->hasOneUse())
22876 // Ensure that both operations have the same operands. Note that we can
22877 // commute the FADD operands.
22878 SDValue LHS = V1->getOperand(0), RHS = V1->getOperand(1);
22879 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
22880 (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
22883 // We're looking for blends between FADD and FSUB nodes. We insist on these
22884 // nodes being lined up in a specific expected pattern.
22885 if (!(isShuffleEquivalent(V1, V2, Mask, 0, 3) ||
22886 isShuffleEquivalent(V1, V2, Mask, 0, 5, 2, 7) ||
22887 isShuffleEquivalent(V1, V2, Mask, 0, 9, 2, 11, 4, 13, 6, 15)))
22890 // Only specific types are legal at this point, assert so we notice if and
22891 // when these change.
22892 assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v8f32 ||
22893 VT == MVT::v4f64) &&
22894 "Unknown vector type encountered!");
22896 return DAG.getNode(X86ISD::ADDSUB, DL, VT, LHS, RHS);
22899 /// PerformShuffleCombine - Performs several different shuffle combines.
22900 static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
22901 TargetLowering::DAGCombinerInfo &DCI,
22902 const X86Subtarget *Subtarget) {
22904 SDValue N0 = N->getOperand(0);
22905 SDValue N1 = N->getOperand(1);
22906 EVT VT = N->getValueType(0);
22908 // Don't create instructions with illegal types after legalize types has run.
22909 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22910 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
22913 // If we have legalized the vector types, look for blends of FADD and FSUB
22914 // nodes that we can fuse into an ADDSUB node.
22915 if (TLI.isTypeLegal(VT) && Subtarget->hasSSE3())
22916 if (SDValue AddSub = combineShuffleToAddSub(N, DAG))
22919 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode
22920 if (Subtarget->hasFp256() && VT.is256BitVector() &&
22921 N->getOpcode() == ISD::VECTOR_SHUFFLE)
22922 return PerformShuffleCombine256(N, DAG, DCI, Subtarget);
22924 // During Type Legalization, when promoting illegal vector types,
22925 // the backend might introduce new shuffle dag nodes and bitcasts.
22927 // This code performs the following transformation:
22928 // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
22929 // (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
22931 // We do this only if both the bitcast and the BINOP dag nodes have
22932 // one use. Also, perform this transformation only if the new binary
22933 // operation is legal. This is to avoid introducing dag nodes that
22934 // potentially need to be further expanded (or custom lowered) into a
22935 // less optimal sequence of dag nodes.
22936 if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
22937 N1.getOpcode() == ISD::UNDEF && N0.hasOneUse() &&
22938 N0.getOpcode() == ISD::BITCAST) {
22939 SDValue BC0 = N0.getOperand(0);
22940 EVT SVT = BC0.getValueType();
22941 unsigned Opcode = BC0.getOpcode();
22942 unsigned NumElts = VT.getVectorNumElements();
22944 if (BC0.hasOneUse() && SVT.isVector() &&
22945 SVT.getVectorNumElements() * 2 == NumElts &&
22946 TLI.isOperationLegal(Opcode, VT)) {
22947 bool CanFold = false;
22959 unsigned SVTNumElts = SVT.getVectorNumElements();
22960 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
22961 for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
22962 CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
22963 for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
22964 CanFold = SVOp->getMaskElt(i) < 0;
22967 SDValue BC00 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(0));
22968 SDValue BC01 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(1));
22969 SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
22970 return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, &SVOp->getMask()[0]);
22975 // Only handle 128 wide vector from here on.
22976 if (!VT.is128BitVector())
22979 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
22980 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
22981 // consecutive, non-overlapping, and in the right order.
22982 SmallVector<SDValue, 16> Elts;
22983 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
22984 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));
22986 SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true);
22987 if (LD.getNode())
22988 return LD;
22990 if (isTargetShuffle(N->getOpcode())) {
22992 PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget);
22993 if (Shuffle.getNode())
22994 return Shuffle;
22996 // Try recursively combining arbitrary sequences of x86 shuffle
22997 // instructions into higher-order shuffles. We do this after combining
22998 // specific PSHUF instruction sequences into their minimal form so that we
22999 // can evaluate how many specialized shuffle instructions are involved in
23000 // a particular chain.
23001 SmallVector<int, 1> NonceMask; // Just a placeholder.
23002 NonceMask.push_back(0);
23003 if (combineX86ShufflesRecursively(SDValue(N, 0), SDValue(N, 0), NonceMask,
23004 /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
23006 return SDValue(); // This routine will use CombineTo to replace N.
23012 /// PerformTruncateCombine - Converts truncate operation to
23013 /// a sequence of vector shuffle operations.
23014 /// It is possible when we truncate 256-bit vector to 128-bit vector
23015 static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
23016 TargetLowering::DAGCombinerInfo &DCI,
23017 const X86Subtarget *Subtarget) {
23021 /// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
23022 /// specific shuffle of a load can be folded into a single element load.
23023 /// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
23024 /// shuffles have been custom lowered so we need to handle those here.
23025 static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
23026 TargetLowering::DAGCombinerInfo &DCI) {
23027 if (DCI.isBeforeLegalizeOps())
23030 SDValue InVec = N->getOperand(0);
23031 SDValue EltNo = N->getOperand(1);
23033 if (!isa<ConstantSDNode>(EltNo))
23036 EVT OriginalVT = InVec.getValueType();
23038 if (InVec.getOpcode() == ISD::BITCAST) {
23039 // Don't duplicate a load with other uses.
23040 if (!InVec.hasOneUse())
23042 EVT BCVT = InVec.getOperand(0).getValueType();
23043 if (BCVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
23045 InVec = InVec.getOperand(0);
23048 EVT CurrentVT = InVec.getValueType();
23050 if (!isTargetShuffle(InVec.getOpcode()))
23053 // Don't duplicate a load with other uses.
23054 if (!InVec.hasOneUse())
23057 SmallVector<int, 16> ShuffleMask;
23059 if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(),
23060 ShuffleMask, UnaryShuffle))
23063 // Select the input vector, guarding against out of range extract vector.
23064 unsigned NumElems = CurrentVT.getVectorNumElements();
23065 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
23066 int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt];
23067 SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
23068 : InVec.getOperand(1);
23070 // If inputs to shuffle are the same for both ops, then allow 2 uses
23071 unsigned AllowedUses = InVec.getNumOperands() > 1 &&
23072 InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;
23074 if (LdNode.getOpcode() == ISD::BITCAST) {
23075 // Don't duplicate a load with other uses.
23076 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
23079 AllowedUses = 1; // only allow 1 load use if we have a bitcast
23080 LdNode = LdNode.getOperand(0);
23083 if (!ISD::isNormalLoad(LdNode.getNode()))
23086 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
23088 if (!LN0 ||!LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
23091 EVT EltVT = N->getValueType(0);
23092 // If there's a bitcast before the shuffle, check if the load type and
23093 // alignment is valid.
23094 unsigned Align = LN0->getAlignment();
23095 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23096 unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
23097 EltVT.getTypeForEVT(*DAG.getContext()));
23099 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
23102 // All checks match so transform back to vector_shuffle so that DAG combiner
23103 // can finish the job
23106 // Create shuffle node taking into account the case that it's a unary shuffle
23107 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT)
23108 : InVec.getOperand(1);
23109 Shuffle = DAG.getVectorShuffle(CurrentVT, dl,
23110 InVec.getOperand(0), Shuffle,
23112 Shuffle = DAG.getNode(ISD::BITCAST, dl, OriginalVT, Shuffle);
23113 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
23117 /// \brief Detect bitcasts from i32 to the x86mmx low word. Since MMX types are
23118 /// special and don't usually play with other vector types, it's better to
23119 /// handle them early to be sure we emit efficient code by avoiding
23120 /// store-load conversions.
23121 static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG) {
23122 if (N->getValueType(0) != MVT::x86mmx ||
23123 N->getOperand(0)->getOpcode() != ISD::BUILD_VECTOR ||
23124 N->getOperand(0)->getValueType(0) != MVT::v2i32)
23127 SDValue V = N->getOperand(0);
23128 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V.getOperand(1));
23129 if (C && C->getZExtValue() == 0 && V.getOperand(0).getValueType() == MVT::i32)
23130 return DAG.getNode(X86ISD::MMX_MOVW2D, SDLoc(V.getOperand(0)),
23131 N->getValueType(0), V.getOperand(0));
23136 /// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
23137 /// generation and convert it from being a bunch of shuffles and extracts
23138 /// into a somewhat faster sequence. For i686, the best sequence is apparently
23139 /// storing the value and loading scalars back, while for x64 we should
23140 /// use 64-bit extracts and shifts.
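/// Illustrative example (added commentary): when all four lanes of a v4i32 are
/// extracted and then sign- or zero-extended, an x86-64 target can bitcast to
/// v2i64, extract the two halves, and recover the odd lanes with a 64-bit
/// arithmetic shift right by 32; a 32-bit target instead spills the vector to a
/// stack slot and reloads the four scalars.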
23141 static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
23142 TargetLowering::DAGCombinerInfo &DCI) {
23143 SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI);
23144 if (NewOp.getNode())
23145 return NewOp;
23147 SDValue InputVector = N->getOperand(0);
23149 // Detect mmx to i32 conversion through a v2i32 elt extract.
23150 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
23151 N->getValueType(0) == MVT::i32 &&
23152 InputVector.getValueType() == MVT::v2i32) {
23154 // The bitcast source is a direct mmx result.
23155 SDValue MMXSrc = InputVector.getNode()->getOperand(0);
23156 if (MMXSrc.getValueType() == MVT::x86mmx)
23157 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23158 N->getValueType(0),
23159 InputVector.getNode()->getOperand(0));
23161 // The mmx is indirect: (i64 extract_elt (v1i64 bitcast (x86mmx ...))).
23162 SDValue MMXSrcOp = MMXSrc.getOperand(0);
23163 if (MMXSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT && MMXSrc.hasOneUse() &&
23164 MMXSrc.getValueType() == MVT::i64 && MMXSrcOp.hasOneUse() &&
23165 MMXSrcOp.getOpcode() == ISD::BITCAST &&
23166 MMXSrcOp.getValueType() == MVT::v1i64 &&
23167 MMXSrcOp.getOperand(0).getValueType() == MVT::x86mmx)
23168 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23169 N->getValueType(0),
23170 MMXSrcOp.getOperand(0));
23173 // Only operate on vectors of 4 elements, where the alternative shuffling
23174 // gets to be more expensive.
23175 if (InputVector.getValueType() != MVT::v4i32)
23178 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
23179 // single use which is a sign-extend or zero-extend, and all elements are
23181 SmallVector<SDNode *, 4> Uses;
23182 unsigned ExtractedElements = 0;
23183 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
23184 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
23185 if (UI.getUse().getResNo() != InputVector.getResNo())
23188 SDNode *Extract = *UI;
23189 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
23192 if (Extract->getValueType(0) != MVT::i32)
23194 if (!Extract->hasOneUse())
23196 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
23197 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
23199 if (!isa<ConstantSDNode>(Extract->getOperand(1)))
23202 // Record which element was extracted.
23203 ExtractedElements |=
23204 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();
23206 Uses.push_back(Extract);
23209 // If not all the elements were used, this may not be worthwhile.
23210 if (ExtractedElements != 15)
23213 // Ok, we've now decided to do the transformation.
23214 // If 64-bit shifts are legal, use the extract-shift sequence,
23215 // otherwise bounce the vector off the cache.
23216 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23218 SDLoc dl(InputVector);
23220 if (TLI.isOperationLegal(ISD::SRA, MVT::i64)) {
23221 SDValue Cst = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, InputVector);
23222 EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy();
23223 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23224 DAG.getConstant(0, VecIdxTy));
23225 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23226 DAG.getConstant(1, VecIdxTy));
23228 SDValue ShAmt = DAG.getConstant(32,
23229 DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64));
23230 Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
23231 Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
23232 DAG.getNode(ISD::SRA, dl, MVT::i64, BottomHalf, ShAmt));
23233 Vals[2] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, TopHalf);
23234 Vals[3] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
23235 DAG.getNode(ISD::SRA, dl, MVT::i64, TopHalf, ShAmt));
23237 // Store the value to a temporary stack slot.
23238 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
23239 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
23240 MachinePointerInfo(), false, false, 0);
23242 EVT ElementType = InputVector.getValueType().getVectorElementType();
23243 unsigned EltSize = ElementType.getSizeInBits() / 8;
23245 // Replace each use (extract) with a load of the appropriate element.
23246 for (unsigned i = 0; i < 4; ++i) {
23247 uint64_t Offset = EltSize * i;
23248 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
23250 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
23251 StackPtr, OffsetVal);
23253 // Load the scalar.
23254 Vals[i] = DAG.getLoad(ElementType, dl, Ch,
23255 ScalarAddr, MachinePointerInfo(),
23256 false, false, false, 0);
23261 // Replace the extracts
23262 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
23263 UE = Uses.end(); UI != UE; ++UI) {
23264 SDNode *Extract = *UI;
23266 SDValue Idx = Extract->getOperand(1);
23267 uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
23268 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), Vals[IdxVal]);
23271 // The replacement was made in place; don't return anything.
23275 /// \brief Matches a VSELECT onto a min/max node, or returns 0 if the node doesn't match.
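/// Illustrative example (added commentary): (vselect (setcc ult X, Y), X, Y)
/// maps to X86ISD::UMIN and (vselect (setcc ugt X, Y), X, Y) to X86ISD::UMAX,
/// provided the subtarget has the corresponding packed min/max instructions for
/// the element type; the reversed-arm form (vselect (setcc ult X, Y), Y, X)
/// flips the choice.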
23276 static std::pair<unsigned, bool>
23277 matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, SDValue RHS,
23278 SelectionDAG &DAG, const X86Subtarget *Subtarget) {
23279 if (!VT.isVector())
23280 return std::make_pair(0, false);
23282 bool NeedSplit = false;
23283 switch (VT.getSimpleVT().SimpleTy) {
23284 default: return std::make_pair(0, false);
23287 if (!Subtarget->hasVLX())
23288 return std::make_pair(0, false);
23292 if (!Subtarget->hasBWI())
23293 return std::make_pair(0, false);
23297 if (!Subtarget->hasAVX512())
23298 return std::make_pair(0, false);
23303 if (!Subtarget->hasAVX2())
23305 if (!Subtarget->hasAVX())
23306 return std::make_pair(0, false);
23311 if (!Subtarget->hasSSE2())
23312 return std::make_pair(0, false);
23315 // SSE2 has only a small subset of the operations.
23316 bool hasUnsigned = Subtarget->hasSSE41() ||
23317 (Subtarget->hasSSE2() && VT == MVT::v16i8);
23318 bool hasSigned = Subtarget->hasSSE41() ||
23319 (Subtarget->hasSSE2() && VT == MVT::v8i16);
23321 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23324 // Check for x CC y ? x : y.
23325 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23326 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23331 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23334 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23337 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23340 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23342 // Check for x CC y ? y : x -- a min/max with reversed arms.
23343 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23344 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23349 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23352 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23355 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23358 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23362 return std::make_pair(Opc, NeedSplit);
23366 transformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
23367 const X86Subtarget *Subtarget) {
23369 SDValue Cond = N->getOperand(0);
23370 SDValue LHS = N->getOperand(1);
23371 SDValue RHS = N->getOperand(2);
23373 if (Cond.getOpcode() == ISD::SIGN_EXTEND) {
23374 SDValue CondSrc = Cond->getOperand(0);
23375 if (CondSrc->getOpcode() == ISD::SIGN_EXTEND_INREG)
23376 Cond = CondSrc->getOperand(0);
23379 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
23382 // A vselect where all conditions and data are constants can be optimized into
23383 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
23384 if (ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
23385 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
23388 unsigned MaskValue = 0;
23389 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
23392 MVT VT = N->getSimpleValueType(0);
23393 unsigned NumElems = VT.getVectorNumElements();
23394 SmallVector<int, 8> ShuffleMask(NumElems, -1);
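// Illustrative note (added commentary): bit i of MaskValue decides whether lane
// i of the resulting shuffle reads from the first operand (index i) or from the
// second operand (index i + NumElems); undef condition lanes stay -1 so the
// shuffle lowering can pick whichever source is cheaper.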
23395 for (unsigned i = 0; i < NumElems; ++i) {
23396 // Be sure we emit undef where we can.
23397 if (Cond.getOperand(i)->getOpcode() == ISD::UNDEF)
23398 ShuffleMask[i] = -1;
23400 ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
23403 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23404 if (!TLI.isShuffleMaskLegal(ShuffleMask, VT))
23406 return DAG.getVectorShuffle(VT, dl, LHS, RHS, &ShuffleMask[0]);
23409 /// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT
23411 static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
23412 TargetLowering::DAGCombinerInfo &DCI,
23413 const X86Subtarget *Subtarget) {
23415 SDValue Cond = N->getOperand(0);
23416 // Get the LHS/RHS of the select.
23417 SDValue LHS = N->getOperand(1);
23418 SDValue RHS = N->getOperand(2);
23419 EVT VT = LHS.getValueType();
23420 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23422 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
23423 // instructions match the semantics of the common C idiom x<y?x:y but not
23424 // x<=y?x:y, because of how they handle negative zero (which can be
23425 // ignored in unsafe-math mode).
23426 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
23427 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
23428 VT != MVT::f80 && (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
23429 (Subtarget->hasSSE2() ||
23430 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
23431 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23433 unsigned Opcode = 0;
23434 // Check for x CC y ? x : y.
23435 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23436 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23440 // Converting this to a min would handle NaNs incorrectly, and swapping
23441 // the operands would cause it to handle comparisons between positive
23442 // and negative zero incorrectly.
23443 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23444 if (!DAG.getTarget().Options.UnsafeFPMath &&
23445 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23447 std::swap(LHS, RHS);
23449 Opcode = X86ISD::FMIN;
23452 // Converting this to a min would handle comparisons between positive
23453 // and negative zero incorrectly.
23454 if (!DAG.getTarget().Options.UnsafeFPMath &&
23455 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23457 Opcode = X86ISD::FMIN;
23460 // Converting this to a min would handle both negative zeros and NaNs
23461 // incorrectly, but we can swap the operands to fix both.
23462 std::swap(LHS, RHS);
23466 Opcode = X86ISD::FMIN;
23470 // Converting this to a max would handle comparisons between positive
23471 // and negative zero incorrectly.
23472 if (!DAG.getTarget().Options.UnsafeFPMath &&
23473 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23475 Opcode = X86ISD::FMAX;
23478 // Converting this to a max would handle NaNs incorrectly, and swapping
23479 // the operands would cause it to handle comparisons between positive
23480 // and negative zero incorrectly.
23481 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23482 if (!DAG.getTarget().Options.UnsafeFPMath &&
23483 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23485 std::swap(LHS, RHS);
23487 Opcode = X86ISD::FMAX;
23490 // Converting this to a max would handle both negative zeros and NaNs
23491 // incorrectly, but we can swap the operands to fix both.
23492 std::swap(LHS, RHS);
23496 Opcode = X86ISD::FMAX;
23499 // Check for x CC y ? y : x -- a min/max with reversed arms.
23500 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23501 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23505 // Converting this to a min would handle comparisons between positive
23506 // and negative zero incorrectly, and swapping the operands would
23507 // cause it to handle NaNs incorrectly.
23508 if (!DAG.getTarget().Options.UnsafeFPMath &&
23509 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
23510 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23512 std::swap(LHS, RHS);
23514 Opcode = X86ISD::FMIN;
23517 // Converting this to a min would handle NaNs incorrectly.
23518 if (!DAG.getTarget().Options.UnsafeFPMath &&
23519 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
23521 Opcode = X86ISD::FMIN;
23524 // Converting this to a min would handle both negative zeros and NaNs
23525 // incorrectly, but we can swap the operands to fix both.
23526 std::swap(LHS, RHS);
23530 Opcode = X86ISD::FMIN;
23534 // Converting this to a max would handle NaNs incorrectly.
23535 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23537 Opcode = X86ISD::FMAX;
23540 // Converting this to a max would handle comparisons between positive
23541 // and negative zero incorrectly, and swapping the operands would
23542 // cause it to handle NaNs incorrectly.
23543 if (!DAG.getTarget().Options.UnsafeFPMath &&
23544 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
23545 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23547 std::swap(LHS, RHS);
23549 Opcode = X86ISD::FMAX;
23552 // Converting this to a max would handle both negative zeros and NaNs
23553 // incorrectly, but we can swap the operands to fix both.
23554 std::swap(LHS, RHS);
23558 Opcode = X86ISD::FMAX;
23564 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
23567 EVT CondVT = Cond.getValueType();
23568 if (Subtarget->hasAVX512() && VT.isVector() && CondVT.isVector() &&
23569 CondVT.getVectorElementType() == MVT::i1) {
23570 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
23571 // lowering on KNL. In this case we convert it to
23572 // v16i8 (select v16i8, v16i8, v16i8) and use AVX instruction.
23573 // The same situation holds for all 128- and 256-bit vectors of i8 and i16.
23574 // Since SKX, these selects have a proper lowering.
23575 EVT OpVT = LHS.getValueType();
23576 if ((OpVT.is128BitVector() || OpVT.is256BitVector()) &&
23577 (OpVT.getVectorElementType() == MVT::i8 ||
23578 OpVT.getVectorElementType() == MVT::i16) &&
23579 !(Subtarget->hasBWI() && Subtarget->hasVLX())) {
23580 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond);
23581 DCI.AddToWorklist(Cond.getNode());
23582 return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS);
23585 // If this is a select between two integer constants, try to do some optimizations.
23587 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
23588 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
23589 // Don't do this for crazy integer types.
23590 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
23591 // If this is efficiently invertible, canonicalize the LHSC/RHSC values
23592 // so that TrueC (the true value) is larger than FalseC.
23593 bool NeedsCondInvert = false;
23595 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
23596 // Efficiently invertible.
23597 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible.
23598 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible.
23599 isa<ConstantSDNode>(Cond.getOperand(1))))) {
23600 NeedsCondInvert = true;
23601 std::swap(TrueC, FalseC);
23604 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0.
23605 if (FalseC->getAPIntValue() == 0 &&
23606 TrueC->getAPIntValue().isPowerOf2()) {
23607 if (NeedsCondInvert) // Invert the condition if needed.
23608 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23609 DAG.getConstant(1, Cond.getValueType()));
23611 // Zero extend the condition if needed.
23612 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
23614 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
23615 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
23616 DAG.getConstant(ShAmt, MVT::i8));
23619 // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst.
23620 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
23621 if (NeedsCondInvert) // Invert the condition if needed.
23622 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23623 DAG.getConstant(1, Cond.getValueType()));
23625 // Zero extend the condition if needed.
23626 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
23627 FalseC->getValueType(0), Cond);
23628 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23629 SDValue(FalseC, 0));
23632 // Optimize cases that will turn into an LEA instruction. This requires
23633 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
23634 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
23635 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
23636 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
23638 bool isFastMultiplier = false;
23640 switch ((unsigned char)Diff) {
23642 case 1: // result = add base, cond
23643 case 2: // result = lea base( , cond*2)
23644 case 3: // result = lea base(cond, cond*2)
23645 case 4: // result = lea base( , cond*4)
23646 case 5: // result = lea base(cond, cond*4)
23647 case 8: // result = lea base( , cond*8)
23648 case 9: // result = lea base(cond, cond*8)
23649 isFastMultiplier = true;
23654 if (isFastMultiplier) {
23655 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
23656 if (NeedsCondInvert) // Invert the condition if needed.
23657 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23658 DAG.getConstant(1, Cond.getValueType()));
23660 // Zero extend the condition if needed.
23661 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
23663 // Scale the condition by the difference.
23665 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
23666 DAG.getConstant(Diff, Cond.getValueType()));
23668 // Add the base if non-zero.
23669 if (FalseC->getAPIntValue() != 0)
23670 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23671 SDValue(FalseC, 0));
23678 // Canonicalize max and min:
23679 // (x > y) ? x : y -> (x >= y) ? x : y
23680 // (x < y) ? x : y -> (x <= y) ? x : y
23681 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
23682 // the need for an extra compare
23683 // against zero. e.g.
23684 // (x - y) > 0 : (x - y) ? 0 -> (x - y) >= 0 : (x - y) ? 0
23686 // testl %edi, %edi
23688 // cmovgl %edi, %eax
23692 // cmovsl %eax, %edi
23693 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
23694 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23695 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23696 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23701 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
23702 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
23703 Cond.getOperand(0), Cond.getOperand(1), NewCC);
23704 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
23709 // Early exit check
23710 if (!TLI.isTypeLegal(VT))
23713 // Match VSELECTs into subs with unsigned saturation.
23714 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
23715 // psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
23716 ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
23717 (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
23718 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23720 // Check if one of the arms of the VSELECT is a zero vector. If it's on the
23721 // left side invert the predicate to simplify logic below.
23723 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
23725 CC = ISD::getSetCCInverse(CC, true);
23726 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
23730 if (Other.getNode() && Other->getNumOperands() == 2 &&
23731 DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
23732 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
23733 SDValue CondRHS = Cond->getOperand(1);
23735 // Look for a general sub with unsigned saturation first.
23736 // x >= y ? x-y : 0 --> subus x, y
23737 // x > y ? x-y : 0 --> subus x, y
23738 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
23739 Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
23740 return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);
23742 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS))
23743 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
23744 if (auto *CondRHSBV = dyn_cast<BuildVectorSDNode>(CondRHS))
23745 if (auto *CondRHSConst = CondRHSBV->getConstantSplatNode())
23746 // If the RHS is a constant we have to reverse the const
23747 // canonicalization.
23748 // x > C-1 ? x+-C : 0 --> subus x, C
23749 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
23750 CondRHSConst->getAPIntValue() ==
23751 (-OpRHSConst->getAPIntValue() - 1))
23752 return DAG.getNode(
23753 X86ISD::SUBUS, DL, VT, OpLHS,
23754 DAG.getConstant(-OpRHSConst->getAPIntValue(), VT));
23756 // Another special case: If C was a sign bit, the sub has been
23757 // canonicalized into a xor.
23758 // FIXME: Would it be better to use computeKnownBits to determine
23759 // whether it's safe to decanonicalize the xor?
23760 // x s< 0 ? x^C : 0 --> subus x, C
23761 if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
23762 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
23763 OpRHSConst->getAPIntValue().isSignBit())
23764 // Note that we have to rebuild the RHS constant here to ensure we
23765 // don't rely on particular values of undef lanes.
23766 return DAG.getNode(
23767 X86ISD::SUBUS, DL, VT, OpLHS,
23768 DAG.getConstant(OpRHSConst->getAPIntValue(), VT));
23773 // Try to match a min/max vector operation.
23774 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC) {
23775 std::pair<unsigned, bool> ret = matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget);
23776 unsigned Opc = ret.first;
23777 bool NeedSplit = ret.second;
23779 if (Opc && NeedSplit) {
23780 unsigned NumElems = VT.getVectorNumElements();
23781 // Extract the LHS vectors
23782 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, DL);
23783 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, DL);
23785 // Extract the RHS vectors
23786 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, DL);
23787 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, DL);
23789 // Create min/max for each subvector
23790 LHS = DAG.getNode(Opc, DL, LHS1.getValueType(), LHS1, RHS1);
23791 RHS = DAG.getNode(Opc, DL, LHS2.getValueType(), LHS2, RHS2);
23793 // Merge the result
23794 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS, RHS);
23796 return DAG.getNode(Opc, DL, VT, LHS, RHS);
23799 // Simplify vector selection if condition value type matches vselect
23801 if (N->getOpcode() == ISD::VSELECT && CondVT == VT) {
23802 assert(Cond.getValueType().isVector() &&
23803 "vector select expects a vector selector!");
23805 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
23806 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
23808 // Try to invert the condition if the true value is not all 1s and the false value is not all 0s.
23810 if (!TValIsAllOnes && !FValIsAllZeros &&
23811 // Check if the selector will be produced by CMPP*/PCMP*
23812 Cond.getOpcode() == ISD::SETCC &&
23813 // Check if SETCC has already been promoted
23814 TLI.getSetCCResultType(*DAG.getContext(), VT) == CondVT) {
23815 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
23816 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
23818 if (TValIsAllZeros || FValIsAllOnes) {
23819 SDValue CC = Cond.getOperand(2);
23820 ISD::CondCode NewCC =
23821 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
23822 Cond.getOperand(0).getValueType().isInteger());
23823 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1), NewCC);
23824 std::swap(LHS, RHS);
23825 TValIsAllOnes = FValIsAllOnes;
23826 FValIsAllZeros = TValIsAllZeros;
23830 if (TValIsAllOnes || FValIsAllZeros) {
23833 if (TValIsAllOnes && FValIsAllZeros)
23835 else if (TValIsAllOnes)
23836 Ret = DAG.getNode(ISD::OR, DL, CondVT, Cond,
23837 DAG.getNode(ISD::BITCAST, DL, CondVT, RHS));
23838 else if (FValIsAllZeros)
23839 Ret = DAG.getNode(ISD::AND, DL, CondVT, Cond,
23840 DAG.getNode(ISD::BITCAST, DL, CondVT, LHS));
23842 return DAG.getNode(ISD::BITCAST, DL, VT, Ret);
23846 // If we know that this node is legal then we know that it is going to be
23847 // matched by one of the SSE/AVX BLEND instructions. These instructions only
23848 // depend on the highest bit in each word. Try to use SimplifyDemandedBits
23849 // to simplify previous instructions.
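// Illustrative example (hypothetical): if the condition was computed as
// (sra (sub a, b), 31), only the sign bit of each element is demanded by the
// blend, so SimplifyDemandedBits may strip away computations that only
// affect the lower bits of that subtraction chain.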
23850 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
23851 !DCI.isBeforeLegalize() &&
23852 // We explicitly check against v8i16 and v16i16 because, although
23853 // they're marked as Custom, they might only be legal when Cond is a
23854 // build_vector of constants. This will be taken care of in a later phase.
23856 (TLI.isOperationLegalOrCustom(ISD::VSELECT, VT) && VT != MVT::v16i16 &&
23857 VT != MVT::v8i16) &&
23858 // Don't optimize vector of constants. Those are handled by
23859 // the generic code and all the bits must be properly set for
23860 // the generic optimizer.
23861 !ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
23862 unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();
23864 // Don't optimize vector selects that map to mask-registers.
if (BitWidth == 1)
  return SDValue();
23868 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
23869 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);
23871 APInt KnownZero, KnownOne;
23872 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
23873 DCI.isBeforeLegalizeOps());
23874 if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) ||
23875 TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne,
23877 // If we changed the computation somewhere in the DAG, this change
23878 // will affect all users of Cond.
23879 // Make sure it is fine and update all the nodes so that we do not
23880 // use the generic VSELECT anymore. Otherwise, we may perform
23881 // wrong optimizations as we messed up with the actual expectation
23882 // for the vector boolean values.
23883 if (Cond != TLO.Old) {
23884 // Check all uses of that condition operand to check whether it will be
23885 // consumed by non-BLEND instructions, which may depend on all bits being set properly.
23887 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
23889 if (I->getOpcode() != ISD::VSELECT)
23890 // TODO: Add other opcodes eventually lowered into BLEND.
23893 // Update all the users of the condition, before committing the change,
23894 // so that the VSELECT optimizations that expect the correct vector
23895 // boolean value will not be triggered.
23896 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
23898 DAG.ReplaceAllUsesOfValueWith(
23900 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(*I), I->getValueType(0),
23901 Cond, I->getOperand(1), I->getOperand(2)));
23902 DCI.CommitTargetLoweringOpt(TLO);
23905 // At this point, only Cond is changed. Change the condition
23906 // just for N to keep the opportunity to optimize all other
23907 // users their own way.
23908 DAG.ReplaceAllUsesOfValueWith(
23910 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(N), N->getValueType(0),
23911 TLO.New, N->getOperand(1), N->getOperand(2)));
23916 // We should generate an X86ISD::BLENDI from a vselect if its argument
23917 // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
23918 // constants. This specific pattern gets generated when we split a
23919 // selector for a 512 bit vector in a machine without AVX512 (but with
23920 // 256-bit vectors), during legalization:
23922 // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
23924 // Iff we find this pattern and the build_vectors are built from
23925 // constants, we translate the vselect into a shuffle_vector that we
23926 // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
23927 if ((N->getOpcode() == ISD::VSELECT ||
23928 N->getOpcode() == X86ISD::SHRUNKBLEND) &&
23929 !DCI.isBeforeLegalize()) {
23930 SDValue Shuffle = transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
23931 if (Shuffle.getNode())
23938 // Check whether a boolean test is testing a boolean value generated by
23939 // X86ISD::SETCC. If so, return the operand of that SETCC and the proper condition flag.
23942 // Simplify the following patterns:
23943 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
23944 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
23945 // to (Op EFLAGS Cond)
23947 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
23948 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
23949 // to (Op EFLAGS !Cond)
23951 // where Op could be BRCOND or CMOV.
23953 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
23954 // Quit unless this is a CMP, or a SUB whose value result is unused.
23955 if (Cmp.getOpcode() != X86ISD::CMP &&
23956 (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
23959 // Quit if not used as a boolean value.
23960 if (CC != X86::COND_E && CC != X86::COND_NE)
23963 // Check CMP operands. One of them should be 0 or 1 and the other should be
23964 // a SetCC or extended from it.
23965 SDValue Op1 = Cmp.getOperand(0);
23966 SDValue Op2 = Cmp.getOperand(1);
23969 const ConstantSDNode* C = nullptr;
23970 bool needOppositeCond = (CC == X86::COND_E);
23971 bool checkAgainstTrue = false; // Is it a comparison against 1?
23973 if ((C = dyn_cast<ConstantSDNode>(Op1)))
23975 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
23977 else // Quit if neither operand is a constant.
23980 if (C->getZExtValue() == 1) {
23981 needOppositeCond = !needOppositeCond;
23982 checkAgainstTrue = true;
23983 } else if (C->getZExtValue() != 0)
23984 // Quit if the constant is neither 0 nor 1.
23987 bool truncatedToBoolWithAnd = false;
23988 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
23989 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
23990 SetCC.getOpcode() == ISD::TRUNCATE ||
23991 SetCC.getOpcode() == ISD::AND) {
23992 if (SetCC.getOpcode() == ISD::AND) {
23994 ConstantSDNode *CS;
23995 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(0))) &&
23996 CS->getZExtValue() == 1)
23998 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(1))) &&
23999 CS->getZExtValue() == 1)
24003 SetCC = SetCC.getOperand(OpIdx);
24004 truncatedToBoolWithAnd = true;
24006 SetCC = SetCC.getOperand(0);
24009 switch (SetCC.getOpcode()) {
24010 case X86ISD::SETCC_CARRY:
24011 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
24012 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
24013 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
24014 // truncated to i1 using 'and'.
24015 if (checkAgainstTrue && !truncatedToBoolWithAnd)
24017 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
24018 "Invalid use of SETCC_CARRY!");
24020 case X86ISD::SETCC:
24021 // Set the condition code or opposite one if necessary.
24022 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
24023 if (needOppositeCond)
24024 CC = X86::GetOppositeBranchCondition(CC);
24025 return SetCC.getOperand(1);
24026 case X86ISD::CMOV: {
24027 // Check whether false/true value has canonical one, i.e. 0 or 1.
24028 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
24029 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
24030 // Quit if true value is not a constant.
24033 // Quit if false value is not a constant.
24035 SDValue Op = SetCC.getOperand(0);
24036 // Skip 'zext' or 'trunc' node.
24037 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
24038 Op.getOpcode() == ISD::TRUNCATE)
24039 Op = Op.getOperand(0);
24040 // A special case for rdrand/rdseed, where 0 is set if the false cond is needed.
24042 if ((Op.getOpcode() != X86ISD::RDRAND &&
24043 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
24046 // Quit if false value is not the constant 0 or 1.
24047 bool FValIsFalse = true;
24048 if (FVal && FVal->getZExtValue() != 0) {
24049 if (FVal->getZExtValue() != 1)
24051 // If FVal is 1, opposite cond is needed.
24052 needOppositeCond = !needOppositeCond;
24053 FValIsFalse = false;
24055 // Quit if TVal is not the constant opposite of FVal.
24056 if (FValIsFalse && TVal->getZExtValue() != 1)
24058 if (!FValIsFalse && TVal->getZExtValue() != 0)
24060 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
24061 if (needOppositeCond)
24062 CC = X86::GetOppositeBranchCondition(CC);
24063 return SetCC.getOperand(3);
24070 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
24071 static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
24072 TargetLowering::DAGCombinerInfo &DCI,
24073 const X86Subtarget *Subtarget) {
24076 // If the flag operand isn't dead, don't touch this CMOV.
24077 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
24080 SDValue FalseOp = N->getOperand(0);
24081 SDValue TrueOp = N->getOperand(1);
24082 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
24083 SDValue Cond = N->getOperand(3);
24085 if (CC == X86::COND_E || CC == X86::COND_NE) {
24086 switch (Cond.getOpcode()) {
24090 // If operand of BSR / BSF are proven never zero, then ZF cannot be set.
24091 if (DAG.isKnownNeverZero(Cond.getOperand(0)))
24092 return (CC == X86::COND_E) ? FalseOp : TrueOp;
24098 SDValue Flags = checkBoolTestSetCCCombine(Cond, CC);
24099 if (Flags.getNode() &&
24100 // Extra check as FCMOV only supports a subset of X86 cond.
24101 (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
24102 SDValue Ops[] = { FalseOp, TrueOp,
24103 DAG.getConstant(CC, MVT::i8), Flags };
24104 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
24107 // If this is a select between two integer constants, try to do some
24108 // optimizations. Note that the operands are ordered the opposite of SELECT operands.
24110 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
24111 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
24112 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
24113 // larger than FalseC (the false value).
24114 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
24115 CC = X86::GetOppositeBranchCondition(CC);
24116 std::swap(TrueC, FalseC);
24117 std::swap(TrueOp, FalseOp);
24120 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
24121 // This is efficient for any integer data type (including i8/i16) and any shift amount.
24123 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
24124 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24125 DAG.getConstant(CC, MVT::i8), Cond);
24127 // Zero extend the condition if needed.
24128 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
24130 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
24131 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
24132 DAG.getConstant(ShAmt, MVT::i8));
24133 if (N->getNumValues() == 2) // Dead flag value?
24134 return DCI.CombineTo(N, Cond, SDValue());
24138 // Optimize Cond ? cst+1 : cst -> zext(setcc(C))+cst. This is efficient
24139 // for any integer data type, including i8/i16.
24140 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
24141 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24142 DAG.getConstant(CC, MVT::i8), Cond);
24144 // Zero extend the condition if needed.
24145 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
24146 FalseC->getValueType(0), Cond);
24147 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24148 SDValue(FalseC, 0));
24150 if (N->getNumValues() == 2) // Dead flag value?
24151 return DCI.CombineTo(N, Cond, SDValue());
24155 // Optimize cases that will turn into an LEA instruction. This requires
24156 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
24157 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
24158 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
24159 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
24161 bool isFastMultiplier = false;
24163 switch ((unsigned char)Diff) {
24165 case 1: // result = add base, cond
24166 case 2: // result = lea base( , cond*2)
24167 case 3: // result = lea base(cond, cond*2)
24168 case 4: // result = lea base( , cond*4)
24169 case 5: // result = lea base(cond, cond*4)
24170 case 8: // result = lea base( , cond*8)
24171 case 9: // result = lea base(cond, cond*8)
24172 isFastMultiplier = true;
24177 if (isFastMultiplier) {
24178 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
24179 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24180 DAG.getConstant(CC, MVT::i8), Cond);
24181 // Zero extend the condition if needed.
24182 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
24184 // Scale the condition by the difference.
24186 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
24187 DAG.getConstant(Diff, Cond.getValueType()));
24189 // Add the base if non-zero.
24190 if (FalseC->getAPIntValue() != 0)
24191 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24192 SDValue(FalseC, 0));
24193 if (N->getNumValues() == 2) // Dead flag value?
24194 return DCI.CombineTo(N, Cond, SDValue());
24201 // Handle these cases:
24202 // (select (x != c), e, c) -> select (x != c), e, x),
24203 // (select (x == c), c, e) -> select (x == c), x, e)
24204 // where the c is an integer constant, and the "select" is the combination
24205 // of CMOV and CMP.
24207 // The rationale for this change is that the conditional-move from a constant
24208 // needs two instructions, whereas a conditional-move from a register needs
24209 // only one instruction.
24211 // CAVEAT: By replacing a constant with a symbolic value, it may obscure
24212 // some instruction-combining opportunities. This opt needs to be
24213 // postponed as late as possible.
24215 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
24216 // the DCI.xxxx conditions are provided to postpone the optimization as
24217 // late as possible.
24219 ConstantSDNode *CmpAgainst = nullptr;
24220 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
24221 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
24222 !isa<ConstantSDNode>(Cond.getOperand(0))) {
24224 if (CC == X86::COND_NE &&
24225 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
24226 CC = X86::GetOppositeBranchCondition(CC);
24227 std::swap(TrueOp, FalseOp);
24230 if (CC == X86::COND_E &&
24231 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
24232 SDValue Ops[] = { FalseOp, Cond.getOperand(0),
24233 DAG.getConstant(CC, MVT::i8), Cond };
24234 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
24242 static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
24243 const X86Subtarget *Subtarget) {
24244 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
24246 default: return SDValue();
24247 // SSE/AVX/AVX2 blend intrinsics.
24248 case Intrinsic::x86_avx2_pblendvb:
24249 case Intrinsic::x86_avx2_pblendw:
24250 case Intrinsic::x86_avx2_pblendd_128:
24251 case Intrinsic::x86_avx2_pblendd_256:
24252 // Don't try to simplify this intrinsic if we don't have AVX2.
24253 if (!Subtarget->hasAVX2())
24256 case Intrinsic::x86_avx_blend_pd_256:
24257 case Intrinsic::x86_avx_blend_ps_256:
24258 case Intrinsic::x86_avx_blendv_pd_256:
24259 case Intrinsic::x86_avx_blendv_ps_256:
24260 // Don't try to simplify this intrinsic if we don't have AVX.
24261 if (!Subtarget->hasAVX())
24264 case Intrinsic::x86_sse41_pblendw:
24265 case Intrinsic::x86_sse41_blendpd:
24266 case Intrinsic::x86_sse41_blendps:
24267 case Intrinsic::x86_sse41_blendvps:
24268 case Intrinsic::x86_sse41_blendvpd:
24269 case Intrinsic::x86_sse41_pblendvb: {
24270 SDValue Op0 = N->getOperand(1);
24271 SDValue Op1 = N->getOperand(2);
24272 SDValue Mask = N->getOperand(3);
24274 // Don't try to simplify this intrinsic if we don't have SSE4.1.
24275 if (!Subtarget->hasSSE41())
24278 // fold (blend A, A, Mask) -> A
24281 // fold (blend A, B, allZeros) -> A
24282 if (ISD::isBuildVectorAllZeros(Mask.getNode()))
24284 // fold (blend A, B, allOnes) -> B
24285 if (ISD::isBuildVectorAllOnes(Mask.getNode()))
24288 // Simplify the case where the mask is a constant i32 value.
24289 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mask)) {
24290 if (C->isNullValue())
24292 if (C->isAllOnesValue())
24299 // Packed SSE2/AVX2 arithmetic shift immediate intrinsics.
24300 case Intrinsic::x86_sse2_psrai_w:
24301 case Intrinsic::x86_sse2_psrai_d:
24302 case Intrinsic::x86_avx2_psrai_w:
24303 case Intrinsic::x86_avx2_psrai_d:
24304 case Intrinsic::x86_sse2_psra_w:
24305 case Intrinsic::x86_sse2_psra_d:
24306 case Intrinsic::x86_avx2_psra_w:
24307 case Intrinsic::x86_avx2_psra_d: {
24308 SDValue Op0 = N->getOperand(1);
24309 SDValue Op1 = N->getOperand(2);
24310 EVT VT = Op0.getValueType();
24311 assert(VT.isVector() && "Expected a vector type!");
24313 if (isa<BuildVectorSDNode>(Op1))
24314 Op1 = Op1.getOperand(0);
24316 if (!isa<ConstantSDNode>(Op1))
24319 EVT SVT = VT.getVectorElementType();
24320 unsigned SVTBits = SVT.getSizeInBits();
24322 ConstantSDNode *CND = cast<ConstantSDNode>(Op1);
24323 const APInt &C = APInt(SVTBits, CND->getAPIntValue().getZExtValue());
24324 uint64_t ShAmt = C.getZExtValue();
24326 // Don't try to convert this shift into an ISD::SRA if the shift
24327 // count is bigger than or equal to the element size.
24328 if (ShAmt >= SVTBits)
  return SDValue();
24331 // Trivial case: if the shift count is zero, then fold this
24332 // into the first operand.
if (ShAmt == 0)
  return Op0;
24336 // Replace this packed shift intrinsic with a target-independent shift dag node.
24338 SDValue Splat = DAG.getConstant(C, VT);
24339 return DAG.getNode(ISD::SRA, SDLoc(N), VT, Op0, Splat);
24344 /// PerformMulCombine - Optimize a single multiply with constant into two
24345 /// in order to implement it with two cheaper instructions, e.g.
24346 /// LEA + SHL, LEA + LEA.
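/// Illustrative example (hypothetical constants): a multiply by 45 can be
/// decomposed as 9 * 5 and emitted as two LEA-style MUL_IMM nodes, while a
/// multiply by 40 can be decomposed as 5 * 8 and emitted as an LEA plus a
/// SHL; the ordering of the two pieces is chosen below.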
24347 static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
24348 TargetLowering::DAGCombinerInfo &DCI) {
24349 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
24352 EVT VT = N->getValueType(0);
24353 if (VT != MVT::i64 && VT != MVT::i32)
24356 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
24359 uint64_t MulAmt = C->getZExtValue();
24360 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
24363 uint64_t MulAmt1 = 0;
24364 uint64_t MulAmt2 = 0;
24365 if ((MulAmt % 9) == 0) {
24367 MulAmt2 = MulAmt / 9;
24368 } else if ((MulAmt % 5) == 0) {
24370 MulAmt2 = MulAmt / 5;
24371 } else if ((MulAmt % 3) == 0) {
24373 MulAmt2 = MulAmt / 3;
24376 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
24379 if (isPowerOf2_64(MulAmt2) &&
24380 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
24381 // If the second multiplier is pow2, issue it first. We want the multiply by
24382 // 3, 5, or 9 to be folded into the addressing mode unless the lone use is an add.
24384 std::swap(MulAmt1, MulAmt2);
24387 if (isPowerOf2_64(MulAmt1))
24388 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
24389 DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
24391 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
24392 DAG.getConstant(MulAmt1, VT));
24394 if (isPowerOf2_64(MulAmt2))
24395 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
24396 DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
24398 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
24399 DAG.getConstant(MulAmt2, VT));
24401 // Do not add new nodes to DAG combiner worklist.
24402 DCI.CombineTo(N, NewMul, false);
24407 static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
24408 SDValue N0 = N->getOperand(0);
24409 SDValue N1 = N->getOperand(1);
24410 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
24411 EVT VT = N0.getValueType();
24413 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
24414 // since the result of setcc_c is all zero's or all ones.
24415 if (VT.isInteger() && !VT.isVector() &&
24416 N1C && N0.getOpcode() == ISD::AND &&
24417 N0.getOperand(1).getOpcode() == ISD::Constant) {
24418 SDValue N00 = N0.getOperand(0);
24419 if (N00.getOpcode() == X86ISD::SETCC_CARRY ||
24420 ((N00.getOpcode() == ISD::ANY_EXTEND ||
24421 N00.getOpcode() == ISD::ZERO_EXTEND) &&
24422 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) {
24423 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
24424 APInt ShAmt = N1C->getAPIntValue();
24425 Mask = Mask.shl(ShAmt);
24427 return DAG.getNode(ISD::AND, SDLoc(N), VT,
24428 N00, DAG.getConstant(Mask, VT));
24432 // Hardware support for vector shifts is sparse which makes us scalarize the
24433 // vector operations in many cases. Also, on Sandy Bridge ADD is faster than SHL.
24435 // (shl V, 1) -> add V,V
24436 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
24437 if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
24438 assert(N0.getValueType().isVector() && "Invalid vector shift type");
24439 // We shift all of the values by one. In many cases we do not have
24440 // hardware support for this operation. This is better expressed as an ADD of two values.
24442 if (N1SplatC->getZExtValue() == 1)
24443 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
24449 /// \brief Returns a vector of 0s if the node in input is a vector logical
24450 /// shift by a constant amount which is known to be bigger than or equal
24451 /// to the vector element size in bits.
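/// Illustrative example (hypothetical): (srl v4i32 %x, <32, 32, 32, 32>)
/// shifts every 32-bit lane by the full element width, so it can simply be
/// replaced by a zero vector.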
24452 static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
24453 const X86Subtarget *Subtarget) {
24454 EVT VT = N->getValueType(0);
24456 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
24457 (!Subtarget->hasInt256() ||
24458 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
24461 SDValue Amt = N->getOperand(1);
24463 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
24464 if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
24465 APInt ShiftAmt = AmtSplat->getAPIntValue();
24466 unsigned MaxAmount = VT.getVectorElementType().getSizeInBits();
24468 // SSE2/AVX2 logical shifts always return a vector of 0s
24469 // if the shift amount is bigger than or equal to
24470 // the element size. The constant shift amount will be
24471 // encoded as an 8-bit immediate.
24472 if (ShiftAmt.trunc(8).uge(MaxAmount))
24473 return getZeroVector(VT, Subtarget, DAG, DL);
24479 /// PerformShiftCombine - Combine shifts.
24480 static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
24481 TargetLowering::DAGCombinerInfo &DCI,
24482 const X86Subtarget *Subtarget) {
24483 if (N->getOpcode() == ISD::SHL) {
24484 SDValue V = PerformSHLCombine(N, DAG);
24485 if (V.getNode()) return V;
24488 if (N->getOpcode() != ISD::SRA) {
24489 // Try to fold this logical shift into a zero vector.
24490 SDValue V = performShiftToAllZeros(N, DAG, Subtarget);
24491 if (V.getNode()) return V;
24497 // CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..))
24498 // where both setccs reference the same FP CMP, and rewrite for CMPEQSS
24499 // and friends. Likewise for OR -> CMPNEQSS.
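// Illustrative sketch: for a pattern like
//   (and (setcc COND_E (cmp %a, %b)), (setcc COND_NP (cmp %a, %b)))
// the code below emits a single X86ISD::FSETCC (a cmpeqss/cmpeqsd-style
// compare) on %a and %b and extracts its low bit with AND/TRUNCATE instead
// of materializing and combining two scalar SETCC results.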
24500 static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
24501 TargetLowering::DAGCombinerInfo &DCI,
24502 const X86Subtarget *Subtarget) {
24505 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
24506 // we're requiring SSE2 for both.
24507 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
24508 SDValue N0 = N->getOperand(0);
24509 SDValue N1 = N->getOperand(1);
24510 SDValue CMP0 = N0->getOperand(1);
24511 SDValue CMP1 = N1->getOperand(1);
24514 // The SETCCs should both refer to the same CMP.
24515 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
24518 SDValue CMP00 = CMP0->getOperand(0);
24519 SDValue CMP01 = CMP0->getOperand(1);
24520 EVT VT = CMP00.getValueType();
24522 if (VT == MVT::f32 || VT == MVT::f64) {
24523 bool ExpectingFlags = false;
24524 // Check for any users that want flags:
24525 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
24526 !ExpectingFlags && UI != UE; ++UI)
24527 switch (UI->getOpcode()) {
24532 ExpectingFlags = true;
24534 case ISD::CopyToReg:
24535 case ISD::SIGN_EXTEND:
24536 case ISD::ZERO_EXTEND:
24537 case ISD::ANY_EXTEND:
24541 if (!ExpectingFlags) {
24542 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
24543 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
24545 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
24546 X86::CondCode tmp = cc0;
cc0 = cc1;
cc1 = tmp;
24551 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
24552 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
24553 // FIXME: need symbolic constants for these magic numbers.
24554 // See X86ATTInstPrinter.cpp:printSSECC().
24555 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
24556 if (Subtarget->hasAVX512()) {
24557 SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
24558 CMP01, DAG.getConstant(x86cc, MVT::i8));
24559 if (N->getValueType(0) != MVT::i1)
24560 return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
24564 SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
24565 CMP00.getValueType(), CMP00, CMP01,
24566 DAG.getConstant(x86cc, MVT::i8));
24568 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
24569 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
24571 if (is64BitFP && !Subtarget->is64Bit()) {
24572 // On a 32-bit target, we cannot bitcast the 64-bit float to a
24573 // 64-bit integer, since that's not a legal type. Since
24574 // OnesOrZeroesF is all ones or all zeroes, we don't need all the
24575 // bits, but can do this little dance to extract the lowest 32 bits
24576 // and work with those going forward.
24577 SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
24579 SDValue Vector32 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32,
24581 OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
24582 Vector32, DAG.getIntPtrConstant(0));
24586 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, IntVT, OnesOrZeroesF);
24587 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
24588 DAG.getConstant(1, IntVT));
24589 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed);
24590 return OneBitOfTruth;
24598 /// CanFoldXORWithAllOnes - Test whether the XOR operand is an AllOnes vector
24599 /// so it can be folded inside ANDNP.
24600 static bool CanFoldXORWithAllOnes(const SDNode *N) {
24601 EVT VT = N->getValueType(0);
24603 // Match direct AllOnes for 128 and 256-bit vectors
24604 if (ISD::isBuildVectorAllOnes(N))
24607 // Look through a bit convert.
24608 if (N->getOpcode() == ISD::BITCAST)
24609 N = N->getOperand(0).getNode();
24611 // Sometimes the operand may come from an insert_subvector building a 256-bit vector.
24613 if (VT.is256BitVector() &&
24614 N->getOpcode() == ISD::INSERT_SUBVECTOR) {
24615 SDValue V1 = N->getOperand(0);
24616 SDValue V2 = N->getOperand(1);
24618 if (V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
24619 V1.getOperand(0).getOpcode() == ISD::UNDEF &&
24620 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
24621 ISD::isBuildVectorAllOnes(V2.getNode()))
24628 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
24629 // register. In most cases we actually compare or select YMM-sized registers
24630 // and mixing the two types creates horrible code. This method optimizes
24631 // some of the transition sequences.
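// Illustrative example (hypothetical types): an (and (trunc v8i32 %a to
// v8i16), <constant splat>) that is then zero-extended back to v8i32 can be
// rewritten to perform the AND directly on the wide v8i32 value and mask off
// the high bits, avoiding the YMM<->XMM transition around the logic op.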
24632 static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
24633 TargetLowering::DAGCombinerInfo &DCI,
24634 const X86Subtarget *Subtarget) {
24635 EVT VT = N->getValueType(0);
24636 if (!VT.is256BitVector())
24639 assert((N->getOpcode() == ISD::ANY_EXTEND ||
24640 N->getOpcode() == ISD::ZERO_EXTEND ||
24641 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
24643 SDValue Narrow = N->getOperand(0);
24644 EVT NarrowVT = Narrow->getValueType(0);
24645 if (!NarrowVT.is128BitVector())
24648 if (Narrow->getOpcode() != ISD::XOR &&
24649 Narrow->getOpcode() != ISD::AND &&
24650 Narrow->getOpcode() != ISD::OR)
24653 SDValue N0 = Narrow->getOperand(0);
24654 SDValue N1 = Narrow->getOperand(1);
24657 // The Left side has to be a trunc.
24658 if (N0.getOpcode() != ISD::TRUNCATE)
24661 // The type of the truncated inputs.
24662 EVT WideVT = N0->getOperand(0)->getValueType(0);
24666 // The right side has to be a 'trunc' or a constant vector.
24667 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE;
24668 ConstantSDNode *RHSConstSplat = nullptr;
24669 if (auto *RHSBV = dyn_cast<BuildVectorSDNode>(N1))
24670 RHSConstSplat = RHSBV->getConstantSplatNode();
24671 if (!RHSTrunc && !RHSConstSplat)
24674 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24676 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT))
24679 // Set N0 and N1 to hold the inputs to the new wide operation.
24680 N0 = N0->getOperand(0);
24681 if (RHSConstSplat) {
24682 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getScalarType(),
24683 SDValue(RHSConstSplat, 0));
24684 SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1);
24685 N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C);
24686 } else if (RHSTrunc) {
24687 N1 = N1->getOperand(0);
24690 // Generate the wide operation.
24691 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1);
24692 unsigned Opcode = N->getOpcode();
24694 case ISD::ANY_EXTEND:
24696 case ISD::ZERO_EXTEND: {
24697 unsigned InBits = NarrowVT.getScalarType().getSizeInBits();
24698 APInt Mask = APInt::getAllOnesValue(InBits);
24699 Mask = Mask.zext(VT.getScalarType().getSizeInBits());
24700 return DAG.getNode(ISD::AND, DL, VT,
24701 Op, DAG.getConstant(Mask, VT));
24703 case ISD::SIGN_EXTEND:
24704 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
24705 Op, DAG.getValueType(NarrowVT));
24707 llvm_unreachable("Unexpected opcode");
24711 static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
24712 TargetLowering::DAGCombinerInfo &DCI,
24713 const X86Subtarget *Subtarget) {
24714 EVT VT = N->getValueType(0);
24715 if (DCI.isBeforeLegalizeOps())
24718 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
24722 // Create BEXTR instructions
24723 // BEXTR is ((X >> imm) & (2**size-1))
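// Illustrative example (hypothetical constants): ((x >> 4) & 0xFFF) becomes
// BEXTR with control value (4 | (12 << 8)) == 0xC04, i.e. start bit 4 and
// field length 12, matching the immediate built below.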
24724 if (VT == MVT::i32 || VT == MVT::i64) {
24725 SDValue N0 = N->getOperand(0);
24726 SDValue N1 = N->getOperand(1);
24729 // Check for BEXTR.
24730 if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
24731 (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
24732 ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
24733 ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
24734 if (MaskNode && ShiftNode) {
24735 uint64_t Mask = MaskNode->getZExtValue();
24736 uint64_t Shift = ShiftNode->getZExtValue();
24737 if (isMask_64(Mask)) {
24738 uint64_t MaskSize = countPopulation(Mask);
24739 if (Shift + MaskSize <= VT.getSizeInBits())
24740 return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
24741 DAG.getConstant(Shift | (MaskSize << 8), VT));
24749 // Want to form ANDNP nodes:
24750 // 1) In the hopes of then easily combining them with OR and AND nodes
24751 // to form PBLEND/PSIGN.
24752 // 2) To match ANDN packed intrinsics
24753 if (VT != MVT::v2i64 && VT != MVT::v4i64)
24756 SDValue N0 = N->getOperand(0);
24757 SDValue N1 = N->getOperand(1);
24760 // Check LHS for vnot
24761 if (N0.getOpcode() == ISD::XOR &&
24762 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
24763 CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
24764 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);
24766 // Check RHS for vnot
24767 if (N1.getOpcode() == ISD::XOR &&
24768 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
24769 CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
24770 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);
24775 static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
24776 TargetLowering::DAGCombinerInfo &DCI,
24777 const X86Subtarget *Subtarget) {
24778 if (DCI.isBeforeLegalizeOps())
24781 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
24785 SDValue N0 = N->getOperand(0);
24786 SDValue N1 = N->getOperand(1);
24787 EVT VT = N->getValueType(0);
24789 // look for psign/blend
24790 if (VT == MVT::v2i64 || VT == MVT::v4i64) {
24791 if (!Subtarget->hasSSSE3() ||
24792 (VT == MVT::v4i64 && !Subtarget->hasInt256()))
24795 // Canonicalize pandn to RHS
24796 if (N0.getOpcode() == X86ISD::ANDNP)
24798 // or (and (m, y), (pandn m, x))
24799 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
24800 SDValue Mask = N1.getOperand(0);
24801 SDValue X = N1.getOperand(1);
24803 if (N0.getOperand(0) == Mask)
24804 Y = N0.getOperand(1);
24805 if (N0.getOperand(1) == Mask)
24806 Y = N0.getOperand(0);
24808 // Check to see if the mask appeared in both the AND and ANDNP and
24812 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
24813 // Look through mask bitcast.
24814 if (Mask.getOpcode() == ISD::BITCAST)
24815 Mask = Mask.getOperand(0);
24816 if (X.getOpcode() == ISD::BITCAST)
24817 X = X.getOperand(0);
24818 if (Y.getOpcode() == ISD::BITCAST)
24819 Y = Y.getOperand(0);
24821 EVT MaskVT = Mask.getValueType();
24823 // Validate that the Mask operand is a vector sra node.
24824 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
24825 // there is no psrai.b
24826 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
24827 unsigned SraAmt = ~0;
24828 if (Mask.getOpcode() == ISD::SRA) {
24829 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
24830 if (auto *AmtConst = AmtBV->getConstantSplatNode())
24831 SraAmt = AmtConst->getZExtValue();
24832 } else if (Mask.getOpcode() == X86ISD::VSRAI) {
24833 SDValue SraC = Mask.getOperand(1);
24834 SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
24836 if ((SraAmt + 1) != EltBits)
24841 // Now we know we at least have a pblendvb with the mask val. See if
24842 // we can form a psignb/w/d.
24843 // psign = x.type == y.type == mask.type && y = sub(0, x);
24844 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
24845 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
24846 X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
24847 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
24848 "Unsupported VT for PSIGN");
24849 Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0));
24850 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
24852 // PBLENDVB only available on SSE 4.1
24853 if (!Subtarget->hasSSE41())
24856 EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;
24858 X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X);
24859 Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y);
24860 Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask);
24861 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
24862 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
24866 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
24869 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
24870 MachineFunction &MF = DAG.getMachineFunction();
24872 bool OptForSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
24874 // SHLD/SHRD instructions have lower register pressure, but on some
24875 // platforms they have higher latency than the equivalent
24876 // series of shifts/or that would otherwise be generated.
24877 // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
24878 // have higher latencies and we are not optimizing for size.
24879 if (!OptForSize && Subtarget->isSHLDSlow())
24882 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
24884 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
24886 if (!N0.hasOneUse() || !N1.hasOneUse())
24889 SDValue ShAmt0 = N0.getOperand(1);
24890 if (ShAmt0.getValueType() != MVT::i8)
24892 SDValue ShAmt1 = N1.getOperand(1);
24893 if (ShAmt1.getValueType() != MVT::i8)
24895 if (ShAmt0.getOpcode() == ISD::TRUNCATE)
24896 ShAmt0 = ShAmt0.getOperand(0);
24897 if (ShAmt1.getOpcode() == ISD::TRUNCATE)
24898 ShAmt1 = ShAmt1.getOperand(0);
24901 unsigned Opc = X86ISD::SHLD;
24902 SDValue Op0 = N0.getOperand(0);
24903 SDValue Op1 = N1.getOperand(0);
24904 if (ShAmt0.getOpcode() == ISD::SUB) {
24905 Opc = X86ISD::SHRD;
24906 std::swap(Op0, Op1);
24907 std::swap(ShAmt0, ShAmt1);
24910 unsigned Bits = VT.getSizeInBits();
24911 if (ShAmt1.getOpcode() == ISD::SUB) {
24912 SDValue Sum = ShAmt1.getOperand(0);
24913 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
24914 SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
24915 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
24916 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
24917 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
24918 return DAG.getNode(Opc, DL, VT,
24920 DAG.getNode(ISD::TRUNCATE, DL,
24923 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
24924 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
24926 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
24927 return DAG.getNode(Opc, DL, VT,
24928 N0.getOperand(0), N1.getOperand(0),
24929 DAG.getNode(ISD::TRUNCATE, DL,
24936 // Generate NEG and CMOV for integer abs.
24937 static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
24938 EVT VT = N->getValueType(0);
24940 // Since X86 does not have CMOV for 8-bit integer, we don't convert
24941 // 8-bit integer abs to NEG and CMOV.
24942 if (VT.isInteger() && VT.getSizeInBits() == 8)
24945 SDValue N0 = N->getOperand(0);
24946 SDValue N1 = N->getOperand(1);
24949 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
24950 // and change it to SUB and CMOV.
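// Illustrative example: for i32 this matches the classic abs expansion
//   %s = sra %x, 31;  %t = add %x, %s;  %r = xor %t, %s
// and rewrites it as a SUB computing 0 - %x followed by a CMOV on COND_GE
// that selects either %x or its negation based on the SUB's flags.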
24951 if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
24952 N0.getOpcode() == ISD::ADD &&
24953 N0.getOperand(1) == N1 &&
24954 N1.getOpcode() == ISD::SRA &&
24955 N1.getOperand(0) == N0.getOperand(0))
24956 if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
24957 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) {
24958 // Generate SUB & CMOV.
24959 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
24960 DAG.getConstant(0, VT), N0.getOperand(0));
24962 SDValue Ops[] = { N0.getOperand(0), Neg,
24963 DAG.getConstant(X86::COND_GE, MVT::i8),
24964 SDValue(Neg.getNode(), 1) };
24965 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
24970 // PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes
24971 static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
24972 TargetLowering::DAGCombinerInfo &DCI,
24973 const X86Subtarget *Subtarget) {
24974 if (DCI.isBeforeLegalizeOps())
24977 if (Subtarget->hasCMov()) {
24978 SDValue RV = performIntegerAbsCombine(N, DAG);
24986 /// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
24987 static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
24988 TargetLowering::DAGCombinerInfo &DCI,
24989 const X86Subtarget *Subtarget) {
24990 LoadSDNode *Ld = cast<LoadSDNode>(N);
24991 EVT RegVT = Ld->getValueType(0);
24992 EVT MemVT = Ld->getMemoryVT();
24994 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24996 // For chips with slow 32-byte unaligned loads, break the 32-byte operation
24997 // into two 16-byte operations.
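// Illustrative example (hypothetical): an unaligned 32-byte v8i32 load on
// such a chip is rewritten below as two 16-byte v4i32 loads at offsets 0 and
// 16, joined by a TokenFactor and recombined with Insert128BitVector.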
24998 ISD::LoadExtType Ext = Ld->getExtensionType();
24999 unsigned Alignment = Ld->getAlignment();
25000 bool IsAligned = Alignment == 0 || Alignment >= MemVT.getSizeInBits()/8;
25001 if (RegVT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
25002 !DCI.isBeforeLegalizeOps() && !IsAligned && Ext == ISD::NON_EXTLOAD) {
25003 unsigned NumElems = RegVT.getVectorNumElements();
25007 SDValue Ptr = Ld->getBasePtr();
25008 SDValue Increment = DAG.getConstant(16, TLI.getPointerTy());
25010 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
25012 SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
25013 Ld->getPointerInfo(), Ld->isVolatile(),
25014 Ld->isNonTemporal(), Ld->isInvariant(),
25016 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
25017 SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
25018 Ld->getPointerInfo(), Ld->isVolatile(),
25019 Ld->isNonTemporal(), Ld->isInvariant(),
25020 std::min(16U, Alignment));
25021 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
25023 Load2.getValue(1));
25025 SDValue NewVec = DAG.getUNDEF(RegVT);
25026 NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl);
25027 NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl);
25028 return DCI.CombineTo(N, NewVec, TF, true);
25034 /// PerformMLOADCombine - Resolve extending loads
25035 static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
25036 TargetLowering::DAGCombinerInfo &DCI,
25037 const X86Subtarget *Subtarget) {
25038 MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
25039 if (Mld->getExtensionType() != ISD::SEXTLOAD)
25042 EVT VT = Mld->getValueType(0);
25043 unsigned NumElems = VT.getVectorNumElements();
25044 EVT LdVT = Mld->getMemoryVT();
25047 assert(LdVT != VT && "Cannot extend to the same type");
25048 unsigned ToSz = VT.getVectorElementType().getSizeInBits();
25049 unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
25050 // From, To sizes and ElemCount must be pow of two
25051 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
25052 "Unexpected size for extending masked load");
25054 unsigned SizeRatio = ToSz / FromSz;
25055 assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
25057 // Create a type on which we perform the shuffle
25058 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25059 LdVT.getScalarType(), NumElems*SizeRatio);
25060 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
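// Illustrative example (hypothetical types): a sextload from v8i16 memory to
// a v8i32 result has SizeRatio == 2 and WideVecVT == v16i16; the shuffles
// below move the significant narrow elements into the first NumElems lanes
// of the wide vector, and the final X86ISD::VSEXT widens them back to v8i32.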
25062 // Convert Src0 value
25063 SDValue WideSrc0 = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mld->getSrc0());
25064 if (Mld->getSrc0().getOpcode() != ISD::UNDEF) {
25065 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25066 for (unsigned i = 0; i != NumElems; ++i)
25067 ShuffleVec[i] = i * SizeRatio;
25069 // Can't shuffle using an illegal type.
25070 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
25071 && "WideVecVT should be legal");
25072 WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
25073 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
25075 // Prepare the new mask.
SDValue NewMask;
25077 SDValue Mask = Mld->getMask();
25078 if (Mask.getValueType() == VT) {
25079 // Mask and original value have the same type
25080 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
25081 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25082 for (unsigned i = 0; i != NumElems; ++i)
25083 ShuffleVec[i] = i * SizeRatio;
25084 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
25085 ShuffleVec[i] = NumElems*SizeRatio;
25086 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
25087 DAG.getConstant(0, WideVecVT),
25091 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
25092 unsigned WidenNumElts = NumElems*SizeRatio;
25093 unsigned MaskNumElts = VT.getVectorNumElements();
25094 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
25097 unsigned NumConcat = WidenNumElts / MaskNumElts;
25098 SmallVector<SDValue, 16> Ops(NumConcat);
25099 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
25101 for (unsigned i = 1; i != NumConcat; ++i)
25104 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
25107 SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
25108 Mld->getBasePtr(), NewMask, WideSrc0,
25109 Mld->getMemoryVT(), Mld->getMemOperand(),
25111 SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd);
25112 return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
25115 /// PerformMSTORECombine - Resolve truncating stores
25116 static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
25117 const X86Subtarget *Subtarget) {
25118 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
25119 if (!Mst->isTruncatingStore())
25122 EVT VT = Mst->getValue().getValueType();
25123 unsigned NumElems = VT.getVectorNumElements();
25124 EVT StVT = Mst->getMemoryVT();
25127 assert(StVT != VT && "Cannot truncate to the same type");
25128 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25129 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25131 // From, To sizes and ElemCount must be pow of two
25132 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
25133 "Unexpected size for truncating masked store");
25134 // We are going to use the original vector elt for storing.
25135 // Accumulated smaller vector elements must be a multiple of the store size.
25136 assert (((NumElems * FromSz) % ToSz) == 0 &&
25137 "Unexpected ratio for truncating masked store");
25139 unsigned SizeRatio = FromSz / ToSz;
25140 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
25142 // Create a type on which we perform the shuffle
25143 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25144 StVT.getScalarType(), NumElems*SizeRatio);
25146 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25148 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mst->getValue());
25149 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25150 for (unsigned i = 0; i != NumElems; ++i)
25151 ShuffleVec[i] = i * SizeRatio;
25153 // Can't shuffle using an illegal type.
25154 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
25155 && "WideVecVT should be legal");
25157 SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25158 DAG.getUNDEF(WideVecVT),
25162 SDValue Mask = Mst->getMask();
25163 if (Mask.getValueType() == VT) {
25164 // Mask and original value have the same type
25165 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
25166 for (unsigned i = 0; i != NumElems; ++i)
25167 ShuffleVec[i] = i * SizeRatio;
25168 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
25169 ShuffleVec[i] = NumElems*SizeRatio;
25170 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
25171 DAG.getConstant(0, WideVecVT),
25175 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
25176 unsigned WidenNumElts = NumElems*SizeRatio;
25177 unsigned MaskNumElts = VT.getVectorNumElements();
25178 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
25181 unsigned NumConcat = WidenNumElts / MaskNumElts;
25182 SmallVector<SDValue, 16> Ops(NumConcat);
25183 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
25185 for (unsigned i = 1; i != NumConcat; ++i)
25188 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
25191 return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal, Mst->getBasePtr(),
25192 NewMask, StVT, Mst->getMemOperand(), false);
25194 /// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
25195 static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
25196 const X86Subtarget *Subtarget) {
25197 StoreSDNode *St = cast<StoreSDNode>(N);
25198 EVT VT = St->getValue().getValueType();
25199 EVT StVT = St->getMemoryVT();
25201 SDValue StoredVal = St->getOperand(1);
25202 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25204 // If we are saving a concatenation of two XMM registers and 32-byte stores
25205 // are slow, such as on Sandy Bridge, perform two 16-byte stores.
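// For example, an unaligned v8f32 (32-byte) store is split below into two
// 16-byte stores of the low and high v4f32 halves, at Ptr and Ptr+16.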
25206 unsigned Alignment = St->getAlignment();
25207 bool IsAligned = Alignment == 0 || Alignment >= VT.getSizeInBits()/8;
25208 if (VT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
25209 StVT == VT && !IsAligned) {
25210 unsigned NumElems = VT.getVectorNumElements();
25214 SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl);
25215 SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl);
25217 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy());
25218 SDValue Ptr0 = St->getBasePtr();
25219 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride);
25221 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0,
25222 St->getPointerInfo(), St->isVolatile(),
25223 St->isNonTemporal(), Alignment);
25224 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1,
25225 St->getPointerInfo(), St->isVolatile(),
25226 St->isNonTemporal(),
25227 std::min(16U, Alignment));
25228 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
25231 // Optimize trunc store (of multiple scalars) to shuffle and store.
25232 // First, pack all of the elements in one place. Next, store to memory
25233 // in fewer chunks.
25234 if (St->isTruncatingStore() && VT.isVector()) {
25235 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25236 unsigned NumElems = VT.getVectorNumElements();
25237 assert(StVT != VT && "Cannot truncate to the same type");
25238 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25239 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25241 // From, To sizes and ElemCount must be pow of two
25242 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
25243 // We are going to use the original vector elt for storing.
25244 // Accumulated smaller vector elements must be a multiple of the store size.
25245 if (0 != (NumElems * FromSz) % ToSz) return SDValue();
25247 unsigned SizeRatio = FromSz / ToSz;
25249 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
25251 // Create a type on which we perform the shuffle
25252 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25253 StVT.getScalarType(), NumElems*SizeRatio);
25255 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
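// Example: a v8i32 -> v8i16 truncating store has SizeRatio = 2 and
// WideVecVT = v16i16; the shuffle below packs the eight truncated values
// into the low eight lanes of the wide vector.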
25257 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue());
25258 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
25259 for (unsigned i = 0; i != NumElems; ++i)
25260 ShuffleVec[i] = i * SizeRatio;
25262 // Can't shuffle using an illegal type.
25263 if (!TLI.isTypeLegal(WideVecVT))
25266 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25267 DAG.getUNDEF(WideVecVT),
25269 // At this point all of the data is stored at the bottom of the
25270 // register. We now need to save it to mem.
25272 // Find the largest store unit
25273 MVT StoreType = MVT::i8;
25274 for (MVT Tp : MVT::integer_valuetypes()) {
25275 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
25279 // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64.
25280 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
25281 (64 <= NumElems * ToSz))
25282 StoreType = MVT::f64;
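// Example: for the v8i32 -> v8i16 case, NumElems * ToSz = 128 bits, so
// StoreType becomes i64 on 64-bit targets (f64 may be used instead on
// 32-bit targets) and the store loop below emits two stores.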
25284 // Bitcast the original vector into a vector of store-size units
25285 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
25286 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
25287 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
25288 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff);
25289 SmallVector<SDValue, 8> Chains;
25290 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
25291 TLI.getPointerTy());
25292 SDValue Ptr = St->getBasePtr();
25294 // Perform one or more big stores into memory.
25295 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
25296 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
25297 StoreType, ShuffWide,
25298 DAG.getIntPtrConstant(i));
25299 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr,
25300 St->getPointerInfo(), St->isVolatile(),
25301 St->isNonTemporal(), St->getAlignment());
25302 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
25303 Chains.push_back(Ch);
25306 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
25309 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
25310 // the FP state in cases where an emms may be missing.
25311 // A preferable solution to the general problem is to figure out the right
25312 // places to insert EMMS. This qualifies as a quick hack.
25314 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
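// For example, on a 32-bit target with SSE2, (store (load i64)) is rewritten
// below as a single f64 load/store instead of two pairs of 32-bit operations.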
25315 if (VT.getSizeInBits() != 64)
25318 const Function *F = DAG.getMachineFunction().getFunction();
25319 bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
25320 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
25321 && Subtarget->hasSSE2();
25322 if ((VT.isVector() ||
25323 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
25324 isa<LoadSDNode>(St->getValue()) &&
25325 !cast<LoadSDNode>(St->getValue())->isVolatile() &&
25326 St->getChain().hasOneUse() && !St->isVolatile()) {
25327 SDNode* LdVal = St->getValue().getNode();
25328 LoadSDNode *Ld = nullptr;
25329 int TokenFactorIndex = -1;
25330 SmallVector<SDValue, 8> Ops;
25331 SDNode* ChainVal = St->getChain().getNode();
25332 // Must be a store of a load. We currently handle two cases: the load
25333 // is a direct child, or it is under an intervening TokenFactor. It is
25334 // possible to dig deeper under nested TokenFactors.
25335 if (ChainVal == LdVal)
25336 Ld = cast<LoadSDNode>(St->getChain());
25337 else if (St->getValue().hasOneUse() &&
25338 ChainVal->getOpcode() == ISD::TokenFactor) {
25339 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
25340 if (ChainVal->getOperand(i).getNode() == LdVal) {
25341 TokenFactorIndex = i;
25342 Ld = cast<LoadSDNode>(St->getValue());
25344 Ops.push_back(ChainVal->getOperand(i));
25348 if (!Ld || !ISD::isNormalLoad(Ld))
25351 // If this is not the MMX case, i.e. we are just turning i64 load/store
25352 // into f64 load/store, avoid the transformation if there are multiple
25353 // uses of the loaded value.
25354 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
25359 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
25360 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
25362 if (Subtarget->is64Bit() || F64IsLegal) {
25363 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
25364 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
25365 Ld->getPointerInfo(), Ld->isVolatile(),
25366 Ld->isNonTemporal(), Ld->isInvariant(),
25367 Ld->getAlignment());
25368 SDValue NewChain = NewLd.getValue(1);
25369 if (TokenFactorIndex != -1) {
25370 Ops.push_back(NewChain);
25371 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25373 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
25374 St->getPointerInfo(),
25375 St->isVolatile(), St->isNonTemporal(),
25376 St->getAlignment());
25379 // Otherwise, lower to two pairs of 32-bit loads / stores.
25380 SDValue LoAddr = Ld->getBasePtr();
25381 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
25382 DAG.getConstant(4, MVT::i32));
25384 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
25385 Ld->getPointerInfo(),
25386 Ld->isVolatile(), Ld->isNonTemporal(),
25387 Ld->isInvariant(), Ld->getAlignment());
25388 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
25389 Ld->getPointerInfo().getWithOffset(4),
25390 Ld->isVolatile(), Ld->isNonTemporal(),
25392 MinAlign(Ld->getAlignment(), 4));
25394 SDValue NewChain = LoLd.getValue(1);
25395 if (TokenFactorIndex != -1) {
25396 Ops.push_back(LoLd);
25397 Ops.push_back(HiLd);
25398 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25401 LoAddr = St->getBasePtr();
25402 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
25403 DAG.getConstant(4, MVT::i32));
25405 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
25406 St->getPointerInfo(),
25407 St->isVolatile(), St->isNonTemporal(),
25408 St->getAlignment());
25409 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
25410 St->getPointerInfo().getWithOffset(4),
25412 St->isNonTemporal(),
25413 MinAlign(St->getAlignment(), 4));
25414 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
25419 /// Return 'true' if this vector operation is "horizontal"
25420 /// and return the operands for the horizontal operation in LHS and RHS. A
25421 /// horizontal operation performs the binary operation on successive elements
25422 /// of its first operand, then on successive elements of its second operand,
25423 /// returning the resulting values in a vector. For example, if
25424 /// A = < float a0, float a1, float a2, float a3 >
25426 /// B = < float b0, float b1, float b2, float b3 >
25427 /// then the result of doing a horizontal operation on A and B is
25428 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
25429 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
25430 /// A horizontal-op B, for some already available A and B, and if so then LHS is
25431 /// set to A, RHS to B, and the routine returns 'true'.
25432 /// Note that the binary operation should have the property that if one of the
25433 /// operands is UNDEF then the result is UNDEF.
25434 static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
25435 // Look for the following pattern: if
25436 // A = < float a0, float a1, float a2, float a3 >
25437 // B = < float b0, float b1, float b2, float b3 >
25439 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
25440 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
25441 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
25442 // which is A horizontal-op B.
25444 // At least one of the operands should be a vector shuffle.
25445 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
25446 RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
25449 MVT VT = LHS.getSimpleValueType();
25451 assert((VT.is128BitVector() || VT.is256BitVector()) &&
25452 "Unsupported vector type for horizontal add/sub");
25454 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
25455 // operate independently on 128-bit lanes.
25456 unsigned NumElts = VT.getVectorNumElements();
25457 unsigned NumLanes = VT.getSizeInBits()/128;
25458 unsigned NumLaneElts = NumElts / NumLanes;
25459 assert((NumLaneElts % 2 == 0) &&
25460 "Vector type should have an even number of elements in each lane");
25461 unsigned HalfLaneElts = NumLaneElts/2;
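// Example: for v8f32 on AVX, NumElts = 8, NumLanes = 2, NumLaneElts = 4 and
// HalfLaneElts = 2, so each 128-bit lane is checked independently below.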
25463 // View LHS in the form
25464 // LHS = VECTOR_SHUFFLE A, B, LMask
25465 // If LHS is not a shuffle then pretend it is the shuffle
25466 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
25467 // NOTE: in what follows a default initialized SDValue represents an UNDEF of type VT.
25470 SmallVector<int, 16> LMask(NumElts);
25471 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25472 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF)
25473 A = LHS.getOperand(0);
25474 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF)
25475 B = LHS.getOperand(1);
25476 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
25477 std::copy(Mask.begin(), Mask.end(), LMask.begin());
25479 if (LHS.getOpcode() != ISD::UNDEF)
25481 for (unsigned i = 0; i != NumElts; ++i)
25485 // Likewise, view RHS in the form
25486 // RHS = VECTOR_SHUFFLE C, D, RMask
25488 SmallVector<int, 16> RMask(NumElts);
25489 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25490 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF)
25491 C = RHS.getOperand(0);
25492 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF)
25493 D = RHS.getOperand(1);
25494 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
25495 std::copy(Mask.begin(), Mask.end(), RMask.begin());
25497 if (RHS.getOpcode() != ISD::UNDEF)
25499 for (unsigned i = 0; i != NumElts; ++i)
25503 // Check that the shuffles are both shuffling the same vectors.
25504 if (!(A == C && B == D) && !(A == D && B == C))
25507 // If everything is UNDEF then bail out: it would be better to fold to UNDEF.
25508 if (!A.getNode() && !B.getNode())
25511 // If A and B occur in reverse order in RHS, then "swap" them (which means
25512 // rewriting the mask).
25514 CommuteVectorShuffleMask(RMask, NumElts);
25516 // At this point LHS and RHS are equivalent to
25517 // LHS = VECTOR_SHUFFLE A, B, LMask
25518 // RHS = VECTOR_SHUFFLE A, B, RMask
25519 // Check that the masks correspond to performing a horizontal operation.
25520 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
25521 for (unsigned i = 0; i != NumLaneElts; ++i) {
25522 int LIdx = LMask[i+l], RIdx = RMask[i+l];
25524 // Ignore any UNDEF components.
25525 if (LIdx < 0 || RIdx < 0 ||
25526 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
25527 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
25530 // Check that successive elements are being operated on. If not, this is
25531 // not a horizontal operation.
25532 unsigned Src = (i/HalfLaneElts); // each lane is split between srcs
25533 int Index = 2*(i%HalfLaneElts) + NumElts*Src + l;
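// Example: for v4f32 in lane l = 0 the expected (LIdx, RIdx) pairs are
// (0,1), (2,3), (4,5), (6,7), i.e. LMask = <0,2,4,6> and RMask = <1,3,5,7>,
// which is exactly the pattern described above.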
25534 if (!(LIdx == Index && RIdx == Index + 1) &&
25535 !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
25540 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
25541 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
25545 /// Do target-specific dag combines on floating point adds.
25546 static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
25547 const X86Subtarget *Subtarget) {
25548 EVT VT = N->getValueType(0);
25549 SDValue LHS = N->getOperand(0);
25550 SDValue RHS = N->getOperand(1);
25552 // Try to synthesize horizontal adds from adds of shuffles.
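// For example, (fadd (shuffle A, B, <0,2,4,6>), (shuffle A, B, <1,3,5,7>))
// on v4f32 becomes a single FHADD A, B.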
25553 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25554 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25555 isHorizontalBinOp(LHS, RHS, true))
25556 return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS);
25560 /// Do target-specific dag combines on floating point subs.
25561 static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
25562 const X86Subtarget *Subtarget) {
25563 EVT VT = N->getValueType(0);
25564 SDValue LHS = N->getOperand(0);
25565 SDValue RHS = N->getOperand(1);
25567 // Try to synthesize horizontal subs from subs of shuffles.
25568 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25569 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25570 isHorizontalBinOp(LHS, RHS, false))
25571 return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS);
25575 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
25576 static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
25577 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
25579 // F[X]OR(0.0, x) -> x
25580 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25581 if (C->getValueAPF().isPosZero())
25582 return N->getOperand(1);
25584 // F[X]OR(x, 0.0) -> x
25585 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25586 if (C->getValueAPF().isPosZero())
25587 return N->getOperand(0);
25591 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
25592 static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
25593 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
25595 // Only perform optimizations if UnsafeMath is used.
25596 if (!DAG.getTarget().Options.UnsafeFPMath)
25599 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
25600 // into FMAXC and FMINC, which are commutative operations.
25601 unsigned NewOp = 0;
25602 switch (N->getOpcode()) {
25603 default: llvm_unreachable("unknown opcode");
25604 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
25605 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
25608 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
25609 N->getOperand(0), N->getOperand(1));
25612 /// Do target-specific dag combines on X86ISD::FAND nodes.
25613 static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
25614 // FAND(0.0, x) -> 0.0
25615 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25616 if (C->getValueAPF().isPosZero())
25617 return N->getOperand(0);
25619 // FAND(x, 0.0) -> 0.0
25620 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25621 if (C->getValueAPF().isPosZero())
25622 return N->getOperand(1);
25627 /// Do target-specific dag combines on X86ISD::FANDN nodes
25628 static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG) {
25629 // FANDN(0.0, x) -> x
25630 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25631 if (C->getValueAPF().isPosZero())
25632 return N->getOperand(1);
25634 // FANDN(x, 0.0) -> 0.0
25635 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25636 if (C->getValueAPF().isPosZero())
25637 return N->getOperand(1);
25642 static SDValue PerformBTCombine(SDNode *N,
25644 TargetLowering::DAGCombinerInfo &DCI) {
25645 // BT ignores high bits in the bit index operand.
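// Example: for an i32 bit-index operand only the low Log2_32(32) = 5 bits
// are demanded, so a wider constant index can be shrunk and the remaining
// high bits simplified away.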
25646 SDValue Op1 = N->getOperand(1);
25647 if (Op1.hasOneUse()) {
25648 unsigned BitWidth = Op1.getValueSizeInBits();
25649 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
25650 APInt KnownZero, KnownOne;
25651 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
25652 !DCI.isBeforeLegalizeOps());
25653 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25654 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
25655 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
25656 DCI.CommitTargetLoweringOpt(TLO);
25661 static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
25662 SDValue Op = N->getOperand(0);
25663 if (Op.getOpcode() == ISD::BITCAST)
25664 Op = Op.getOperand(0);
25665 EVT VT = N->getValueType(0), OpVT = Op.getValueType();
25666 if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
25667 VT.getVectorElementType().getSizeInBits() ==
25668 OpVT.getVectorElementType().getSizeInBits()) {
25669 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
25674 static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
25675 const X86Subtarget *Subtarget) {
25676 EVT VT = N->getValueType(0);
25677 if (!VT.isVector())
25680 SDValue N0 = N->getOperand(0);
25681 SDValue N1 = N->getOperand(1);
25682 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
25685 // The SIGN_EXTEND_INREG to v4i64 is an expensive operation on both
25686 // SSE and AVX2, since there is no sign-extended shift right
25687 // operation on a vector with 64-bit elements.
25688 //(sext_in_reg (v4i64 anyext (v4i32 x )), ExtraVT) ->
25689 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT)))
25690 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
25691 N0.getOpcode() == ISD::SIGN_EXTEND)) {
25692 SDValue N00 = N0.getOperand(0);
25694 // EXTLOAD has a better solution on AVX2,
25695 // it may be replaced with X86ISD::VSEXT node.
25696 if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256())
25697 if (!ISD::isNormalLoad(N00.getNode()))
25700 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
25701 SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
25703 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
25709 static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
25710 TargetLowering::DAGCombinerInfo &DCI,
25711 const X86Subtarget *Subtarget) {
25712 SDValue N0 = N->getOperand(0);
25713 EVT VT = N->getValueType(0);
25715 // (i8,i32 sext (sdivrem (i8 x, i8 y))) ->
25716 // (i8,i32 (sdivrem_sext_hreg (i8 x, i8 y)))
25717 // This exposes the sext to the sdivrem lowering, so that it directly extends
25718 // from AH (which we otherwise need to do contortions to access).
25719 if (N0.getOpcode() == ISD::SDIVREM && N0.getResNo() == 1 &&
25720 N0.getValueType() == MVT::i8 && VT == MVT::i32) {
25722 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25723 SDValue R = DAG.getNode(X86ISD::SDIVREM8_SEXT_HREG, dl, NodeTys,
25724 N0.getOperand(0), N0.getOperand(1));
25725 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25726 return R.getValue(1);
25729 if (!DCI.isBeforeLegalizeOps())
25732 if (!Subtarget->hasFp256())
25735 if (VT.isVector() && VT.getSizeInBits() == 256) {
25736 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25744 static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
25745 const X86Subtarget* Subtarget) {
25747 EVT VT = N->getValueType(0);
25749 // Let legalize expand this if it isn't a legal type yet.
25750 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
25753 EVT ScalarVT = VT.getScalarType();
25754 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
25755 (!Subtarget->hasFMA() && !Subtarget->hasFMA4()))
25758 SDValue A = N->getOperand(0);
25759 SDValue B = N->getOperand(1);
25760 SDValue C = N->getOperand(2);
25762 bool NegA = (A.getOpcode() == ISD::FNEG);
25763 bool NegB = (B.getOpcode() == ISD::FNEG);
25764 bool NegC = (C.getOpcode() == ISD::FNEG);
25766 // Negative multiplication when NegA xor NegB
25767 bool NegMul = (NegA != NegB);
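// Example: (fma (fneg A), B, C) has NegMul = true and NegC = false, so it
// is lowered below to FNMADD A, B, C, i.e. -(A*B)+C.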
25769 A = A.getOperand(0);
25771 B = B.getOperand(0);
25773 C = C.getOperand(0);
25777 Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB;
25779 Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB;
25781 return DAG.getNode(Opcode, dl, VT, A, B, C);
25784 static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
25785 TargetLowering::DAGCombinerInfo &DCI,
25786 const X86Subtarget *Subtarget) {
25787 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
25788 // (and (i32 x86isd::setcc_carry), 1)
25789 // This eliminates the zext. This transformation is necessary because
25790 // ISD::SETCC is always legalized to i8.
25792 SDValue N0 = N->getOperand(0);
25793 EVT VT = N->getValueType(0);
25795 if (N0.getOpcode() == ISD::AND &&
25797 N0.getOperand(0).hasOneUse()) {
25798 SDValue N00 = N0.getOperand(0);
25799 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25800 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
25801 if (!C || C->getZExtValue() != 1)
25803 return DAG.getNode(ISD::AND, dl, VT,
25804 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25805 N00.getOperand(0), N00.getOperand(1)),
25806 DAG.getConstant(1, VT));
25810 if (N0.getOpcode() == ISD::TRUNCATE &&
25812 N0.getOperand(0).hasOneUse()) {
25813 SDValue N00 = N0.getOperand(0);
25814 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25815 return DAG.getNode(ISD::AND, dl, VT,
25816 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25817 N00.getOperand(0), N00.getOperand(1)),
25818 DAG.getConstant(1, VT));
25821 if (VT.is256BitVector()) {
25822 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25827 // (i8,i32 zext (udivrem (i8 x, i8 y))) ->
25828 // (i8,i32 (udivrem_zext_hreg (i8 x, i8 y)))
25829 // This exposes the zext to the udivrem lowering, so that it directly extends
25830 // from AH (which we otherwise need to do contortions to access).
25831 if (N0.getOpcode() == ISD::UDIVREM &&
25832 N0.getResNo() == 1 && N0.getValueType() == MVT::i8 &&
25833 (VT == MVT::i32 || VT == MVT::i64)) {
25834 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25835 SDValue R = DAG.getNode(X86ISD::UDIVREM8_ZEXT_HREG, dl, NodeTys,
25836 N0.getOperand(0), N0.getOperand(1));
25837 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25838 return R.getValue(1);
25844 // Optimize x == -y --> x+y == 0
25845 // x != -y --> x+y != 0
25846 static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
25847 const X86Subtarget* Subtarget) {
25848 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
25849 SDValue LHS = N->getOperand(0);
25850 SDValue RHS = N->getOperand(1);
25851 EVT VT = N->getValueType(0);
25854 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
25855 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
25856 if (C->getAPIntValue() == 0 && LHS.hasOneUse()) {
25857 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
25858 LHS.getValueType(), RHS, LHS.getOperand(1));
25859 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
25860 addV, DAG.getConstant(0, addV.getValueType()), CC);
25862 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
25863 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0)))
25864 if (C->getAPIntValue() == 0 && RHS.hasOneUse()) {
25865 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
25866 RHS.getValueType(), LHS, RHS.getOperand(1));
25867 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
25868 addV, DAG.getConstant(0, addV.getValueType()), CC);
25871 if (VT.getScalarType() == MVT::i1) {
25872 bool IsSEXT0 = (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
25873 (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
25874 bool IsVZero0 = ISD::isBuildVectorAllZeros(LHS.getNode());
25875 if (!IsSEXT0 && !IsVZero0)
25877 bool IsSEXT1 = (RHS.getOpcode() == ISD::SIGN_EXTEND) &&
25878 (RHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
25879 bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
25881 if (!IsSEXT1 && !IsVZero1)
25884 if (IsSEXT0 && IsVZero1) {
25885 assert(VT == LHS.getOperand(0).getValueType() && "Unexpected operand type");
25886 if (CC == ISD::SETEQ)
25887 return DAG.getNOT(DL, LHS.getOperand(0), VT);
25888 return LHS.getOperand(0);
25890 if (IsSEXT1 && IsVZero0) {
25891 assert(VT == RHS.getOperand(0).getValueType() && "Unexpected operand type");
25892 if (CC == ISD::SETEQ)
25893 return DAG.getNOT(DL, RHS.getOperand(0), VT);
25894 return RHS.getOperand(0);
25901 static SDValue PerformINSERTPSCombine(SDNode *N, SelectionDAG &DAG,
25902 const X86Subtarget *Subtarget) {
25904 MVT VT = N->getOperand(1)->getSimpleValueType(0);
25905 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
25906 "X86insertps is only defined for v4x32");
25908 SDValue Ld = N->getOperand(1);
25909 if (MayFoldLoad(Ld)) {
25910 // Extract the countS bits from the immediate so we can get the proper
25911 // address when narrowing the vector load to a specific element.
25912 // When the second source op is a memory address, insertps doesn't use
25913 // countS and just gets an f32 from that address.
25914 unsigned DestIndex =
25915 cast<ConstantSDNode>(N->getOperand(2))->getZExtValue() >> 6;
25916 Ld = NarrowVectorLoadToElement(cast<LoadSDNode>(Ld), DestIndex, DAG);
25920 // Create this as a scalar to vector to match the instruction pattern.
25921 SDValue LoadScalarToVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Ld);
25922 // countS bits are ignored when loading from memory on insertps, which
25923 // means we don't need to explicitly set them to 0.
25924 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N->getOperand(0),
25925 LoadScalarToVector, N->getOperand(2));
25928 // Helper function of PerformSETCCCombine. It materializes "setb reg"
25929 // as "sbb reg,reg", since it can be extended without zext and produces
25930 // an all-ones bit which is more useful than 0/1 in some cases.
25931 static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
25934 return DAG.getNode(ISD::AND, DL, VT,
25935 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
25936 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS),
25937 DAG.getConstant(1, VT));
25938 assert(VT == MVT::i1 && "Unexpected type for SETCC node");
25939 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1,
25940 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
25941 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS));
25944 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
25945 static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
25946 TargetLowering::DAGCombinerInfo &DCI,
25947 const X86Subtarget *Subtarget) {
25949 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
25950 SDValue EFLAGS = N->getOperand(1);
25952 if (CC == X86::COND_A) {
25953 // Try to convert COND_A into COND_B in an attempt to facilitate
25954 // materializing "setb reg".
25956 // Do not flip "e > c", where "c" is a constant, because Cmp instruction
25957 // cannot take an immediate as its first operand.
25959 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
25960 EFLAGS.getValueType().isInteger() &&
25961 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
25962 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
25963 EFLAGS.getNode()->getVTList(),
25964 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
25965 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
25966 return MaterializeSETB(DL, NewEFLAGS, DAG, N->getSimpleValueType(0));
25970 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
25971 // a zext and produces an all-ones bit which is more useful than 0/1 in some
25973 if (CC == X86::COND_B)
25974 return MaterializeSETB(DL, EFLAGS, DAG, N->getSimpleValueType(0));
25978 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
25979 if (Flags.getNode()) {
25980 SDValue Cond = DAG.getConstant(CC, MVT::i8);
25981 return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
25987 // Optimize branch condition evaluation.
25989 static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
25990 TargetLowering::DAGCombinerInfo &DCI,
25991 const X86Subtarget *Subtarget) {
25993 SDValue Chain = N->getOperand(0);
25994 SDValue Dest = N->getOperand(1);
25995 SDValue EFLAGS = N->getOperand(3);
25996 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
26000 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
26001 if (Flags.getNode()) {
26002 SDValue Cond = DAG.getConstant(CC, MVT::i8);
26003 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
26010 static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
26011 SelectionDAG &DAG) {
26012 // Take advantage of vector comparisons producing 0 or -1 in each lane to
26013 // optimize away operation when it's from a constant.
26015 // The general transformation is:
26016 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
26017 // AND(VECTOR_CMP(x,y), constant2)
26018 // constant2 = UNARYOP(constant)
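// Example: (v4f32 sint_to_fp (and (setcc X, Y), <1,1,1,1>)) becomes
// (bitcast (and (setcc X, Y), (bitcast <1.0,1.0,1.0,1.0>))), because each
// lane of the compare is either 0 or all-ones.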
26020 // Early exit if this isn't a vector operation, the operand of the
26021 // unary operation isn't a bitwise AND, or if the sizes of the operations
26022 // aren't the same.
26023 EVT VT = N->getValueType(0);
26024 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
26025 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
26026 VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
26029 // Now check that the other operand of the AND is a constant. We could
26030 // make the transformation for non-constant splats as well, but it's unclear
26031 // that would be a benefit as it would not eliminate any operations, just
26032 // perform one more step in scalar code before moving to the vector unit.
26033 if (BuildVectorSDNode *BV =
26034 dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
26035 // Bail out if the vector isn't a constant.
26036 if (!BV->isConstant())
26039 // Everything checks out. Build up the new and improved node.
26041 EVT IntVT = BV->getValueType(0);
26042 // Create a new constant of the appropriate type for the transformed
26044 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
26045 // The AND node needs bitcasts to/from an integer vector type around it.
26046 SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
26047 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
26048 N->getOperand(0)->getOperand(0), MaskConst);
26049 SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
26056 static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
26057 const X86Subtarget *Subtarget) {
26058 // First try to optimize away the conversion entirely when it's
26059 // conditionally from a constant. Vectors only.
26060 SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
26061 if (Res != SDValue())
26064 // Now move on to more general possibilities.
26065 SDValue Op0 = N->getOperand(0);
26066 EVT InVT = Op0->getValueType(0);
26068 // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32))
26069 if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
26071 MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
26072 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
26073 return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
26076 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
26077 // a 32-bit target where SSE doesn't support i64->FP operations.
26078 if (Op0.getOpcode() == ISD::LOAD) {
26079 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
26080 EVT VT = Ld->getValueType(0);
26081 if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
26082 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
26083 !Subtarget->is64Bit() && VT == MVT::i64) {
26084 SDValue FILDChain = Subtarget->getTargetLowering()->BuildFILD(
26085 SDValue(N, 0), Ld->getValueType(0), Ld->getChain(), Op0, DAG);
26086 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
26093 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
26094 static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
26095 X86TargetLowering::DAGCombinerInfo &DCI) {
26096 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
26097 // the result is either zero or one (depending on the input carry bit).
26098 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
26099 if (X86::isZeroNode(N->getOperand(0)) &&
26100 X86::isZeroNode(N->getOperand(1)) &&
26101 // We don't have a good way to replace an EFLAGS use, so only do this when
26103 SDValue(N, 1).use_empty()) {
26105 EVT VT = N->getValueType(0);
26106 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
26107 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
26108 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
26109 DAG.getConstant(X86::COND_B,MVT::i8),
26111 DAG.getConstant(1, VT));
26112 return DCI.CombineTo(N, Res1, CarryOut);
26118 // fold (add Y, (sete X, 0)) -> adc 0, Y
26119 // (add Y, (setne X, 0)) -> sbb -1, Y
26120 // (sub (sete X, 0), Y) -> sbb 0, Y
26121 // (sub (setne X, 0), Y) -> adc -1, Y
26122 static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
26125 // Look through ZExts.
26126 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
26127 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
26130 SDValue SetCC = Ext.getOperand(0);
26131 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
26134 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
26135 if (CC != X86::COND_E && CC != X86::COND_NE)
26138 SDValue Cmp = SetCC.getOperand(1);
26139 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
26140 !X86::isZeroNode(Cmp.getOperand(1)) ||
26141 !Cmp.getOperand(0).getValueType().isInteger())
26144 SDValue CmpOp0 = Cmp.getOperand(0);
26145 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
26146 DAG.getConstant(1, CmpOp0.getValueType()));
26148 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
26149 if (CC == X86::COND_NE)
26150 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
26151 DL, OtherVal.getValueType(), OtherVal,
26152 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
26153 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
26154 DL, OtherVal.getValueType(), OtherVal,
26155 DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
26158 /// PerformADDCombine - Do target-specific dag combines on integer adds.
26159 static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
26160 const X86Subtarget *Subtarget) {
26161 EVT VT = N->getValueType(0);
26162 SDValue Op0 = N->getOperand(0);
26163 SDValue Op1 = N->getOperand(1);
26165 // Try to synthesize horizontal adds from adds of shuffles.
26166 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26167 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26168 isHorizontalBinOp(Op0, Op1, true))
26169 return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
26171 return OptimizeConditionalInDecrement(N, DAG);
26174 static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
26175 const X86Subtarget *Subtarget) {
26176 SDValue Op0 = N->getOperand(0);
26177 SDValue Op1 = N->getOperand(1);
26179 // X86 can't encode an immediate LHS of a sub. See if we can push the
26180 // negation into a preceding instruction.
26181 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
26182 // If the RHS of the sub is a XOR with one use and a constant, invert the
26183 // immediate. Then add one to the LHS of the sub so we can turn
26184 // X-Y -> X+~Y+1, saving one register.
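// For example, (sub 5, (xor X, 3)) becomes (add (xor X, ~3), 6), which
// computes the same value without needing an immediate LHS for the sub.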
26185 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
26186 isa<ConstantSDNode>(Op1.getOperand(1))) {
26187 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
26188 EVT VT = Op0.getValueType();
26189 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
26191 DAG.getConstant(~XorC, VT));
26192 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
26193 DAG.getConstant(C->getAPIntValue()+1, VT));
26197 // Try to synthesize horizontal adds from adds of shuffles.
26198 EVT VT = N->getValueType(0);
26199 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26200 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26201 isHorizontalBinOp(Op0, Op1, true))
26202 return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
26204 return OptimizeConditionalInDecrement(N, DAG);
26207 /// performVZEXTCombine - Performs combines on X86ISD::VZEXT nodes.
26208 static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
26209 TargetLowering::DAGCombinerInfo &DCI,
26210 const X86Subtarget *Subtarget) {
26212 MVT VT = N->getSimpleValueType(0);
26213 SDValue Op = N->getOperand(0);
26214 MVT OpVT = Op.getSimpleValueType();
26215 MVT OpEltVT = OpVT.getVectorElementType();
26216 unsigned InputBits = OpEltVT.getSizeInBits() * VT.getVectorNumElements();
26218 // (vzext (bitcast (vzext x))) -> (vzext x)
26220 while (V.getOpcode() == ISD::BITCAST)
26221 V = V.getOperand(0);
26223 if (V != Op && V.getOpcode() == X86ISD::VZEXT) {
26224 MVT InnerVT = V.getSimpleValueType();
26225 MVT InnerEltVT = InnerVT.getVectorElementType();
26227 // If the element sizes match exactly, we can just do one larger vzext. This
26228 // is always an exact type match as vzext operates on integer types.
26229 if (OpEltVT == InnerEltVT) {
26230 assert(OpVT == InnerVT && "Types must match for vzext!");
26231 return DAG.getNode(X86ISD::VZEXT, DL, VT, V.getOperand(0));
26234 // The only other way we can combine them is if only a single element of the
26235 // inner vzext is used in the input to the outer vzext.
26236 if (InnerEltVT.getSizeInBits() < InputBits)
26239 // In this case, the inner vzext is completely dead because we're going to
26240 // only look at bits inside of the low element. Just do the outer vzext on
26241 // a bitcast of the input to the inner.
26242 return DAG.getNode(X86ISD::VZEXT, DL, VT,
26243 DAG.getNode(ISD::BITCAST, DL, OpVT, V));
26246 // Check if we can bypass extracting and re-inserting an element of an input
26247 // vector. Essentially:
26248 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
26249 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
26250 V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
26251 V.getOperand(0).getSimpleValueType().getSizeInBits() == InputBits) {
26252 SDValue ExtractedV = V.getOperand(0);
26253 SDValue OrigV = ExtractedV.getOperand(0);
26254 if (auto *ExtractIdx = dyn_cast<ConstantSDNode>(ExtractedV.getOperand(1)))
26255 if (ExtractIdx->getZExtValue() == 0) {
26256 MVT OrigVT = OrigV.getSimpleValueType();
26257 // Extract a subvector if necessary...
26258 if (OrigVT.getSizeInBits() > OpVT.getSizeInBits()) {
26259 int Ratio = OrigVT.getSizeInBits() / OpVT.getSizeInBits();
26260 OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
26261 OrigVT.getVectorNumElements() / Ratio);
26262 OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
26263 DAG.getIntPtrConstant(0));
26265 Op = DAG.getNode(ISD::BITCAST, DL, OpVT, OrigV);
26266 return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
26273 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
26274 DAGCombinerInfo &DCI) const {
26275 SelectionDAG &DAG = DCI.DAG;
26276 switch (N->getOpcode()) {
26278 case ISD::EXTRACT_VECTOR_ELT:
26279 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
26282 case X86ISD::SHRUNKBLEND:
26283 return PerformSELECTCombine(N, DAG, DCI, Subtarget);
26284 case ISD::BITCAST: return PerformBITCASTCombine(N, DAG);
26285 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget);
26286 case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget);
26287 case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget);
26288 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
26289 case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
26292 case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget);
26293 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
26294 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
26295 case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
26296 case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
26297 case ISD::MLOAD: return PerformMLOADCombine(N, DAG, DCI, Subtarget);
26298 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
26299 case ISD::MSTORE: return PerformMSTORECombine(N, DAG, Subtarget);
26300 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, Subtarget);
26301 case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
26302 case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget);
26304 case X86ISD::FOR: return PerformFORCombine(N, DAG);
26306 case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG);
26307 case X86ISD::FAND: return PerformFANDCombine(N, DAG);
26308 case X86ISD::FANDN: return PerformFANDNCombine(N, DAG);
26309 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
26310 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
26311 case ISD::ANY_EXTEND:
26312 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget);
26313 case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
26314 case ISD::SIGN_EXTEND_INREG:
26315 return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
26316 case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG,DCI,Subtarget);
26317 case ISD::SETCC: return PerformISDSETCCCombine(N, DAG, Subtarget);
26318 case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget);
26319 case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget);
26320 case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget);
26321 case X86ISD::SHUFP: // Handle all target specific shuffles
26322 case X86ISD::PALIGNR:
26323 case X86ISD::UNPCKH:
26324 case X86ISD::UNPCKL:
26325 case X86ISD::MOVHLPS:
26326 case X86ISD::MOVLHPS:
26327 case X86ISD::PSHUFB:
26328 case X86ISD::PSHUFD:
26329 case X86ISD::PSHUFHW:
26330 case X86ISD::PSHUFLW:
26331 case X86ISD::MOVSS:
26332 case X86ISD::MOVSD:
26333 case X86ISD::VPERMILPI:
26334 case X86ISD::VPERM2X128:
26335 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget);
26336 case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget);
26337 case ISD::INTRINSIC_WO_CHAIN:
26338 return PerformINTRINSIC_WO_CHAINCombine(N, DAG, Subtarget);
26339 case X86ISD::INSERTPS: {
26340 if (getTargetMachine().getOptLevel() > CodeGenOpt::None)
26341 return PerformINSERTPSCombine(N, DAG, Subtarget);
26344 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DAG, Subtarget);
26350 /// isTypeDesirableForOp - Return true if the target has native support for
26351 /// the specified value type and it is 'desirable' to use the type for the
26352 /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
26353 /// instruction encodings are longer and some i16 instructions are slow.
26354 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
26355 if (!isTypeLegal(VT))
26357 if (VT != MVT::i16)
26364 case ISD::SIGN_EXTEND:
26365 case ISD::ZERO_EXTEND:
26366 case ISD::ANY_EXTEND:
26379 /// IsDesirableToPromoteOp - This method queries the target whether it is
26380 /// beneficial for dag combiner to promote the specified node. If true, it
26381 /// should return the desired promotion type by reference.
26382 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
26383 EVT VT = Op.getValueType();
26384 if (VT != MVT::i16)
26387 bool Promote = false;
26388 bool Commute = false;
26389 switch (Op.getOpcode()) {
26392 LoadSDNode *LD = cast<LoadSDNode>(Op);
26393 // If the non-extending load has a single use and it's not live out, then it
26394 // might be folded.
26395 if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
26396 Op.hasOneUse()*/) {
26397 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
26398 UE = Op.getNode()->use_end(); UI != UE; ++UI) {
26399 // The only case where we'd want to promote LOAD (rather than it being
26400 // promoted as an operand) is when its only use is a liveout.
26401 if (UI->getOpcode() != ISD::CopyToReg)
26408 case ISD::SIGN_EXTEND:
26409 case ISD::ZERO_EXTEND:
26410 case ISD::ANY_EXTEND:
26415 SDValue N0 = Op.getOperand(0);
26416 // Look out for (store (shl (load), x)).
26417 if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
26430 SDValue N0 = Op.getOperand(0);
26431 SDValue N1 = Op.getOperand(1);
26432 if (!Commute && MayFoldLoad(N1))
26434 // Avoid disabling potential load folding opportunities.
26435 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
26437 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
26447 //===----------------------------------------------------------------------===//
26448 // X86 Inline Assembly Support
26449 //===----------------------------------------------------------------------===//
26452 // Helper to match a string separated by whitespace.
26453 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) {
26454 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace.
26456 for (unsigned i = 0, e = args.size(); i != e; ++i) {
26457 StringRef piece(*args[i]);
26458 if (!s.startswith(piece)) // Check if the piece matches.
26461 s = s.substr(piece.size());
26462 StringRef::size_type pos = s.find_first_not_of(" \t");
26463 if (pos == 0) // We matched a prefix.
26471 const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={};
26474 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
26476 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
26477 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
26478 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
26479 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
26481 if (AsmPieces.size() == 3)
26483 else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
26490 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
26491 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
26493 std::string AsmStr = IA->getAsmString();
26495 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
26496 if (!Ty || Ty->getBitWidth() % 16 != 0)
26499 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
26500 SmallVector<StringRef, 4> AsmPieces;
26501 SplitString(AsmStr, AsmPieces, ";\n");
26503 switch (AsmPieces.size()) {
26504 default: return false;
26506 // FIXME: this should verify that we are targeting a 486 or better. If not,
26507 // we will turn this bswap into something that will be lowered to logical
26508 // ops instead of emitting the bswap asm. For now, we don't support 486 or
26509 // lower so don't worry about this.
26511 if (matchAsm(AsmPieces[0], "bswap", "$0") ||
26512 matchAsm(AsmPieces[0], "bswapl", "$0") ||
26513 matchAsm(AsmPieces[0], "bswapq", "$0") ||
26514 matchAsm(AsmPieces[0], "bswap", "${0:q}") ||
26515 matchAsm(AsmPieces[0], "bswapl", "${0:q}") ||
26516 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) {
26517 // No need to check constraints, nothing other than the equivalent of
26518 // "=r,0" would be valid here.
26519 return IntrinsicLowering::LowerToByteSwap(CI);
26522 // rorw $$8, ${0:w} --> llvm.bswap.i16
26523 if (CI->getType()->isIntegerTy(16) &&
26524 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26525 (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") ||
26526 matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) {
26528 const std::string &ConstraintsStr = IA->getConstraintString();
26529 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26530 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26531 if (clobbersFlagRegisters(AsmPieces))
26532 return IntrinsicLowering::LowerToByteSwap(CI);
26536 if (CI->getType()->isIntegerTy(32) &&
26537 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26538 matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") &&
26539 matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") &&
26540 matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) {
26542 const std::string &ConstraintsStr = IA->getConstraintString();
26543 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26544 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26545 if (clobbersFlagRegisters(AsmPieces))
26546 return IntrinsicLowering::LowerToByteSwap(CI);
26549 if (CI->getType()->isIntegerTy(64)) {
26550 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
26551 if (Constraints.size() >= 2 &&
26552 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
26553 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
26554 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
26555 if (matchAsm(AsmPieces[0], "bswap", "%eax") &&
26556 matchAsm(AsmPieces[1], "bswap", "%edx") &&
26557 matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx"))
26558 return IntrinsicLowering::LowerToByteSwap(CI);
26566 /// getConstraintType - Given a constraint letter, return the type of
26567 /// constraint it is for this target.
26568 X86TargetLowering::ConstraintType
26569 X86TargetLowering::getConstraintType(const std::string &Constraint) const {
26570 if (Constraint.size() == 1) {
26571 switch (Constraint[0]) {
26582 return C_RegisterClass;
26606 return TargetLowering::getConstraintType(Constraint);
26609 /// Examine constraint type and operand type and determine a weight value.
26610 /// This object must already have been set up with the operand type
26611 /// and the current alternative constraint selected.
26612 TargetLowering::ConstraintWeight
26613 X86TargetLowering::getSingleConstraintMatchWeight(
26614 AsmOperandInfo &info, const char *constraint) const {
26615 ConstraintWeight weight = CW_Invalid;
26616 Value *CallOperandVal = info.CallOperandVal;
26617 // If we don't have a value, we can't do a match,
26618 // but allow it at the lowest weight.
26619 if (!CallOperandVal)
26621 Type *type = CallOperandVal->getType();
26622 // Look at the constraint type.
26623 switch (*constraint) {
26625 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
26636 if (CallOperandVal->getType()->isIntegerTy())
26637 weight = CW_SpecificReg;
26642 if (type->isFloatingPointTy())
26643 weight = CW_SpecificReg;
26646 if (type->isX86_MMXTy() && Subtarget->hasMMX())
26647 weight = CW_SpecificReg;
26651 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
26652 ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256()))
26653 weight = CW_Register;
26656 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
26657 if (C->getZExtValue() <= 31)
26658 weight = CW_Constant;
26662 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26663 if (C->getZExtValue() <= 63)
26664 weight = CW_Constant;
26668 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26669 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
26670 weight = CW_Constant;
26674 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26675 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
26676 weight = CW_Constant;
26680 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26681 if (C->getZExtValue() <= 3)
26682 weight = CW_Constant;
26686 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26687 if (C->getZExtValue() <= 0xff)
26688 weight = CW_Constant;
26693 if (dyn_cast<ConstantFP>(CallOperandVal)) {
26694 weight = CW_Constant;
26698 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26699 if ((C->getSExtValue() >= -0x80000000LL) &&
26700 (C->getSExtValue() <= 0x7fffffffLL))
26701 weight = CW_Constant;
26705 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26706 if (C->getZExtValue() <= 0xffffffff)
26707 weight = CW_Constant;
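// Illustrative example (not from the original source): given the
// multi-alternative constraint "Ir" and an operand that is the constant 5,
// the 'I' alternative above scores CW_Constant (5 <= 31), which outranks the
// register-class weight the generic handling typically gives the 'r'
// alternative, so the operand is emitted as an immediate; a non-constant
// operand would select the register alternative instead.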
26714 /// LowerXConstraint - try to replace an X constraint, which matches anything,
26715 /// with another that has more specific requirements based on the type of the
26716 /// corresponding operand.
26717 const char *X86TargetLowering::
26718 LowerXConstraint(EVT ConstraintVT) const {
26719 // FP X constraints get lowered to SSE1/2 registers if available, otherwise
26720 // 'f' like normal targets.
26721 if (ConstraintVT.isFloatingPoint()) {
26722 if (Subtarget->hasSSE2())
26723 return "Y";
26724 if (Subtarget->hasSSE1())
26725 return "x";
26728 return TargetLowering::LowerXConstraint(ConstraintVT);
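// For instance (illustrative): a floating-point operand constrained with "X"
// is re-lowered to 'Y' (an SSE register) when SSE2 is available and to 'x'
// with only SSE1; without SSE it is left to the generic lowering, which
// treats floating-point 'X' like 'f', i.e. an x87 stack register.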
26731 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
26732 /// vector. If it is invalid, don't add anything to Ops.
26733 void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
26734 std::string &Constraint,
26735 std::vector<SDValue> &Ops,
26736 SelectionDAG &DAG) const {
26739 // Only support length 1 constraints for now.
26740 if (Constraint.length() > 1) return;
26742 char ConstraintLetter = Constraint[0];
26743 switch (ConstraintLetter) {
26746 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26747 if (C->getZExtValue() <= 31) {
26748 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26754 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26755 if (C->getZExtValue() <= 63) {
26756 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26762 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26763 if (isInt<8>(C->getSExtValue())) {
26764 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26770 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26771 if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
26772 (Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
26773 Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
26779 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26780 if (C->getZExtValue() <= 3) {
26781 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26787 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26788 if (C->getZExtValue() <= 255) {
26789 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26795 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26796 if (C->getZExtValue() <= 127) {
26797 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26803 // 32-bit signed value
26804 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26805 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
26806 C->getSExtValue())) {
26807 // Widen to 64 bits here to get it sign extended.
26808 Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
26811 // FIXME gcc accepts some relocatable values here too, but only in certain
26812 // memory models; it's complicated.
26817 // 32-bit unsigned value
26818 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26819 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
26820 C->getZExtValue())) {
26821 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26825 // FIXME gcc accepts some relocatable values here too, but only in certain
26826 // memory models; it's complicated.
26830 // Literal immediates are always ok.
26831 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
26832 // Widen to 64 bits here to get it sign extended.
26833 Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
26837 // In any sort of PIC mode, addresses need to be computed at runtime by
26838 // adding in a register or some sort of table lookup. These can't
26839 // be used as immediates.
26840 if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
26843 // If we are in non-pic codegen mode, we allow the address of a global (with
26844 // an optional displacement) to be used with 'i'.
26845 GlobalAddressSDNode *GA = nullptr;
26846 int64_t Offset = 0;
26848 // Match either (GA), (GA+C), (GA+C1+C2), etc.
26850 if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
26851 Offset += GA->getOffset();
26853 } else if (Op.getOpcode() == ISD::ADD) {
26854 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
26855 Offset += C->getZExtValue();
26856 Op = Op.getOperand(0);
26859 } else if (Op.getOpcode() == ISD::SUB) {
26860 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
26861 Offset += -C->getZExtValue();
26862 Op = Op.getOperand(0);
26867 // Otherwise, this isn't something we can handle, reject it.
26871 const GlobalValue *GV = GA->getGlobal();
26872 // If we require an extra load to get this address, as in PIC mode, we
26873 // can't accept it.
26874 if (isGlobalStubReference(
26875 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget())))
26878 Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
26879 GA->getValueType(0), Offset);
26884 if (Result.getNode()) {
26885 Ops.push_back(Result);
26888 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
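// Illustrative uses (not from the original source; 'x' and 'Table' are
// hypothetical):
//   asm ("shll %1, %0" : "+r"(x) : "I"(3));  // 'I': 3 <= 31, folded to the immediate $3
//   asm ("" : : "i"(&Table[2]));             // 'i': non-PIC only; becomes a
//                                            // TargetGlobalAddress with the offset folded in
// An operand that fails its range check (e.g. "I"(64)) adds nothing to Ops,
// so it is rejected, matching the comment at the top of the function.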
26891 std::pair<unsigned, const TargetRegisterClass*>
26892 X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
26894 // First, see if this is a constraint that directly corresponds to an LLVM
26896 if (Constraint.size() == 1) {
26897 // GCC Constraint Letters
26898 switch (Constraint[0]) {
26900 // TODO: Slight differences here in allocation order and leaving
26901 // RIP in the class. Do they matter any more here than they do
26902 // in the normal allocation?
26903 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
26904 if (Subtarget->is64Bit()) {
26905 if (VT == MVT::i32 || VT == MVT::f32)
26906 return std::make_pair(0U, &X86::GR32RegClass);
26907 if (VT == MVT::i16)
26908 return std::make_pair(0U, &X86::GR16RegClass);
26909 if (VT == MVT::i8 || VT == MVT::i1)
26910 return std::make_pair(0U, &X86::GR8RegClass);
26911 if (VT == MVT::i64 || VT == MVT::f64)
26912 return std::make_pair(0U, &X86::GR64RegClass);
26915 // In 32-bit mode, fall through and handle 'q' exactly like 'Q' (Q_REGS).
26916 case 'Q': // Q_REGS
26917 if (VT == MVT::i32 || VT == MVT::f32)
26918 return std::make_pair(0U, &X86::GR32_ABCDRegClass);
26919 if (VT == MVT::i16)
26920 return std::make_pair(0U, &X86::GR16_ABCDRegClass);
26921 if (VT == MVT::i8 || VT == MVT::i1)
26922 return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
26923 if (VT == MVT::i64)
26924 return std::make_pair(0U, &X86::GR64_ABCDRegClass);
26926 case 'r': // GENERAL_REGS
26927 case 'l': // INDEX_REGS
26928 if (VT == MVT::i8 || VT == MVT::i1)
26929 return std::make_pair(0U, &X86::GR8RegClass);
26930 if (VT == MVT::i16)
26931 return std::make_pair(0U, &X86::GR16RegClass);
26932 if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
26933 return std::make_pair(0U, &X86::GR32RegClass);
26934 return std::make_pair(0U, &X86::GR64RegClass);
26935 case 'R': // LEGACY_REGS
26936 if (VT == MVT::i8 || VT == MVT::i1)
26937 return std::make_pair(0U, &X86::GR8_NOREXRegClass);
26938 if (VT == MVT::i16)
26939 return std::make_pair(0U, &X86::GR16_NOREXRegClass);
26940 if (VT == MVT::i32 || !Subtarget->is64Bit())
26941 return std::make_pair(0U, &X86::GR32_NOREXRegClass);
26942 return std::make_pair(0U, &X86::GR64_NOREXRegClass);
26943 case 'f': // FP Stack registers.
26944 // If SSE is enabled for this VT, use f80 to ensure the isel moves the
26945 // value to the correct fpstack register class.
26946 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
26947 return std::make_pair(0U, &X86::RFP32RegClass);
26948 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
26949 return std::make_pair(0U, &X86::RFP64RegClass);
26950 return std::make_pair(0U, &X86::RFP80RegClass);
26951 case 'y': // MMX_REGS if MMX allowed.
26952 if (!Subtarget->hasMMX()) break;
26953 return std::make_pair(0U, &X86::VR64RegClass);
26954 case 'Y': // SSE_REGS if SSE2 allowed
26955 if (!Subtarget->hasSSE2()) break;
26957 case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
26958 if (!Subtarget->hasSSE1()) break;
26960 switch (VT.SimpleTy) {
26962 // Scalar SSE types.
26965 return std::make_pair(0U, &X86::FR32RegClass);
26968 return std::make_pair(0U, &X86::FR64RegClass);
26976 return std::make_pair(0U, &X86::VR128RegClass);
26984 return std::make_pair(0U, &X86::VR256RegClass);
26989 return std::make_pair(0U, &X86::VR512RegClass);
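// Illustrative mapping (based on the cases above): "x" with a scalar 'float'
// operand selects FR32, with a <4 x float> vector it selects VR128, and a
// 256-bit vector operand selects VR256 when AVX is available.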
26995 // Use the default implementation in TargetLowering to convert the register
26996 // constraint into a member of a register class.
26997 std::pair<unsigned, const TargetRegisterClass*> Res;
26998 Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
27000 // Not found as a standard register?
27002 // Map the explicit FP-stack constraints {st(0)} .. {st(7)} onto FP0 .. FP7.
27003 if (Constraint.size() == 7 && Constraint[0] == '{' &&
27004 tolower(Constraint[1]) == 's' &&
27005 tolower(Constraint[2]) == 't' &&
27006 Constraint[3] == '(' &&
27007 (Constraint[4] >= '0' && Constraint[4] <= '7') &&
27008 Constraint[5] == ')' &&
27009 Constraint[6] == '}') {
27011 Res.first = X86::FP0+Constraint[4]-'0';
27012 Res.second = &X86::RFP80RegClass;
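// E.g. (illustrative): the explicit-register constraint "{st(1)}" resolves
// to X86::FP1 in the RFP80 register class.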
27016 // GCC allows "st(0)" to be called just plain "st".
27017 if (StringRef("{st}").equals_lower(Constraint)) {
27018 Res.first = X86::FP0;
27019 Res.second = &X86::RFP80RegClass;
27024 if (StringRef("{flags}").equals_lower(Constraint)) {
27025 Res.first = X86::EFLAGS;
27026 Res.second = &X86::CCRRegClass;
27030 // 'A' means EAX + EDX.
27031 if (Constraint == "A") {
27032 Res.first = X86::EAX;
27033 Res.second = &X86::GR32_ADRegClass;
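// E.g. (illustrative): on 32-bit targets "=A" is commonly used for
// instructions such as rdtsc that produce a 64-bit result in the EDX:EAX
// pair, which is exactly the pairing the GR32_AD class models.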
27039 // Otherwise, check to see if this is a register class of the wrong value
27040 // type. For example, we want to map "{ax},i32" -> {eax}; we don't want it to
27041 // turn into {ax},{dx}.
27042 if (Res.second->hasType(VT))
27043 return Res; // Correct type already, nothing to do.
27045 // All of the single-register GCC register classes map their values onto
27046 // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
27047 // really want an 8-bit or 32-bit register, map to the appropriate register
27048 // class and return the appropriate register.
27049 if (Res.second == &X86::GR16RegClass) {
27050 if (VT == MVT::i8 || VT == MVT::i1) {
27051 unsigned DestReg = 0;
27052 switch (Res.first) {
27054 case X86::AX: DestReg = X86::AL; break;
27055 case X86::DX: DestReg = X86::DL; break;
27056 case X86::CX: DestReg = X86::CL; break;
27057 case X86::BX: DestReg = X86::BL; break;
27060 Res.first = DestReg;
27061 Res.second = &X86::GR8RegClass;
27063 } else if (VT == MVT::i32 || VT == MVT::f32) {
27064 unsigned DestReg = 0;
27065 switch (Res.first) {
27067 case X86::AX: DestReg = X86::EAX; break;
27068 case X86::DX: DestReg = X86::EDX; break;
27069 case X86::CX: DestReg = X86::ECX; break;
27070 case X86::BX: DestReg = X86::EBX; break;
27071 case X86::SI: DestReg = X86::ESI; break;
27072 case X86::DI: DestReg = X86::EDI; break;
27073 case X86::BP: DestReg = X86::EBP; break;
27074 case X86::SP: DestReg = X86::ESP; break;
27077 Res.first = DestReg;
27078 Res.second = &X86::GR32RegClass;
27080 } else if (VT == MVT::i64 || VT == MVT::f64) {
27081 unsigned DestReg = 0;
27082 switch (Res.first) {
27084 case X86::AX: DestReg = X86::RAX; break;
27085 case X86::DX: DestReg = X86::RDX; break;
27086 case X86::CX: DestReg = X86::RCX; break;
27087 case X86::BX: DestReg = X86::RBX; break;
27088 case X86::SI: DestReg = X86::RSI; break;
27089 case X86::DI: DestReg = X86::RDI; break;
27090 case X86::BP: DestReg = X86::RBP; break;
27091 case X86::SP: DestReg = X86::RSP; break;
27094 Res.first = DestReg;
27095 Res.second = &X86::GR64RegClass;
27098 } else if (Res.second == &X86::FR32RegClass ||
27099 Res.second == &X86::FR64RegClass ||
27100 Res.second == &X86::VR128RegClass ||
27101 Res.second == &X86::VR256RegClass ||
27102 Res.second == &X86::FR32XRegClass ||
27103 Res.second == &X86::FR64XRegClass ||
27104 Res.second == &X86::VR128XRegClass ||
27105 Res.second == &X86::VR256XRegClass ||
27106 Res.second == &X86::VR512RegClass) {
27107 // Handle references to XMM physical registers that got mapped into the
27108 // wrong class. This can happen with constraints like {xmm0} where the
27109 // target-independent register mapper will just pick the first match it can
27110 // find, ignoring the required type.
27112 if (VT == MVT::f32 || VT == MVT::i32)
27113 Res.second = &X86::FR32RegClass;
27114 else if (VT == MVT::f64 || VT == MVT::i64)
27115 Res.second = &X86::FR64RegClass;
27116 else if (X86::VR128RegClass.hasType(VT))
27117 Res.second = &X86::VR128RegClass;
27118 else if (X86::VR256RegClass.hasType(VT))
27119 Res.second = &X86::VR256RegClass;
27120 else if (X86::VR512RegClass.hasType(VT))
27121 Res.second = &X86::VR512RegClass;
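// Two illustrative remappings: "{ax}" used with an i32 operand is rewritten
// to EAX in GR32 (rather than pairing up as {ax},{dx}), and "{xmm0}" used
// with an f64 operand is re-typed from the default VR128 class to FR64.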
27127 int X86TargetLowering::getScalingFactorCost(const AddrMode &AM,
27129 // Scaling factors are not free at all.
27130 // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
27131 // will take 2 allocations in the out-of-order engine instead of 1
27132 // for plain addressing mode, i.e., inst (reg1).
27133 // E.g.,
27134 // vaddps (%rsi,%rdx), %ymm0, %ymm1
27135 // requires two allocations (one for the load, one for the computation),
27136 // whereas
27137 // vaddps (%rsi), %ymm0, %ymm1
27138 // requires just one allocation, freeing up allocations for other operations
27139 // and leaving fewer micro-operations to execute.
27141 // On some X86 architectures this is even worse because, for stores for
27142 // instance, the complex addressing mode forces the instruction to use the
27143 // "load" ports instead of the dedicated "store" port.
27144 // E.g., on Haswell:
27145 // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
27146 // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
27147 if (isLegalAddressingMode(AM, Ty))
27148 // Scale represents reg2 * scale, thus account for 1
27149 // as soon as we use a second register.
27150 return AM.Scale != 0;
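// Illustrative costs (assuming the addressing mode is legal): a plain
// "(%rsi)" access has Scale == 0 and costs 0, while an indexed
// "(%rsi,%rdx)" access has Scale != 0 and costs 1, reflecting the extra
// allocation described above.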
27154 bool X86TargetLowering::isTargetFTOL() const {
27155 return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit();