//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
15 #include "X86ISelLowering.h"
16 #include "Utils/X86ShuffleDecode.h"
17 #include "X86CallingConv.h"
18 #include "X86FrameLowering.h"
19 #include "X86InstrBuilder.h"
20 #include "X86MachineFunctionInfo.h"
21 #include "X86TargetMachine.h"
22 #include "X86TargetObjectFile.h"
23 #include "llvm/ADT/SmallBitVector.h"
24 #include "llvm/ADT/SmallSet.h"
25 #include "llvm/ADT/Statistic.h"
26 #include "llvm/ADT/StringExtras.h"
27 #include "llvm/ADT/StringSwitch.h"
28 #include "llvm/ADT/VariadicFunction.h"
29 #include "llvm/CodeGen/IntrinsicLowering.h"
30 #include "llvm/CodeGen/MachineFrameInfo.h"
31 #include "llvm/CodeGen/MachineFunction.h"
32 #include "llvm/CodeGen/MachineInstrBuilder.h"
33 #include "llvm/CodeGen/MachineJumpTableInfo.h"
34 #include "llvm/CodeGen/MachineModuleInfo.h"
35 #include "llvm/CodeGen/MachineRegisterInfo.h"
36 #include "llvm/IR/CallSite.h"
37 #include "llvm/IR/CallingConv.h"
38 #include "llvm/IR/Constants.h"
39 #include "llvm/IR/DerivedTypes.h"
40 #include "llvm/IR/Function.h"
41 #include "llvm/IR/GlobalAlias.h"
42 #include "llvm/IR/GlobalVariable.h"
43 #include "llvm/IR/Instructions.h"
44 #include "llvm/IR/Intrinsics.h"
45 #include "llvm/MC/MCAsmInfo.h"
46 #include "llvm/MC/MCContext.h"
47 #include "llvm/MC/MCExpr.h"
48 #include "llvm/MC/MCSymbol.h"
49 #include "llvm/Support/CommandLine.h"
50 #include "llvm/Support/Debug.h"
51 #include "llvm/Support/ErrorHandling.h"
52 #include "llvm/Support/MathExtras.h"
53 #include "llvm/Target/TargetOptions.h"
54 #include "X86IntrinsicsInfo.h"
using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> ExperimentalVectorWideningLegalization(
    "x86-experimental-vector-widening-legalization", cl::init(false),
    cl::desc("Enable an experimental vector type legalization through widening "
             "rather than promotion."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLowering(
    "x86-experimental-vector-shuffle-lowering", cl::init(true),
    cl::desc("Enable an experimental vector shuffle lowering code path."),
    cl::Hidden);
static cl::opt<bool> ExperimentalVectorShuffleLegality(
    "x86-experimental-vector-shuffle-legality", cl::init(false),
    cl::desc("Enable experimental shuffle legality based on the experimental "
             "shuffle lowering. Should only be used with the experimental "
             "shuffle lowering."),
    cl::Hidden);

static cl::opt<int> ReciprocalEstimateRefinementSteps(
    "x86-recip-refinement-steps", cl::init(1),
    cl::desc("Specify the number of Newton-Raphson iterations applied to the "
             "result of the hardware reciprocal estimate instruction."),
    cl::NotHidden);

// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
                                SelectionDAG &DAG, SDLoc dl,
                                unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits()/vectorWidth;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements()/Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(ResultVT);

  // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR.
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
                               * ElemsPerChunk);
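  // For example, extracting a 128-bit chunk from a v8i32 at IdxVal == 5:
  // (5 * 32) / 128 = 1 chunk, times ElemsPerChunk (4) gives 4, so the chunk
  // that covers element 5 starts at element 4.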

  // If the input is a buildvector just emit a smaller one.
  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
                       makeArrayRef(Vec->op_begin() + NormalizedIdxVal,
                                    ElemsPerChunk));

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
}

/// Generate a DAG to grab 128-bits from a vector > 128 bits. This
/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
/// instructions or a simple subregister reference. Idx is an index in the
/// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert((Vec.getValueType().is256BitVector() ||
          Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 128);
}

/// Generate a DAG to grab 256-bits from a 512-bit vector.
static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 256);
}

static SDValue InsertSubVector(SDValue Result, SDValue Vec,
                               unsigned IdxVal, SelectionDAG &DAG,
                               SDLoc dl, unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  // Inserting UNDEF is Result
  if (Vec.getOpcode() == ISD::UNDEF)
    return Result;
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant vectorWidth bits.
  unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/vectorWidth)
                               * ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
}

/// Generate a DAG to put 128-bits into a vector > 128 bits. This
/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
/// simple superregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}

static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
}

/// Concat two 128-bit vectors into a 256 bit vector using VINSERTF128
/// instructions. This is used because creating CONCAT_VECTOR nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
}
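// For example, Concat128BitVectors(Lo, Hi, MVT::v8i32, 8, DAG, dl) builds a
// v8i32 with Lo in elements [0,3] and Hi in elements [4,7].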

static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert256BitVector(V, V2, NumElems/2, DAG, dl);
}

X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  TD = getDataLayout();

  // Set up the TargetLowering object.
  static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
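  // For example, a scalar i8 SETCC materializes 0 or 1, while a vector compare
  // such as PCMPGTD produces all-zeros or all-ones lanes.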

  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget->isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Bypass expensive divides on Atom when compiling with O2.
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget->hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget->hasSlowDivide64() && Subtarget->is64Bit())
      addBypassSlowDiv(64, 16);
  }
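  // Roughly: addBypassSlowDiv(N, M) asks the IR-level slow-division bypass to
  // guard an N-bit divide with a run-time check and use an M-bit divide when
  // both operands fit in M bits.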

  if (Subtarget->isTargetKnownWindowsMSVC()) {
    // Setup Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);

    // The _ftol2 runtime function has an unusual calling conv, which
    // is modeled by a special pseudo-instruction.
    setLibcallName(RTLIB::FPTOUINT_F64_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F64_I32, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I32, nullptr);
  }

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetWindowsGNU()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8, Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8, Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
  setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  } else if (!TM.Options.UseSoftFloat) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
  setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

  if (!TM.Options.UseSoftFloat) {
    // SSE has no i16 to fp conversion, only i32
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
      // f32 and f64 cases are Legal, f80 case is not
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP, MVT::i16, Custom);
      setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Promote);
  }

  // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
  setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Promote);
  } else if (!TM.Options.UseSoftFloat) {
    // Since AVX is a superset of SSE3, only check for SSE here.
    if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  }

  if (isTargetFTOL()) {
    // Use the _ftol2 runtime function, which has a pseudo-instruction
    // to handle its weird calling convention.
    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST, MVT::f32, Expand);
    setOperationAction(ISD::BITCAST, MVT::i32, Expand);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::BITCAST, MVT::f64, Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST, MVT::i64, Expand);
    }
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
    setOperationAction(ISD::ADDC, VT, Custom);
    setOperationAction(ISD::ADDE, VT, Custom);
    setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }
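  // Thanks to the two-result nodes above, an x/y and x%y pair CSEs into one
  // ISD::SDIVREM/UDIVREM, which selects to a single DIV/IDIV leaving the
  // quotient in EAX/RAX and the remainder in EDX/RDX.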

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::f80, Expand);
  setOperationAction(ISD::BR_CC, MVT::i8, Expand);
  setOperationAction(ISD::BR_CC, MVT::i16, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::f80, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i8, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i16, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  setOperationAction(ISD::FP_ROUND_INREG, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);
  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationAction(ISD::CTTZ, MVT::i8, Promote);
  AddPromotedToType (ISD::CTTZ, MVT::i8, MVT::i32);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i8, Promote);
  AddPromotedToType (ISD::CTTZ_ZERO_UNDEF, MVT::i8, MVT::i32);
  if (Subtarget->hasBMI()) {
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTTZ, MVT::i16, Custom);
    setOperationAction(ISD::CTTZ, MVT::i32, Custom);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ, MVT::i64, Custom);
  }
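  // Without BMI/TZCNT, CTTZ is custom lowered: in effect a BSF plus a CMOV
  // that substitutes the operand width when the input is zero (BSF leaves its
  // destination undefined in that case).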

  if (Subtarget->hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationAction(ISD::CTLZ, MVT::i8, Promote);
    AddPromotedToType (ISD::CTLZ, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Promote);
    AddPromotedToType (ISD::CTLZ_ZERO_UNDEF, MVT::i8, MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTLZ, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ, MVT::i32, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16, Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::CTLZ, MVT::i64, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }
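  // Without LZCNT, CTLZ is custom lowered around BSR: roughly, the bit index
  // is XORed with (width - 1) to get the leading-zero count, with a CMOV
  // patching up the zero-input case.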

  // Special handling for half-precision floating point conversions.
  // If we don't have F16C support, then lower half float conversions
  // into library calls.
  if (TM.Options.UseSoftFloat || !Subtarget->hasF16C()) {
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  }

  // There's never any support for operations beyond MVT::f32.
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f80, MVT::f16, Expand);

  if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP, MVT::i8, Promote);
  } else {
    setOperationAction(ISD::CTPOP, MVT::i8, Expand);
    setOperationAction(ISD::CTPOP, MVT::i16, Expand);
    setOperationAction(ISD::CTPOP, MVT::i32, Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Custom);

  if (!Subtarget->hasMOVBE())
    setOperationAction(ISD::BSWAP, MVT::i16, Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT, MVT::i1, Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT, MVT::i8, Custom);
  setOperationAction(ISD::SELECT, MVT::i16, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::f80, Custom);
  setOperationAction(ISD::SETCC, MVT::i8, Custom);
  setOperationAction(ISD::SETCC, MVT::i16, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::SETCC, MVT::f80, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT, MVT::i64, Custom);
    setOperationAction(ISD::SETCC, MVT::i64, Custom);
  }
  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP supported here is NOT intended to support
  // SjLj exception handling but a light-weight setjmp/longjmp replacement to
  // support continuation, user-level threading, etc. As a result, no
  // other SjLj exception interfaces are implemented and please don't build
  // your own exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);
  setOperationAction(ISD::JumpTable, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool, MVT::i64, Custom);
    setOperationAction(ISD::JumpTable, MVT::i64, Custom);
    setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64, Custom);
    setOperationAction(ISD::BlockAddress, MVT::i64, Custom);
  }
  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
    setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);
  }
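  // On 32-bit x86 an i64 shift is split into these *_PARTS nodes; roughly, the
  // custom lowering emits an SHLD/SHRD pair plus a fix-up for shift amounts of
  // 32 or more.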

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH, MVT::Other, Legal);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  // Expand certain atomics
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }
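  // ATOMIC_LOAD_SUB is custom so it can, in effect, be rewritten as an atomic
  // add of the negated operand, which maps onto LOCK ADD / XADD.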

  if (Subtarget->hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }

  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWin64()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  if (Subtarget->is64Bit() && !Subtarget->isTargetWin64()) {
    // TargetInfo::X86_64ABIBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Custom);
    setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  } else {
    // TargetInfo::CharPtrBuiltinVaList
    setOperationAction(ISD::VAARG, MVT::Other, Expand);
    setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, getPointerTy(), Custom);

  if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::FR64RegClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f64, Custom);
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f64, Custom);
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // Lower this to FGETSIGNx86 plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f64, Expand);
    setOperationAction(ISD::FCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle.
    addLegalFPImmediate(APFloat(+0.0)); // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS, MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG, MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN, MVT::f32, Expand);
    setOperationAction(ISD::FCOS, MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (!TM.Options.UseSoftFloat) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f64, Expand);
      setOperationAction(ISD::FSIN, MVT::f32, Expand);
      setOperationAction(ISD::FCOS, MVT::f64, Expand);
      setOperationAction(ISD::FCOS, MVT::f32, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Long double always uses X87.
  if (!TM.Options.UseSoftFloat) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
      addLegalFPImmediate(TmpFlt); // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt); // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2); // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
    }

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN, MVT::f80, Expand);
      setOperationAction(ISD::FCOS, MVT::f80, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
    }

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL, MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT, MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f80, Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::ADD, VT, Expand);
    setOperationAction(ISD::SUB, VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::MUL, VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  // MMX-sized vectors (other than x86mmx) are expected to be expanded
  // into smaller operations.
  setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
  setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
  setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
  setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
  setOperationAction(ISD::AND, MVT::v8i8, Expand);
  setOperationAction(ISD::AND, MVT::v4i16, Expand);
  setOperationAction(ISD::AND, MVT::v2i32, Expand);
  setOperationAction(ISD::AND, MVT::v1i64, Expand);
  setOperationAction(ISD::OR, MVT::v8i8, Expand);
  setOperationAction(ISD::OR, MVT::v4i16, Expand);
  setOperationAction(ISD::OR, MVT::v2i32, Expand);
  setOperationAction(ISD::OR, MVT::v1i64, Expand);
  setOperationAction(ISD::XOR, MVT::v8i8, Expand);
  setOperationAction(ISD::XOR, MVT::v4i16, Expand);
  setOperationAction(ISD::XOR, MVT::v2i32, Expand);
  setOperationAction(ISD::XOR, MVT::v1i64, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
  setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
  setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
  setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
  setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
  setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
  setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, &X86::VR128RegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::FABS, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, &X86::VR128RegClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
    setOperationAction(ISD::FABS, MVT::v2f64, Custom);

    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Only provide customized ctpop vector bit twiddling for vector types we
    // know to perform better than using the popcnt instructions on each vector
    // element. If popcnt isn't supported, always provide the custom version.
    if (!Subtarget->hasPOPCNT()) {
      setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
      setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);
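      // Roughly: the custom lowering computes the population count with
      // vector-wide bit manipulation instead of extracting each element for a
      // scalar popcount.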
    }

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      // Do not attempt to custom lower non-128-bit vectors
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    // We support custom legalizing of sext and anyext loads for specific
    // memory vector types which we can load as a scalar (or sequence of
    // scalars) and extend in-register to a legal 128-bit vector type. For sext
    // loads these must work with a single scalar load.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
    }
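    // For example, an extending load of v4i8 can become a single 32-bit scalar
    // load followed by an in-register extension, rather than four separate
    // byte loads.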

    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-128-bit vectors
      if (!VT.is128BitVector())
        continue;

      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType (ISD::AND, VT, MVT::v2i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType (ISD::OR, VT, MVT::v2i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType (ISD::XOR, VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType (ISD::LOAD, VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v2i64);
    }
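    // The Promote + AddPromotedToType pairing means, e.g., (and v16i8 a, b) is
    // legalized as bitcasts to v2i64 around a v2i64 AND, so only the v2i64
    // patterns (PAND/POR/PXOR) are needed.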

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    // As there is no 64-bit GPR available, we need to build a special custom
    // sequence to convert from v2i32 to v2f32.
    if (!Subtarget->is64Bit())
      setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);

    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    setOperationAction(ISD::VSELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v2i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4i32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8i16, Custom);
    // There is no BLENDI for byte vectors. We don't need to custom lower
    // some vselects for now.
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);

    // SSE41 brings specific instructions for doing vector sign extend even in
    // cases where we don't have SRA.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
    }

    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
    setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);

    setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);

    // i8 and i16 vectors are custom because the source register and source
    // memory operand types are not the same width. f32 vectors are
    // custom since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    // FIXME: these should be Legal, but that's only for the case where
    // the index is constant. For now custom expand to deal with that.
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }

  if (Subtarget->hasSSE2()) {
    setOperationAction(ISD::SRL, MVT::v8i16, Custom);
    setOperationAction(ISD::SRL, MVT::v16i8, Custom);

    setOperationAction(ISD::SHL, MVT::v8i16, Custom);
    setOperationAction(ISD::SHL, MVT::v16i8, Custom);

    setOperationAction(ISD::SRA, MVT::v8i16, Custom);
    setOperationAction(ISD::SRA, MVT::v16i8, Custom);

    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
    setOperationAction(ISD::SRL, MVT::v2i64, Custom);
    setOperationAction(ISD::SRL, MVT::v4i32, Custom);

    setOperationAction(ISD::SHL, MVT::v2i64, Custom);
    setOperationAction(ISD::SHL, MVT::v4i32, Custom);

    setOperationAction(ISD::SRA, MVT::v4i32, Custom);
  }
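  // SSE2 has no instruction for per-element (non-uniform) shift amounts, so
  // these are custom lowered; a v8i16 SHL by a constant vector of amounts, for
  // instance, can be emitted as a PMULLW by powers of two.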

  if (!TM.Options.UseSoftFloat && Subtarget->hasFp256()) {
    addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64, &X86::VR256RegClass);

    setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i64, Legal);

    setOperationAction(ISD::FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v8f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v8f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
    setOperationAction(ISD::FABS, MVT::v8f32, Custom);

    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
    setOperationAction(ISD::FABS, MVT::v4f64, Custom);

    // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
    // even though v8i16 is a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);

    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);

    setOperationAction(ISD::SRL, MVT::v16i16, Custom);
    setOperationAction(ISD::SRL, MVT::v32i8, Custom);

    setOperationAction(ISD::SHL, MVT::v16i16, Custom);
    setOperationAction(ISD::SHL, MVT::v32i8, Custom);

    setOperationAction(ISD::SRA, MVT::v16i16, Custom);
    setOperationAction(ISD::SRA, MVT::v32i8, Custom);

    setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i64, Custom);

    setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::VSELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8i32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);
1266 if (Subtarget->hasFMA() || Subtarget->hasFMA4()) {
1267 setOperationAction(ISD::FMA, MVT::v8f32, Legal);
1268 setOperationAction(ISD::FMA, MVT::v4f64, Legal);
1269 setOperationAction(ISD::FMA, MVT::v4f32, Legal);
1270 setOperationAction(ISD::FMA, MVT::v2f64, Legal);
1271 setOperationAction(ISD::FMA, MVT::f32, Legal);
1272 setOperationAction(ISD::FMA, MVT::f64, Legal);
1275 if (Subtarget->hasInt256()) {
1276 setOperationAction(ISD::ADD, MVT::v4i64, Legal);
1277 setOperationAction(ISD::ADD, MVT::v8i32, Legal);
1278 setOperationAction(ISD::ADD, MVT::v16i16, Legal);
1279 setOperationAction(ISD::ADD, MVT::v32i8, Legal);
1281 setOperationAction(ISD::SUB, MVT::v4i64, Legal);
1282 setOperationAction(ISD::SUB, MVT::v8i32, Legal);
1283 setOperationAction(ISD::SUB, MVT::v16i16, Legal);
1284 setOperationAction(ISD::SUB, MVT::v32i8, Legal);
1286 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1287 setOperationAction(ISD::MUL, MVT::v8i32, Legal);
1288 setOperationAction(ISD::MUL, MVT::v16i16, Legal);
1289 // Don't lower v32i8 because there is no 128-bit byte mul
1291 setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
1292 setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
1293 setOperationAction(ISD::MULHU, MVT::v16i16, Legal);
1294 setOperationAction(ISD::MULHS, MVT::v16i16, Legal);
1296 setOperationAction(ISD::VSELECT, MVT::v16i16, Custom);
1297 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1299 // The custom lowering of UINT_TO_FP for v8i32 becomes interesting
1300 // when we have a 256-bit-wide blend with immediate.
1301 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
1303 // Only provide customized ctpop vector bit twiddling for vector types we
1304 // know to perform better than using the popcnt instructions on each
1305 // vector element. If popcnt isn't supported, always provide the custom version.
1307 if (!Subtarget->hasPOPCNT())
1308 setOperationAction(ISD::CTPOP, MVT::v4i64, Custom);
1310 // Custom CTPOP always performs better on natively supported v8i32
1311 setOperationAction(ISD::CTPOP, MVT::v8i32, Custom);
1313 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1314 setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1315 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1316 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1317 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1318 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1319 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1321 setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1322 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1323 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1324 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1325 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1326 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1328 setOperationAction(ISD::ADD, MVT::v4i64, Custom);
1329 setOperationAction(ISD::ADD, MVT::v8i32, Custom);
1330 setOperationAction(ISD::ADD, MVT::v16i16, Custom);
1331 setOperationAction(ISD::ADD, MVT::v32i8, Custom);
1333 setOperationAction(ISD::SUB, MVT::v4i64, Custom);
1334 setOperationAction(ISD::SUB, MVT::v8i32, Custom);
1335 setOperationAction(ISD::SUB, MVT::v16i16, Custom);
1336 setOperationAction(ISD::SUB, MVT::v32i8, Custom);
1338 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1339 setOperationAction(ISD::MUL, MVT::v8i32, Custom);
1340 setOperationAction(ISD::MUL, MVT::v16i16, Custom);
1341 // Don't lower v32i8 because there is no 128-bit byte mul
1344 // In the customized shift lowering, the legal cases in AVX2 will be recognized.
1346 setOperationAction(ISD::SRL, MVT::v4i64, Custom);
1347 setOperationAction(ISD::SRL, MVT::v8i32, Custom);
1349 setOperationAction(ISD::SHL, MVT::v4i64, Custom);
1350 setOperationAction(ISD::SHL, MVT::v8i32, Custom);
1352 setOperationAction(ISD::SRA, MVT::v8i32, Custom);
1354 // Custom lower several nodes for 256-bit types.
1355 for (MVT VT : MVT::vector_valuetypes()) {
1356 if (VT.getScalarSizeInBits() >= 32) {
1357 setOperationAction(ISD::MLOAD, VT, Legal);
1358 setOperationAction(ISD::MSTORE, VT, Legal);
1360 // Extract subvector is special because the value type
1361 // (result) is 128-bit but the source is 256-bit wide.
1362 if (VT.is128BitVector()) {
1363 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1365 // Do not attempt to custom lower other non-256-bit vectors
1366 if (!VT.is256BitVector())
1369 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1370 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1371 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1372 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1373 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1374 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1375 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1378 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
1379 for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
1380 MVT VT = (MVT::SimpleValueType)i;
1382 // Do not attempt to promote non-256-bit vectors
1383 if (!VT.is256BitVector())
1386 setOperationAction(ISD::AND, VT, Promote);
1387 AddPromotedToType (ISD::AND, VT, MVT::v4i64);
1388 setOperationAction(ISD::OR, VT, Promote);
1389 AddPromotedToType (ISD::OR, VT, MVT::v4i64);
1390 setOperationAction(ISD::XOR, VT, Promote);
1391 AddPromotedToType (ISD::XOR, VT, MVT::v4i64);
1392 setOperationAction(ISD::LOAD, VT, Promote);
1393 AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
1394 setOperationAction(ISD::SELECT, VT, Promote);
1395 AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
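// AVX-512: 512-bit vectors are assigned to the VR512 register class below, while
// compare results and mask values use the dedicated VK* mask register classes
// (one bit per element).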
1399 if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512()) {
1400 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1401 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1402 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1403 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1405 addRegisterClass(MVT::i1, &X86::VK1RegClass);
1406 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1407 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
1409 for (MVT VT : MVT::fp_vector_valuetypes())
1410 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
1412 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
1413 setOperationAction(ISD::SETCC, MVT::i1, Custom);
1414 setOperationAction(ISD::XOR, MVT::i1, Legal);
1415 setOperationAction(ISD::OR, MVT::i1, Legal);
1416 setOperationAction(ISD::AND, MVT::i1, Legal);
1417 setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
1418 setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
1419 setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
1420 setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
1421 setOperationAction(ISD::LOAD, MVT::v16i1, Legal);
1423 setOperationAction(ISD::FADD, MVT::v16f32, Legal);
1424 setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
1425 setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
1426 setOperationAction(ISD::FDIV, MVT::v16f32, Legal);
1427 setOperationAction(ISD::FSQRT, MVT::v16f32, Legal);
1428 setOperationAction(ISD::FNEG, MVT::v16f32, Custom);
1430 setOperationAction(ISD::FADD, MVT::v8f64, Legal);
1431 setOperationAction(ISD::FSUB, MVT::v8f64, Legal);
1432 setOperationAction(ISD::FMUL, MVT::v8f64, Legal);
1433 setOperationAction(ISD::FDIV, MVT::v8f64, Legal);
1434 setOperationAction(ISD::FSQRT, MVT::v8f64, Legal);
1435 setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
1436 setOperationAction(ISD::FMA, MVT::v8f64, Legal);
1437 setOperationAction(ISD::FMA, MVT::v16f32, Legal);
1439 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
1440 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
1441 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
1442 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
1443 if (Subtarget->is64Bit()) {
1444 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
1445 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
1446 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
1447 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
1449 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1450 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1451 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1452 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1453 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1454 setOperationAction(ISD::SINT_TO_FP, MVT::v8i1, Custom);
1455 setOperationAction(ISD::SINT_TO_FP, MVT::v16i1, Custom);
1456 setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Promote);
1457 setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Promote);
1458 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1459 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1460 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1461 setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
1462 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
1464 setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
1465 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1466 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1467 setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
1468 setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
1469 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1470 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1471 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1472 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1473 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1474 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
1475 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
1476 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
1478 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
1479 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
1480 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
1481 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
1482 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
1483 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Legal);
1485 setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
1486 setOperationAction(ISD::SETCC, MVT::v8i1, Custom);
1488 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1490 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
1491 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
1492 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
1493 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
1494 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
1495 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
1496 setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
1497 setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
1498 setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
1500 setOperationAction(ISD::ADD, MVT::v8i64, Legal);
1501 setOperationAction(ISD::ADD, MVT::v16i32, Legal);
1503 setOperationAction(ISD::SUB, MVT::v8i64, Legal);
1504 setOperationAction(ISD::SUB, MVT::v16i32, Legal);
1506 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1508 setOperationAction(ISD::SRL, MVT::v8i64, Custom);
1509 setOperationAction(ISD::SRL, MVT::v16i32, Custom);
1511 setOperationAction(ISD::SHL, MVT::v8i64, Custom);
1512 setOperationAction(ISD::SHL, MVT::v16i32, Custom);
1514 setOperationAction(ISD::SRA, MVT::v8i64, Custom);
1515 setOperationAction(ISD::SRA, MVT::v16i32, Custom);
1517 setOperationAction(ISD::AND, MVT::v8i64, Legal);
1518 setOperationAction(ISD::OR, MVT::v8i64, Legal);
1519 setOperationAction(ISD::XOR, MVT::v8i64, Legal);
1520 setOperationAction(ISD::AND, MVT::v16i32, Legal);
1521 setOperationAction(ISD::OR, MVT::v16i32, Legal);
1522 setOperationAction(ISD::XOR, MVT::v16i32, Legal);
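// AVX-512 CD (conflict detection) also provides VPLZCNTD/VPLZCNTQ, which is why
// vector CTLZ can be marked Legal below.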
1524 if (Subtarget->hasCDI()) {
1525 setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
1526 setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
1529 // Custom lower several nodes.
1530 for (MVT VT : MVT::vector_valuetypes()) {
1531 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1532 // Extract subvector is special because the value type
1533 // (result) is 256/128-bit but the source is 512-bit wide.
1534 if (VT.is128BitVector() || VT.is256BitVector()) {
1535 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1537 if (VT.getVectorElementType() == MVT::i1)
1538 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1540 // Do not attempt to custom lower other non-512-bit vectors
1541 if (!VT.is512BitVector())
1544 if (EltSize >= 32) {
1545 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1546 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1547 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1548 setOperationAction(ISD::VSELECT, VT, Legal);
1549 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1550 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1551 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1552 setOperationAction(ISD::MLOAD, VT, Legal);
1553 setOperationAction(ISD::MSTORE, VT, Legal);
1556 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1557 MVT VT = (MVT::SimpleValueType)i;
1559 // Do not attempt to promote non-512-bit vectors.
1560 if (!VT.is512BitVector())
1563 setOperationAction(ISD::SELECT, VT, Promote);
1564 AddPromotedToType (ISD::SELECT, VT, MVT::v8i64);
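// AVX-512 BW adds native byte/word element support (v64i8, v32i16) along with
// the corresponding v32i1/v64i1 mask types.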
1568 if (!TM.Options.UseSoftFloat && Subtarget->hasBWI()) {
1569 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1570 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1572 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1573 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1575 setOperationAction(ISD::LOAD, MVT::v32i16, Legal);
1576 setOperationAction(ISD::LOAD, MVT::v64i8, Legal);
1577 setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
1578 setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
1579 setOperationAction(ISD::ADD, MVT::v32i16, Legal);
1580 setOperationAction(ISD::ADD, MVT::v64i8, Legal);
1581 setOperationAction(ISD::SUB, MVT::v32i16, Legal);
1582 setOperationAction(ISD::SUB, MVT::v64i8, Legal);
1583 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1585 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1586 const MVT VT = (MVT::SimpleValueType)i;
1588 const unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1590 // Do not attempt to promote non-512-bit vectors.
1591 if (!VT.is512BitVector())
1595 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1596 setOperationAction(ISD::VSELECT, VT, Legal);
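// AVX-512 VL extends the EVEX encodings to 128- and 256-bit vectors, which also
// makes the narrow v2i1/v4i1 mask types usable.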
1601 if (!TM.Options.UseSoftFloat && Subtarget->hasVLX()) {
1602 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1603 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1605 setOperationAction(ISD::SETCC, MVT::v4i1, Custom);
1606 setOperationAction(ISD::SETCC, MVT::v2i1, Custom);
1607 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Legal);
1609 setOperationAction(ISD::AND, MVT::v8i32, Legal);
1610 setOperationAction(ISD::OR, MVT::v8i32, Legal);
1611 setOperationAction(ISD::XOR, MVT::v8i32, Legal);
1612 setOperationAction(ISD::AND, MVT::v4i32, Legal);
1613 setOperationAction(ISD::OR, MVT::v4i32, Legal);
1614 setOperationAction(ISD::XOR, MVT::v4i32, Legal);
1617 // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
1618 // of this type with custom code.
1619 for (MVT VT : MVT::vector_valuetypes())
1620 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
1622 // We want to custom lower some of our intrinsics.
1623 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1624 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1625 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1626 if (!Subtarget->is64Bit())
1627 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1629 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1630 // handle type legalization for these operations here.
1632 // FIXME: We really should do custom legalization for addition and
1633 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1634 // than generic legalization for 64-bit multiplication-with-overflow, though.
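// Note: the loop below is understood to walk the scalar integer types i8, i16
// and i32, plus i64 on 64-bit targets (hence the 3 + is64Bit() bound); the
// per-iteration definition of VT is elided here.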
1635 for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
1636 // Add/Sub/Mul with overflow operations are custom lowered.
1638 setOperationAction(ISD::SADDO, VT, Custom);
1639 setOperationAction(ISD::UADDO, VT, Custom);
1640 setOperationAction(ISD::SSUBO, VT, Custom);
1641 setOperationAction(ISD::USUBO, VT, Custom);
1642 setOperationAction(ISD::SMULO, VT, Custom);
1643 setOperationAction(ISD::UMULO, VT, Custom);
1647 if (!Subtarget->is64Bit()) {
1648 // These libcalls are not available in 32-bit.
1649 setLibcallName(RTLIB::SHL_I128, nullptr);
1650 setLibcallName(RTLIB::SRL_I128, nullptr);
1651 setLibcallName(RTLIB::SRA_I128, nullptr);
1654 // Combine sin / cos into one node or libcall if possible.
1655 if (Subtarget->hasSinCos()) {
1656 setLibcallName(RTLIB::SINCOS_F32, "sincosf");
1657 setLibcallName(RTLIB::SINCOS_F64, "sincos");
1658 if (Subtarget->isTargetDarwin()) {
1659 // For MacOSX, we don't want the normal expansion of a libcall to sincos.
1660 // We want to issue a libcall to __sincos_stret to avoid memory traffic.
1661 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1662 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1666 if (Subtarget->isTargetWin64()) {
1667 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1668 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1669 setOperationAction(ISD::SREM, MVT::i128, Custom);
1670 setOperationAction(ISD::UREM, MVT::i128, Custom);
1671 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1672 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
1675 // We have target-specific dag combine patterns for the following nodes:
1676 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1677 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1678 setTargetDAGCombine(ISD::BITCAST);
1679 setTargetDAGCombine(ISD::VSELECT);
1680 setTargetDAGCombine(ISD::SELECT);
1681 setTargetDAGCombine(ISD::SHL);
1682 setTargetDAGCombine(ISD::SRA);
1683 setTargetDAGCombine(ISD::SRL);
1684 setTargetDAGCombine(ISD::OR);
1685 setTargetDAGCombine(ISD::AND);
1686 setTargetDAGCombine(ISD::ADD);
1687 setTargetDAGCombine(ISD::FADD);
1688 setTargetDAGCombine(ISD::FSUB);
1689 setTargetDAGCombine(ISD::FMA);
1690 setTargetDAGCombine(ISD::SUB);
1691 setTargetDAGCombine(ISD::LOAD);
1692 setTargetDAGCombine(ISD::MLOAD);
1693 setTargetDAGCombine(ISD::STORE);
1694 setTargetDAGCombine(ISD::MSTORE);
1695 setTargetDAGCombine(ISD::ZERO_EXTEND);
1696 setTargetDAGCombine(ISD::ANY_EXTEND);
1697 setTargetDAGCombine(ISD::SIGN_EXTEND);
1698 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1699 setTargetDAGCombine(ISD::TRUNCATE);
1700 setTargetDAGCombine(ISD::SINT_TO_FP);
1701 setTargetDAGCombine(ISD::SETCC);
1702 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1703 setTargetDAGCombine(ISD::BUILD_VECTOR);
1704 setTargetDAGCombine(ISD::MUL);
1705 setTargetDAGCombine(ISD::XOR);
1707 computeRegisterProperties();
1709 // On Darwin, -Os means optimize for size without hurting performance,
1710 // so do not reduce the limit.
1711 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
1712 MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
1713 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
1714 MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1715 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
1716 MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1717 setPrefLoopAlignment(4); // 2^4 bytes.
1719 // Predictable cmovs don't hurt on Atom because it's in-order.
1720 PredictableSelectIsExpensive = !Subtarget->isAtom();
1721 EnableExtLdPromotion = true;
1722 setPrefFunctionAlignment(4); // 2^4 bytes.
1724 verifyIntrinsicTables();
1727 // This has so far only been implemented for 64-bit MachO.
1728 bool X86TargetLowering::useLoadStackGuardNode() const {
1729 return Subtarget->isTargetMachO() && Subtarget->is64Bit();
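// With the experimental widening flag, an illegal vector type is widened to the
// next wider legal vector (e.g. v3i32 -> v4i32) instead of being split or
// promoted; single-element and i1-element vectors keep the default action.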
1732 TargetLoweringBase::LegalizeTypeAction
1733 X86TargetLowering::getPreferredVectorAction(EVT VT) const {
1734 if (ExperimentalVectorWideningLegalization &&
1735 VT.getVectorNumElements() != 1 &&
1736 VT.getVectorElementType().getSimpleVT() != MVT::i1)
1737 return TypeWidenVector;
1739 return TargetLoweringBase::getPreferredVectorAction(VT);
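// setcc result types: with AVX-512 a vector compare produces a mask vector
// (e.g. v16i32 -> v16i1, given suitable element sizes); without it the result
// is an integer vector of the same shape (e.g. v4f32 -> v4i32).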
1742 EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
if (!VT.isVector())
1744 return Subtarget->hasAVX512() ? MVT::i1 : MVT::i8;
1746 const unsigned NumElts = VT.getVectorNumElements();
1747 const EVT EltVT = VT.getVectorElementType();
1748 if (VT.is512BitVector()) {
1749 if (Subtarget->hasAVX512())
1750 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1751 EltVT == MVT::f32 || EltVT == MVT::f64)
1753 case 8: return MVT::v8i1;
1754 case 16: return MVT::v16i1;
1756 if (Subtarget->hasBWI())
1757 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1759 case 32: return MVT::v32i1;
1760 case 64: return MVT::v64i1;
1764 if (VT.is256BitVector() || VT.is128BitVector()) {
1765 if (Subtarget->hasVLX())
1766 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1767 EltVT == MVT::f32 || EltVT == MVT::f64)
1769 case 2: return MVT::v2i1;
1770 case 4: return MVT::v4i1;
1771 case 8: return MVT::v8i1;
1773 if (Subtarget->hasBWI() && Subtarget->hasVLX())
1774 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1776 case 8: return MVT::v8i1;
1777 case 16: return MVT::v16i1;
1778 case 32: return MVT::v32i1;
1782 return VT.changeVectorElementTypeToInteger();
1785 /// Helper for getByValTypeAlignment to determine
1786 /// the desired ByVal argument alignment.
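// Example: a struct containing a <4 x float> field reports MaxAlign = 16, so
// the byval copy gets 16-byte alignment.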
1787 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
1790 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1791 if (VTy->getBitWidth() == 128)
MaxAlign = 16;
1793 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1794 unsigned EltAlign = 0;
1795 getMaxByValAlign(ATy->getElementType(), EltAlign);
1796 if (EltAlign > MaxAlign)
1797 MaxAlign = EltAlign;
1798 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1799 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1800 unsigned EltAlign = 0;
1801 getMaxByValAlign(STy->getElementType(i), EltAlign);
1802 if (EltAlign > MaxAlign)
1803 MaxAlign = EltAlign;
1810 /// Return the desired alignment for ByVal aggregate
1811 /// function arguments in the caller parameter area. For X86, aggregates
1812 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
1813 /// are at 4-byte boundaries.
1814 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
1815 if (Subtarget->is64Bit()) {
1816 // Max of 8 and alignment of type.
1817 unsigned TyAlign = TD->getABITypeAlignment(Ty);
return TyAlign > 8 ? TyAlign : 8;
}
unsigned Align = 4;
1824 if (Subtarget->hasSSE1())
1825 getMaxByValAlign(Ty, Align);
return Align;
1829 /// Returns the target specific optimal type for load
1830 /// and store operations as a result of memset, memcpy, and memmove
1831 /// lowering. If DstAlign is zero, that means the destination's
1832 /// alignment can satisfy any constraint. Similarly, if SrcAlign is zero it
1833 /// means there isn't a need to check it against the alignment requirement,
1834 /// probably because the source does not need to be loaded. If 'IsMemset' is
1835 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
1836 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
1837 /// source is constant so it does not need to be loaded.
1838 /// It returns EVT::Other if the type should be determined using generic
1839 /// target-independent logic.
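// For example, a 32-byte-or-larger memcpy on an AVX2 target with fast unaligned
// access (or 16-byte-aligned operands) is expanded using 256-bit vector moves
// (v8i32).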
1841 X86TargetLowering::getOptimalMemOpType(uint64_t Size,
1842 unsigned DstAlign, unsigned SrcAlign,
1843 bool IsMemset, bool ZeroMemset,
1845 MachineFunction &MF) const {
1846 const Function *F = MF.getFunction();
1847 if ((!IsMemset || ZeroMemset) &&
1848 !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
1850 (Subtarget->isUnalignedMemAccessFast() ||
1851 ((DstAlign == 0 || DstAlign >= 16) &&
1852 (SrcAlign == 0 || SrcAlign >= 16)))) {
1854 if (Subtarget->hasInt256())
1856 if (Subtarget->hasFp256())
1859 if (Subtarget->hasSSE2())
1861 if (Subtarget->hasSSE1())
1863 } else if (!MemcpyStrSrc && Size >= 8 &&
1864 !Subtarget->is64Bit() &&
1865 Subtarget->hasSSE2()) {
1866 // Do not use f64 to lower memcpy if source is string constant. It's
1867 // better to use i32 to avoid the loads.
1871 if (Subtarget->is64Bit() && Size >= 8)
1876 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
if (VT == MVT::f32)
1878 return X86ScalarSSEf32;
1879 else if (VT == MVT::f64)
1880 return X86ScalarSSEf64;
return true;
1885 X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
1890 *Fast = Subtarget->isUnalignedMemAccessFast();
1894 /// Return the entry encoding for a jump table in the
1895 /// current function. The returned value is a member of the
1896 /// MachineJumpTableInfo::JTEntryKind enum.
1897 unsigned X86TargetLowering::getJumpTableEncoding() const {
1898 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF symbol.
1900 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1901 Subtarget->isPICStyleGOT())
1902 return MachineJumpTableInfo::EK_Custom32;
1904 // Otherwise, use the normal jump table encoding heuristics.
1905 return TargetLowering::getJumpTableEncoding();
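// For the custom (EK_Custom32) encoding, each jump table entry is emitted as a
// @GOTOFF reference to the target block's symbol, as done below.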
1909 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1910 const MachineBasicBlock *MBB,
1911 unsigned uid,MCContext &Ctx) const{
1912 assert(MBB->getParent()->getTarget().getRelocationModel() == Reloc::PIC_ &&
1913 Subtarget->isPICStyleGOT());
1914 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF entries.
1916 return MCSymbolRefExpr::Create(MBB->getSymbol(),
1917 MCSymbolRefExpr::VK_GOTOFF, Ctx);
1920 /// Returns relocation base for the given PIC jumptable.
1921 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1922 SelectionDAG &DAG) const {
1923 if (!Subtarget->is64Bit())
1924 // This doesn't have SDLoc associated with it, but is not really the
1925 // same as a Register.
1926 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy());
return Table;
1930 /// This returns the relocation base for the given PIC jumptable,
1931 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
1932 const MCExpr *X86TargetLowering::
1933 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1934 MCContext &Ctx) const {
1935 // X86-64 uses RIP relative addressing based on the jump table label.
1936 if (Subtarget->isPICStyleRIPRel())
1937 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1939 // Otherwise, the reference is relative to the PIC base.
1940 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
1943 // FIXME: Why is this routine here? Move to RegInfo!
1944 std::pair<const TargetRegisterClass*, uint8_t>
1945 X86TargetLowering::findRepresentativeClass(MVT VT) const{
1946 const TargetRegisterClass *RRC = nullptr;
uint8_t Cost = 1;
1948 switch (VT.SimpleTy) {
default:
1950 return TargetLowering::findRepresentativeClass(VT);
1951 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1952 RRC = Subtarget->is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
break;
case MVT::x86mmx:
1955 RRC = &X86::VR64RegClass;
break;
1957 case MVT::f32: case MVT::f64:
1958 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1959 case MVT::v4f32: case MVT::v2f64:
1960 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
1962 RRC = &X86::VR128RegClass;
1965 return std::make_pair(RRC, Cost);
1968 bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
1969 unsigned &Offset) const {
1970 if (!Subtarget->isTargetLinux())
return false;
1973 if (Subtarget->is64Bit()) {
1974 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
1976 if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
1988 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1989 unsigned DestAS) const {
1990 assert(SrcAS != DestAS && "Expected different address spaces!");
1992 return SrcAS < 256 && DestAS < 256;
1995 //===----------------------------------------------------------------------===//
1996 // Return Value Calling Convention Implementation
1997 //===----------------------------------------------------------------------===//
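// The RetCC_X86 / CC_X86 tables used below are generated from the calling
// convention definitions in X86CallingConv.td and pulled in via this include.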
1999 #include "X86GenCallingConv.inc"
2002 X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2003 MachineFunction &MF, bool isVarArg,
2004 const SmallVectorImpl<ISD::OutputArg> &Outs,
2005 LLVMContext &Context) const {
2006 SmallVector<CCValAssign, 16> RVLocs;
2007 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2008 return CCInfo.CheckReturn(Outs, RetCC_X86);
2011 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2012 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
return ScratchRegs;
2017 X86TargetLowering::LowerReturn(SDValue Chain,
2018 CallingConv::ID CallConv, bool isVarArg,
2019 const SmallVectorImpl<ISD::OutputArg> &Outs,
2020 const SmallVectorImpl<SDValue> &OutVals,
2021 SDLoc dl, SelectionDAG &DAG) const {
2022 MachineFunction &MF = DAG.getMachineFunction();
2023 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2025 SmallVector<CCValAssign, 16> RVLocs;
2026 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2027 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
SDValue Flag;
2030 SmallVector<SDValue, 6> RetOps;
2031 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2032 // Operand #1 = Bytes To Pop
2033 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
2036 // Copy the result values into the output registers.
2037 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2038 CCValAssign &VA = RVLocs[i];
2039 assert(VA.isRegLoc() && "Can only return in registers!");
2040 SDValue ValToCopy = OutVals[i];
2041 EVT ValVT = ValToCopy.getValueType();
2043 // Promote values to the appropriate types.
2044 if (VA.getLocInfo() == CCValAssign::SExt)
2045 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2046 else if (VA.getLocInfo() == CCValAssign::ZExt)
2047 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2048 else if (VA.getLocInfo() == CCValAssign::AExt)
2049 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2050 else if (VA.getLocInfo() == CCValAssign::BCvt)
2051 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy);
2053 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2054 "Unexpected FP-extend for return value.");
2056 // If this is x86-64, and we disabled SSE, we can't return FP values,
2057 // or SSE or MMX vectors.
2058 if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
2059 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
2060 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
2061 report_fatal_error("SSE register return with SSE disabled");
2063 // Likewise we can't return F64 values with SSE1 only. gcc does so, but
2064 // llvm-gcc has never done it right and no one has noticed, so this
2065 // should be OK for now.
2066 if (ValVT == MVT::f64 &&
2067 (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
2068 report_fatal_error("SSE2 register return with SSE2 disabled");
2070 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2071 // the RET instruction and handled by the FP Stackifier.
2072 if (VA.getLocReg() == X86::FP0 ||
2073 VA.getLocReg() == X86::FP1) {
2074 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2075 // change the value to the FP stack register class.
2076 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2077 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2078 RetOps.push_back(ValToCopy);
2079 // Don't emit a copytoreg.
continue;
}
2083 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2084 // which is returned in RAX / RDX.
2085 if (Subtarget->is64Bit()) {
2086 if (ValVT == MVT::x86mmx) {
2087 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2088 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy);
2089 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2091 // If we don't have SSE2 available, convert to v4f32 so the generated
2092 // register is legal.
2093 if (!Subtarget->hasSSE2())
2094 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,ValToCopy);
2099 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
2100 Flag = Chain.getValue(1);
2101 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2104 // The x86-64 ABIs require that for returning structs by value we copy
2105 // the sret argument into %rax/%eax (depending on ABI) for the return.
2106 // Win32 requires us to put the sret argument to %eax as well.
2107 // We saved the argument into a virtual register in the entry block,
2108 // so now we copy the value out and into %rax/%eax.
2110 // Checking Function.hasStructRetAttr() here is insufficient because the IR
2111 // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
2112 // false, then an sret argument may be implicitly inserted in the SelDAG. In
2113 // either case FuncInfo->setSRetReturnReg() will have been called.
2114 if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
2115 assert((Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) &&
2116 "No need for an sret register");
2117 SDValue Val = DAG.getCopyFromReg(Chain, dl, SRetReg, getPointerTy());
unsigned RetValReg
2120 = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ?
2121 X86::RAX : X86::EAX;
2122 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2123 Flag = Chain.getValue(1);
2125 // RAX/EAX now acts like a return value.
2126 RetOps.push_back(DAG.getRegister(RetValReg, getPointerTy()));
2129 RetOps[0] = Chain; // Update chain.
2131 // Add the flag if we have it.
if (Flag.getNode())
2133 RetOps.push_back(Flag);
2135 return DAG.getNode(X86ISD::RET_FLAG, dl, MVT::Other, RetOps);
2138 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2139 if (N->getNumValues() != 1)
return false;
2141 if (!N->hasNUsesOfValue(1, 0))
return false;
2144 SDValue TCChain = Chain;
2145 SDNode *Copy = *N->use_begin();
2146 if (Copy->getOpcode() == ISD::CopyToReg) {
2147 // If the copy has a glue operand, we conservatively assume it isn't safe to
2148 // perform a tail call.
2149 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
return false;
2151 TCChain = Copy->getOperand(0);
2152 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
return false;
2155 bool HasRet = false;
2156 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2158 if (UI->getOpcode() != X86ISD::RET_FLAG)
2160 // If we are returning more than one value, we can definitely
2161 // not make a tail call; see PR19530.
2162 if (UI->getNumOperands() > 4)
2164 if (UI->getNumOperands() == 4 &&
2165 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2178 X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
2179 ISD::NodeType ExtendKind) const {
MVT ReturnMVT;
2181 // TODO: Is this also valid on 32-bit?
2182 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
2183 ReturnMVT = MVT::i8;
else
2185 ReturnMVT = MVT::i32;
2187 EVT MinVT = getRegisterType(Context, ReturnMVT);
2188 return VT.bitsLT(MinVT) ? MinVT : VT;
2191 /// Lower the result values of a call into the
2192 /// appropriate copies out of appropriate physical registers.
2195 X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
2196 CallingConv::ID CallConv, bool isVarArg,
2197 const SmallVectorImpl<ISD::InputArg> &Ins,
2198 SDLoc dl, SelectionDAG &DAG,
2199 SmallVectorImpl<SDValue> &InVals) const {
2201 // Assign locations to each value returned by this call.
2202 SmallVector<CCValAssign, 16> RVLocs;
2203 bool Is64Bit = Subtarget->is64Bit();
2204 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
*DAG.getContext());
2206 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2208 // Copy all of the result registers out of their specified physreg.
2209 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2210 CCValAssign &VA = RVLocs[i];
2211 EVT CopyVT = VA.getValVT();
2213 // If this is x86-64, and we disabled SSE, we can't return FP values
2214 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
2215 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
2216 report_fatal_error("SSE register return with SSE disabled");
2219 // If we prefer to use the value in xmm registers, copy it out as f80 and
2220 // use a truncate to move it from fp stack reg to xmm reg.
2221 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
2222 isScalarFPTypeInSSEReg(VA.getValVT()))
CopyVT = MVT::f80;
2225 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
2226 CopyVT, InFlag).getValue(1);
2227 SDValue Val = Chain.getValue(0);
2229 if (CopyVT != VA.getValVT())
2230 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
2231 // This truncation won't change the value.
2232 DAG.getIntPtrConstant(1));
2234 InFlag = Chain.getValue(2);
2235 InVals.push_back(Val);
2241 //===----------------------------------------------------------------------===//
2242 // C & StdCall & Fast Calling Convention implementation
2243 //===----------------------------------------------------------------------===//
2244 // The StdCall calling convention seems to be the standard for many Windows
2245 // API routines. It differs from the C calling convention just a little:
2246 // the callee cleans up the stack, not the caller. Symbols are also
2247 // decorated in some fancy way :) It doesn't support any vector arguments.
2248 // For info on fast calling convention see Fast Calling Convention (tail call)
2249 // implementation LowerX86_32FastCCCallTo.
2251 /// CallIsStructReturn - Determines whether a call uses struct return semantics.
2253 enum StructReturnType {
NotStructReturn,
RegStructReturn,
StackStructReturn
};
2258 static StructReturnType
2259 callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
2261 return NotStructReturn;
2263 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
2264 if (!Flags.isSRet())
2265 return NotStructReturn;
2266 if (Flags.isInReg())
2267 return RegStructReturn;
2268 return StackStructReturn;
2271 /// Determines whether a function uses struct return semantics.
2272 static StructReturnType
2273 argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
2275 return NotStructReturn;
2277 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
2278 if (!Flags.isSRet())
2279 return NotStructReturn;
2280 if (Flags.isInReg())
2281 return RegStructReturn;
2282 return StackStructReturn;
2285 /// Make a copy of an aggregate at address specified by "Src" to address
2286 /// "Dst" with size and alignment information specified by the specific
2287 /// parameter attribute. The copy will be passed as a byval function parameter.
2289 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
2290 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
2292 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
2294 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2295 /*isVolatile*/false, /*AlwaysInline=*/true,
2296 MachinePointerInfo(), MachinePointerInfo());
2299 /// Return true if the calling convention is one that
2300 /// supports tail call optimization.
2301 static bool IsTailCallConvention(CallingConv::ID CC) {
2302 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
2303 CC == CallingConv::HiPE);
2306 /// \brief Return true if the calling convention is a C calling convention.
2307 static bool IsCCallConvention(CallingConv::ID CC) {
2308 return (CC == CallingConv::C || CC == CallingConv::X86_64_Win64 ||
2309 CC == CallingConv::X86_64_SysV);
2312 bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
2313 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
2317 CallingConv::ID CalleeCC = CS.getCallingConv();
2318 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
2324 /// Return true if the function is being made into
2325 /// a tailcall target by changing its ABI.
2326 static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
2327 bool GuaranteedTailCallOpt) {
2328 return GuaranteedTailCallOpt && IsTailCallConvention(CC);
2332 X86TargetLowering::LowerMemArgument(SDValue Chain,
2333 CallingConv::ID CallConv,
2334 const SmallVectorImpl<ISD::InputArg> &Ins,
2335 SDLoc dl, SelectionDAG &DAG,
2336 const CCValAssign &VA,
2337 MachineFrameInfo *MFI,
2339 // Create the nodes corresponding to a load from this parameter slot.
2340 ISD::ArgFlagsTy Flags = Ins[i].Flags;
2341 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(
2342 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
2343 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
2346 // If value is passed by pointer we have address passed instead of the value
2348 if (VA.getLocInfo() == CCValAssign::Indirect)
2349 ValVT = VA.getLocVT();
2351 ValVT = VA.getValVT();
2353 // FIXME: For now, all byval parameter objects are marked mutable. This can be
2354 // changed with more analysis.
2355 // In case of tail call optimization mark all arguments mutable, since they
2356 // could be overwritten by the lowering of arguments in case of a tail call.
2357 if (Flags.isByVal()) {
2358 unsigned Bytes = Flags.getByValSize();
2359 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
2360 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
2361 return DAG.getFrameIndex(FI, getPointerTy());
2363 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
2364 VA.getLocMemOffset(), isImmutable);
2365 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2366 return DAG.getLoad(ValVT, dl, Chain, FIN,
2367 MachinePointerInfo::getFixedStack(FI),
2368 false, false, false, 0);
2372 // FIXME: Get this from tablegen.
2373 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
2374 const X86Subtarget *Subtarget) {
2375 assert(Subtarget->is64Bit());
2377 if (Subtarget->isCallingConvWin64(CallConv)) {
2378 static const MCPhysReg GPR64ArgRegsWin64[] = {
2379 X86::RCX, X86::RDX, X86::R8, X86::R9
2381 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
2384 static const MCPhysReg GPR64ArgRegs64Bit[] = {
2385 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
2387 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
2390 // FIXME: Get this from tablegen.
2391 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
2392 CallingConv::ID CallConv,
2393 const X86Subtarget *Subtarget) {
2394 assert(Subtarget->is64Bit());
2395 if (Subtarget->isCallingConvWin64(CallConv)) {
2396 // The XMM registers which might contain var arg parameters are shadowed
2397 // in their paired GPR. So we only need to save the GPR to their home
// slots.
2399 // TODO: __vectorcall will change this.
return None;
2403 const Function *Fn = MF.getFunction();
2404 bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
2405 assert(!(MF.getTarget().Options.UseSoftFloat && NoImplicitFloatOps) &&
2406 "SSE register cannot be used when SSE is disabled!");
2407 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps ||
2408 !Subtarget->hasSSE1())
2409 // Kernel mode asks for SSE to be disabled, so there are no XMM argument
// registers to save.
return None;
2413 static const MCPhysReg XMMArgRegs64Bit[] = {
2414 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2415 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2417 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
2421 X86TargetLowering::LowerFormalArguments(SDValue Chain,
2422 CallingConv::ID CallConv,
2424 const SmallVectorImpl<ISD::InputArg> &Ins,
2427 SmallVectorImpl<SDValue> &InVals)
2429 MachineFunction &MF = DAG.getMachineFunction();
2430 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2432 const Function* Fn = MF.getFunction();
2433 if (Fn->hasExternalLinkage() &&
2434 Subtarget->isTargetCygMing() &&
2435 Fn->getName() == "main")
2436 FuncInfo->setForceFramePointer(true);
2438 MachineFrameInfo *MFI = MF.getFrameInfo();
2439 bool Is64Bit = Subtarget->is64Bit();
2440 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2442 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2443 "Var args not supported with calling convention fastcc, ghc or hipe");
2445 // Assign locations to all of the incoming arguments.
2446 SmallVector<CCValAssign, 16> ArgLocs;
2447 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2449 // Allocate shadow area for Win64
2451 CCInfo.AllocateStack(32, 8);
2453 CCInfo.AnalyzeFormalArguments(Ins, CC_X86);
2455 unsigned LastVal = ~0U;
2457 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2458 CCValAssign &VA = ArgLocs[i];
2459 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later
2461 assert(VA.getValNo() != LastVal &&
2462 "Don't support value assigned to multiple locs yet");
2464 LastVal = VA.getValNo();
2466 if (VA.isRegLoc()) {
2467 EVT RegVT = VA.getLocVT();
2468 const TargetRegisterClass *RC;
2469 if (RegVT == MVT::i32)
2470 RC = &X86::GR32RegClass;
2471 else if (Is64Bit && RegVT == MVT::i64)
2472 RC = &X86::GR64RegClass;
2473 else if (RegVT == MVT::f32)
2474 RC = &X86::FR32RegClass;
2475 else if (RegVT == MVT::f64)
2476 RC = &X86::FR64RegClass;
2477 else if (RegVT.is512BitVector())
2478 RC = &X86::VR512RegClass;
2479 else if (RegVT.is256BitVector())
2480 RC = &X86::VR256RegClass;
2481 else if (RegVT.is128BitVector())
2482 RC = &X86::VR128RegClass;
2483 else if (RegVT == MVT::x86mmx)
2484 RC = &X86::VR64RegClass;
2485 else if (RegVT == MVT::i1)
2486 RC = &X86::VK1RegClass;
2487 else if (RegVT == MVT::v8i1)
2488 RC = &X86::VK8RegClass;
2489 else if (RegVT == MVT::v16i1)
2490 RC = &X86::VK16RegClass;
2491 else if (RegVT == MVT::v32i1)
2492 RC = &X86::VK32RegClass;
2493 else if (RegVT == MVT::v64i1)
2494 RC = &X86::VK64RegClass;
2496 llvm_unreachable("Unknown argument type!");
2498 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2499 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
2501 // If this is an 8 or 16-bit value, it is really passed promoted to 32
2502 // bits. Insert an assert[sz]ext to capture this, then truncate to the right size.
2504 if (VA.getLocInfo() == CCValAssign::SExt)
2505 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2506 DAG.getValueType(VA.getValVT()));
2507 else if (VA.getLocInfo() == CCValAssign::ZExt)
2508 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2509 DAG.getValueType(VA.getValVT()));
2510 else if (VA.getLocInfo() == CCValAssign::BCvt)
2511 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
2513 if (VA.isExtInLoc()) {
2514 // Handle MMX values passed in XMM regs.
2515 if (RegVT.isVector())
2516 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
2518 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2521 assert(VA.isMemLoc());
2522 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i);
2525 // If value is passed via pointer - do a load.
2526 if (VA.getLocInfo() == CCValAssign::Indirect)
2527 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
2528 MachinePointerInfo(), false, false, false, 0);
2530 InVals.push_back(ArgValue);
2533 if (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) {
2534 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2535 // The x86-64 ABIs require that for returning structs by value we copy
2536 // the sret argument into %rax/%eax (depending on ABI) for the return.
2537 // Win32 requires us to put the sret argument to %eax as well.
2538 // Save the argument into a virtual register so that we can access it
2539 // from the return points.
2540 if (Ins[i].Flags.isSRet()) {
2541 unsigned Reg = FuncInfo->getSRetReturnReg();
2543 MVT PtrTy = getPointerTy();
2544 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
2545 FuncInfo->setSRetReturnReg(Reg);
2547 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[i]);
2548 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
2554 unsigned StackSize = CCInfo.getNextStackOffset();
2555 // Align stack specially for tail calls.
2556 if (FuncIsMadeTailCallSafe(CallConv,
2557 MF.getTarget().Options.GuaranteedTailCallOpt))
2558 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
2560 // If the function takes a variable number of arguments, make a frame index for
2561 // the start of the first vararg value... for expansion of llvm.va_start. We
2562 // can skip this if there are no va_start calls.
2563 if (MFI->hasVAStart() &&
2564 (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
2565 CallConv != CallingConv::X86_ThisCall))) {
2566 FuncInfo->setVarArgsFrameIndex(
2567 MFI->CreateFixedObject(1, StackSize, true));
2570 // Figure out if XMM registers are in use.
2571 assert(!(MF.getTarget().Options.UseSoftFloat &&
2572 Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
2573 "SSE register cannot be used when SSE is disabled!");
2575 // 64-bit calling conventions support varargs and register parameters, so we
2576 // have to do extra work to spill them in the prologue.
2577 if (Is64Bit && isVarArg && MFI->hasVAStart()) {
2578 // Find the first unallocated argument registers.
2579 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
2580 ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
2581 unsigned NumIntRegs =
2582 CCInfo.getFirstUnallocated(ArgGPRs.data(), ArgGPRs.size());
2583 unsigned NumXMMRegs =
2584 CCInfo.getFirstUnallocated(ArgXMMs.data(), ArgXMMs.size());
2585 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
2586 "SSE register cannot be used when SSE is disabled!");
2588 // Gather all the live in physical registers.
2589 SmallVector<SDValue, 6> LiveGPRs;
2590 SmallVector<SDValue, 8> LiveXMMRegs;
2592 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
2593 unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
2595 DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
2597 if (!ArgXMMs.empty()) {
2598 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2599 ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
2600 for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
2601 unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
2602 LiveXMMRegs.push_back(
2603 DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
2608 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
2609 // Get to the caller-allocated home save location. Add 8 to account
2610 // for the return address.
2611 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
2612 FuncInfo->setRegSaveFrameIndex(
2613 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
2614 // Fixup to set vararg frame on shadow area (4 x i64).
2616 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
2618 // For X86-64, if there are vararg parameters that are passed via
2619 // registers, then we must store them to their spots on the stack so
2620 // they may be loaded by dereferencing the result of va_next.
2621 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
2622 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
2623 FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
2624 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
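// The register save area laid out here is ArgGPRs.size() * 8 bytes of GPR spill
// space followed by ArgXMMs.size() * 16 bytes of XMM spill space; va_arg walks
// it using the GP/FP offsets recorded above.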
2627 // Store the integer parameter registers.
2628 SmallVector<SDValue, 8> MemOps;
2629 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
2631 unsigned Offset = FuncInfo->getVarArgsGPOffset();
2632 for (SDValue Val : LiveGPRs) {
2633 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
2634 DAG.getIntPtrConstant(Offset));
2636 DAG.getStore(Val.getValue(1), dl, Val, FIN,
2637 MachinePointerInfo::getFixedStack(
2638 FuncInfo->getRegSaveFrameIndex(), Offset),
2640 MemOps.push_back(Store);
2644 if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
2645 // Now store the XMM (fp + vector) parameter registers.
2646 SmallVector<SDValue, 12> SaveXMMOps;
2647 SaveXMMOps.push_back(Chain);
2648 SaveXMMOps.push_back(ALVal);
2649 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2650 FuncInfo->getRegSaveFrameIndex()));
2651 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2652 FuncInfo->getVarArgsFPOffset()));
2653 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
2655 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
2656 MVT::Other, SaveXMMOps));
2659 if (!MemOps.empty())
2660 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
2663 if (isVarArg && MFI->hasMustTailInVarArgFunc()) {
2664 // Find the largest legal vector type.
2665 MVT VecVT = MVT::Other;
2666 // FIXME: Only some x86_32 calling conventions support AVX512.
2667 if (Subtarget->hasAVX512() &&
2668 (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
2669 CallConv == CallingConv::Intel_OCL_BI)))
2670 VecVT = MVT::v16f32;
2671 else if (Subtarget->hasAVX())
VecVT = MVT::v8f32;
2673 else if (Subtarget->hasSSE2())
VecVT = MVT::v4f32;
2676 // We forward some GPRs and some vector types.
2677 SmallVector<MVT, 2> RegParmTypes;
2678 MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
2679 RegParmTypes.push_back(IntVT);
2680 if (VecVT != MVT::Other)
2681 RegParmTypes.push_back(VecVT);
2683 // Compute the set of forwarded registers. The rest are scratch.
2684 SmallVectorImpl<ForwardedRegister> &Forwards =
2685 FuncInfo->getForwardedMustTailRegParms();
2686 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
2688 // Conservatively forward AL on x86_64, since it might be used for varargs.
2689 if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
2690 unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2691 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
2694 // Copy all forwards from physical to virtual registers.
2695 for (ForwardedRegister &F : Forwards) {
2696 // FIXME: Can we use a less constrained schedule?
2697 SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
2698 F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
2699 Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
2703 // Some CCs need callee pop.
2704 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
2705 MF.getTarget().Options.GuaranteedTailCallOpt)) {
2706 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
2708 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
2709 // If this is an sret function, the return should pop the hidden pointer.
2710 if (!Is64Bit && !IsTailCallConvention(CallConv) &&
2711 !Subtarget->getTargetTriple().isOSMSVCRT() &&
2712 argsAreStructReturn(Ins) == StackStructReturn)
2713 FuncInfo->setBytesToPopOnReturn(4);
2717 // RegSaveFrameIndex is X86-64 only.
2718 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
2719 if (CallConv == CallingConv::X86_FastCall ||
2720 CallConv == CallingConv::X86_ThisCall)
2721 // fastcc functions can't have varargs.
2722 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
2725 FuncInfo->setArgumentStackSize(StackSize);
2731 X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
2732 SDValue StackPtr, SDValue Arg,
2733 SDLoc dl, SelectionDAG &DAG,
2734 const CCValAssign &VA,
2735 ISD::ArgFlagsTy Flags) const {
2736 unsigned LocMemOffset = VA.getLocMemOffset();
2737 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
2738 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
2739 if (Flags.isByVal())
2740 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
2742 return DAG.getStore(Chain, dl, Arg, PtrOff,
2743 MachinePointerInfo::getStack(LocMemOffset),
2747 /// Emit a load of return address if tail call
2748 /// optimization is performed and it is required.
2750 X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
2751 SDValue &OutRetAddr, SDValue Chain,
2752 bool IsTailCall, bool Is64Bit,
2753 int FPDiff, SDLoc dl) const {
2754 // Adjust the Return address stack slot.
2755 EVT VT = getPointerTy();
2756 OutRetAddr = getReturnAddressFrameIndex(DAG);
2758 // Load the "old" Return address.
2759 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
2760 false, false, false, 0);
2761 return SDValue(OutRetAddr.getNode(), 1);
2764 /// Emit a store of the return address if tail call
2765 /// optimization is performed and it is required (FPDiff!=0).
2766 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
2767 SDValue Chain, SDValue RetAddrFrIdx,
2768 EVT PtrVT, unsigned SlotSize,
2769 int FPDiff, SDLoc dl) {
2770 // Store the return address to the appropriate stack slot.
2771 if (!FPDiff) return Chain;
2772 // Calculate the new stack slot for the return address.
2773 int NewReturnAddrFI =
2774 MF.getFrameInfo()->CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
2776 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
2777 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
2778 MachinePointerInfo::getFixedStack(NewReturnAddrFI),
2784 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2785 SmallVectorImpl<SDValue> &InVals) const {
2786 SelectionDAG &DAG = CLI.DAG;
2788 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2789 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2790 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2791 SDValue Chain = CLI.Chain;
2792 SDValue Callee = CLI.Callee;
2793 CallingConv::ID CallConv = CLI.CallConv;
2794 bool &isTailCall = CLI.IsTailCall;
2795 bool isVarArg = CLI.IsVarArg;
2797 MachineFunction &MF = DAG.getMachineFunction();
2798 bool Is64Bit = Subtarget->is64Bit();
2799 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2800 StructReturnType SR = callIsStructReturn(Outs);
2801 bool IsSibcall = false;
2802 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
2804 if (MF.getTarget().Options.DisableTailCalls)
isTailCall = false;
2807 bool IsMustTail = CLI.CS && CLI.CS->isMustTailCall();
if (IsMustTail) {
2809 // Force this to be a tail call. The verifier rules are enough to ensure
2810 // that we can lower this successfully without moving the return address
// around.
isTailCall = true;
2813 } else if (isTailCall) {
2814 // Check if it's really possible to do a tail call.
2815 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
2816 isVarArg, SR != NotStructReturn,
2817 MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
2818 Outs, OutVals, Ins, DAG);
2820 // Sibcalls are automatically detected tailcalls which do not require
// ABI changes.
2822 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
IsSibcall = true;
2829 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2830 "Var args not supported with calling convention fastcc, ghc or hipe");
2832 // Analyze operands of the call, assigning locations to each operand.
2833 SmallVector<CCValAssign, 16> ArgLocs;
2834 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2836 // Allocate shadow area for Win64
2838 CCInfo.AllocateStack(32, 8);
2840 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2842 // Get a count of how many bytes are to be pushed on the stack.
2843 unsigned NumBytes = CCInfo.getNextStackOffset();
2845 // This is a sibcall. The memory operands are already available in the
2846 // caller's incoming argument stack area, so nothing needs to be pushed.
2848 else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
2849 IsTailCallConvention(CallConv))
2850 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
2853 if (isTailCall && !IsSibcall && !IsMustTail) {
2854 // Lower arguments at fp - stackoffset + fpdiff.
2855 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
2857 FPDiff = NumBytesCallerPushed - NumBytes;
2859 // Set the delta of movement of the return address stack slot, but only if
2860 // this delta moves it further (i.e. FPDiff is more negative) than before.
2861 if (FPDiff < X86Info->getTCReturnAddrDelta())
2862 X86Info->setTCReturnAddrDelta(FPDiff);
2865 unsigned NumBytesToPush = NumBytes;
2866 unsigned NumBytesToPop = NumBytes;
2868 // If we have an inalloca argument, all stack space has already been allocated
2869 // for us and is right at the top of the stack. We don't support multiple
2870 // arguments passed in memory when using inalloca.
2871 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
2873 if (!ArgLocs.back().isMemLoc())
2874 report_fatal_error("cannot use inalloca attribute on a register "
2876 if (ArgLocs.back().getLocMemOffset() != 0)
2877 report_fatal_error("any parameter with the inalloca attribute must be "
2878 "the only memory argument");
2882 Chain = DAG.getCALLSEQ_START(
2883 Chain, DAG.getIntPtrConstant(NumBytesToPush, true), dl);
2885 SDValue RetAddrFrIdx;
2886 // Load return address for tail calls.
2887 if (isTailCall && FPDiff)
2888 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2889 Is64Bit, FPDiff, dl);
2891 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2892 SmallVector<SDValue, 8> MemOpChains;
2895 // Walk the register/memloc assignments, inserting copies/loads. In the case
2896 // of tail call optimization arguments are handled later.
2897 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2898 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2899 // Skip inalloca arguments, they have already been written.
2900 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2901 if (Flags.isInAlloca())
2904 CCValAssign &VA = ArgLocs[i];
2905 EVT RegVT = VA.getLocVT();
2906 SDValue Arg = OutVals[i];
2907 bool isByVal = Flags.isByVal();
2909 // Promote the value if needed.
2910 switch (VA.getLocInfo()) {
2911 default: llvm_unreachable("Unknown loc info!");
2912 case CCValAssign::Full: break;
2913 case CCValAssign::SExt:
2914 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2916 case CCValAssign::ZExt:
2917 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2919 case CCValAssign::AExt:
2920 if (RegVT.is128BitVector()) {
2921 // Special case: passing MMX values in XMM registers.
2922 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
2923 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
2924 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
2926 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
2928 case CCValAssign::BCvt:
2929 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
2931 case CCValAssign::Indirect: {
2932 // Store the argument.
2933 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2934 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2935 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
2936 MachinePointerInfo::getFixedStack(FI),
2943 if (VA.isRegLoc()) {
2944 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2945 if (isVarArg && IsWin64) {
2946 // Win64 ABI requires argument XMM reg to be copied to the corresponding
2947 // shadow reg if callee is a varargs function.
2948 unsigned ShadowReg = 0;
2949 switch (VA.getLocReg()) {
2950 case X86::XMM0: ShadowReg = X86::RCX; break;
2951 case X86::XMM1: ShadowReg = X86::RDX; break;
2952 case X86::XMM2: ShadowReg = X86::R8; break;
2953 case X86::XMM3: ShadowReg = X86::R9; break;
2956 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2958 } else if (!IsSibcall && (!isTailCall || isByVal)) {
2959 assert(VA.isMemLoc());
2960 if (!StackPtr.getNode())
2961 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
2963 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2964 dl, DAG, VA, Flags));
2968 if (!MemOpChains.empty())
2969 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2971 if (Subtarget->isPICStyleGOT()) {
2972 // ELF / PIC requires GOT in the EBX register before function calls via PLT
2975 RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
2976 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy())));
2978 // If we are tail calling and generating PIC/GOT style code load the
2979 // address of the callee into ECX. The value in ecx is used as target of
2980 // the tail jump. This is done to circumvent the ebx/callee-saved problem
2981 // for tail calls on PIC/GOT architectures. Normally we would just put the
2982 // address of GOT into ebx and then call target@PLT. But for tail calls
2983 // ebx would be restored (since ebx is callee saved) before jumping to the
2986 // Note: The actual moving to ECX is done further down.
2987 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
2988 if (G && !G->getGlobal()->hasHiddenVisibility() &&
2989 !G->getGlobal()->hasProtectedVisibility())
2990 Callee = LowerGlobalAddress(Callee, DAG);
2991 else if (isa<ExternalSymbolSDNode>(Callee))
2992 Callee = LowerExternalSymbol(Callee, DAG);
2996 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
2997 // From AMD64 ABI document:
2998 // For calls that may call functions that use varargs or stdargs
2999 // (prototype-less calls or calls to functions containing ellipsis (...) in
3000 // the declaration) %al is used as a hidden argument to specify the number
3001 // of SSE registers used. The contents of %al do not need to match exactly
3002 // the number of registers, but must be an upper bound on the number of SSE
3003 // registers used, and be in the range 0 - 8 inclusive.
3005 // Count the number of XMM registers allocated.
3006 static const MCPhysReg XMMArgRegs[] = {
3007 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3008 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3010 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
3011 assert((Subtarget->hasSSE1() || !NumXMMRegs)
3012 && "SSE registers cannot be used when SSE is disabled");
3014 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
3015 DAG.getConstant(NumXMMRegs, MVT::i8)));
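  // For illustration: in a System V x86-64 variadic call such as
  // printf("%f\n", x), the double travels in XMM0, NumXMMRegs is 1, and the
  // code above emits the equivalent of
  //
  //   movb $1, %al        # upper bound on the number of SSE registers used
  //   callq printf
  //
  // A value of 0 in %al lets a varargs callee skip spilling the XMM argument
  // registers in its prologue.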
3018 if (isVarArg && IsMustTail) {
3019 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
3020 for (const auto &F : Forwards) {
3021 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3022 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
3026 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
3027 // don't need this because the eligibility check rejects calls that require
3028 // shuffling arguments passed in memory.
3029 if (!IsSibcall && isTailCall) {
3030 // Force all the incoming stack arguments to be loaded from the stack
3031 // before any new outgoing arguments are stored to the stack, because the
3032 // outgoing stack slots may alias the incoming argument stack slots, and
3033 // the alias isn't otherwise explicit. This is slightly more conservative
3034 // than necessary, because it means that each store effectively depends
3035 // on every argument instead of just those arguments it would clobber.
3036 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
3038 SmallVector<SDValue, 8> MemOpChains2;
3041 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3042 CCValAssign &VA = ArgLocs[i];
3045 assert(VA.isMemLoc());
3046 SDValue Arg = OutVals[i];
3047 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3048 // Skip inalloca arguments. They don't require any work.
3049 if (Flags.isInAlloca())
3051 // Create frame index.
3052 int32_t Offset = VA.getLocMemOffset()+FPDiff;
3053 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
3054 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
3055 FIN = DAG.getFrameIndex(FI, getPointerTy());
3057 if (Flags.isByVal()) {
3058 // Copy relative to framepointer.
3059 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
3060 if (!StackPtr.getNode())
3061 StackPtr = DAG.getCopyFromReg(Chain, dl,
3062 RegInfo->getStackRegister(),
3064 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
3066 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
3070 // Store relative to framepointer.
3071 MemOpChains2.push_back(
3072 DAG.getStore(ArgChain, dl, Arg, FIN,
3073 MachinePointerInfo::getFixedStack(FI),
3078 if (!MemOpChains2.empty())
3079 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
3081 // Store the return address to the appropriate stack slot.
3082 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
3083 getPointerTy(), RegInfo->getSlotSize(),
3087 // Build a sequence of copy-to-reg nodes chained together with token chain
3088 // and flag operands which copy the outgoing args into registers.
3090 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3091 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3092 RegsToPass[i].second, InFlag);
3093 InFlag = Chain.getValue(1);
3096 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
3097 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
3098 // In the 64-bit large code model, we have to make all calls
3099 // through a register, since the call instruction's 32-bit
3100 // pc-relative offset may not be large enough to hold the whole
3102 } else if (Callee->getOpcode() == ISD::GlobalAddress) {
3103 // If the callee is a GlobalAddress node (quite common, every direct call
3104 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack
3106 GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee);
3108 // We should use extra load for direct calls to dllimported functions in
3110 const GlobalValue *GV = G->getGlobal();
3111 if (!GV->hasDLLImportStorageClass()) {
3112 unsigned char OpFlags = 0;
3113 bool ExtraLoad = false;
3114 unsigned WrapperKind = ISD::DELETED_NODE;
3116 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
3117 // external symbols must go through the PLT in PIC mode. If the symbol
3118 // has hidden or protected visibility, or if it is static or local, then
3119 // we don't need to use the PLT - we can directly call it.
3120 if (Subtarget->isTargetELF() &&
3121 DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
3122 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
3123 OpFlags = X86II::MO_PLT;
3124 } else if (Subtarget->isPICStyleStubAny() &&
3125 (GV->isDeclaration() || GV->isWeakForLinker()) &&
3126 (!Subtarget->getTargetTriple().isMacOSX() ||
3127 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3128 // PC-relative references to external symbols should go through $stub,
3129 // unless we're building with the leopard linker or later, which
3130 // automatically synthesizes these stubs.
3131 OpFlags = X86II::MO_DARWIN_STUB;
3132 } else if (Subtarget->isPICStyleRIPRel() && isa<Function>(GV) &&
3133 cast<Function>(GV)->hasFnAttribute(Attribute::NonLazyBind)) {
3134 // If the function is marked as non-lazy, generate an indirect call
3135 // which loads from the GOT directly. This avoids runtime overhead
3136 // at the cost of eager binding (and one extra byte of encoding).
3137 OpFlags = X86II::MO_GOTPCREL;
3138 WrapperKind = X86ISD::WrapperRIP;
3142 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
3143 G->getOffset(), OpFlags);
3145 // Add a wrapper if needed.
3146 if (WrapperKind != ISD::DELETED_NODE)
3147 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee);
3148 // Add extra indirection if needed.
3150 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
3151 MachinePointerInfo::getGOT(),
3152 false, false, false, 0);
3154 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3155 unsigned char OpFlags = 0;
3157 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
3158 // external symbols should go through the PLT.
3159 if (Subtarget->isTargetELF() &&
3160 DAG.getTarget().getRelocationModel() == Reloc::PIC_) {
3161 OpFlags = X86II::MO_PLT;
3162 } else if (Subtarget->isPICStyleStubAny() &&
3163 (!Subtarget->getTargetTriple().isMacOSX() ||
3164 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3165 // PC-relative references to external symbols should go through $stub,
3166 // unless we're building with the leopard linker or later, which
3167 // automatically synthesizes these stubs.
3168 OpFlags = X86II::MO_DARWIN_STUB;
3171 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
3173 } else if (Subtarget->isTarget64BitILP32() &&
3174 Callee->getValueType(0) == MVT::i32) {
3175 // Zero-extend the 32-bit Callee address into a 64-bit one according to the x32 ABI
3176 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
3179 // Returns a chain & a flag for retval copy to use.
3180 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3181 SmallVector<SDValue, 8> Ops;
3183 if (!IsSibcall && isTailCall) {
3184 Chain = DAG.getCALLSEQ_END(Chain,
3185 DAG.getIntPtrConstant(NumBytesToPop, true),
3186 DAG.getIntPtrConstant(0, true), InFlag, dl);
3187 InFlag = Chain.getValue(1);
3190 Ops.push_back(Chain);
3191 Ops.push_back(Callee);
3194 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
3196 // Add argument registers to the end of the list so that they are known live
3198 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3199 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
3200 RegsToPass[i].second.getValueType()));
3202 // Add a register mask operand representing the call-preserved registers.
3203 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
3204 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
3205 assert(Mask && "Missing call preserved mask for calling convention");
3206 Ops.push_back(DAG.getRegisterMask(Mask));
3208 if (InFlag.getNode())
3209 Ops.push_back(InFlag);
3213 //// If this is the first return lowered for this function, add the regs
3214 //// to the liveout set for the function.
3215 // This isn't right, although it's probably harmless on x86; liveouts
3216 // should be computed from returns not tail calls. Consider a void
3217 // function making a tail call to a function returning int.
3218 return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
3221 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
3222 InFlag = Chain.getValue(1);
3224 // Create the CALLSEQ_END node.
3225 unsigned NumBytesForCalleeToPop;
3226 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3227 DAG.getTarget().Options.GuaranteedTailCallOpt))
3228 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
3229 else if (!Is64Bit && !IsTailCallConvention(CallConv) &&
3230 !Subtarget->getTargetTriple().isOSMSVCRT() &&
3231 SR == StackStructReturn)
3232 // If this is a call to a struct-return function, the callee
3233 // pops the hidden struct pointer, so we have to push it back.
3234 // This is common for Darwin/X86, Linux & Mingw32 targets.
3235 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
3236 NumBytesForCalleeToPop = 4;
3238 NumBytesForCalleeToPop = 0; // Callee pops nothing.
3240 // Returns a flag for retval copy to use.
3242 Chain = DAG.getCALLSEQ_END(Chain,
3243 DAG.getIntPtrConstant(NumBytesToPop, true),
3244 DAG.getIntPtrConstant(NumBytesForCalleeToPop,
3247 InFlag = Chain.getValue(1);
3250 // Handle result values, copying them out of physregs into vregs that we
3252 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
3253 Ins, dl, DAG, InVals);
3256 //===----------------------------------------------------------------------===//
3257 // Fast Calling Convention (tail call) implementation
3258 //===----------------------------------------------------------------------===//
3260 // Like the stdcall convention (the callee cleans up the arguments), except
3261 // that ECX is reserved for storing the address of the tail-called function.
3262 // Only 2 registers are free for argument passing (inreg). Tail call
3263 // optimization is performed provided:
3264 // * tailcallopt is enabled
3265 // * caller/callee are fastcc
3266 // On X86_64 architecture with GOT-style position independent code only local
3267 // (within module) calls are supported at the moment.
3268 // To keep the stack aligned according to the platform ABI, the function
3269 // GetAlignedArgumentStackSize ensures that the argument delta is always a
3270 // multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld for example)
3271 // If a tail-called callee has more arguments than the caller, the caller
3272 // needs to make sure that there is room to move the RETADDR to. This is
3273 // achieved by reserving an area the size of the argument delta right after the
3274 // original RETADDR, but before the saved frame pointer or the spilled registers,
3275 // e.g. caller(arg1, arg2) calls callee(arg1, arg2, arg3, arg4)
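// As a rough worked example (assuming a 32-bit fastcc caller and callee with
// GuaranteedTailCallOpt enabled): if the caller was entered with 8 bytes of
// stack arguments (NumBytesCallerPushed = 8) and the tail-called callee needs
// 16 bytes (NumBytes = 16), then FPDiff = 8 - 16 = -8, so the return address
// has to be moved 8 bytes further down into the reserved argument-delta area
// before the jump to the callee is emitted.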
3287 /// GetAlignedArgumentStackSize - Align the argument stack size so that it is,
3288 /// e.g., 16n + 12 bytes for a 16 byte alignment requirement.
3290 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
3291 SelectionDAG& DAG) const {
3292 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3293 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
3294 unsigned StackAlignment = TFI.getStackAlignment();
3295 uint64_t AlignMask = StackAlignment - 1;
3296 int64_t Offset = StackSize;
3297 unsigned SlotSize = RegInfo->getSlotSize();
3298 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
3299 // Number smaller than 12 so just add the difference.
3300 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
3302 // Mask out lower bits, add stackalignment once plus the 12 bytes.
3303 Offset = ((~AlignMask) & Offset) + StackAlignment +
3304 (StackAlignment-SlotSize);
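// Worked example, assuming a 16 byte stack alignment and a 4 byte slot
// (32-bit), so AlignMask = 15 and StackAlignment - SlotSize = 12:
//   StackSize = 20: 20 & 15 = 4  <= 12, Offset = 20 + (12 - 4)        = 28 = 16*1 + 12
//   StackSize = 30: 30 & 15 = 14 >  12, Offset = (30 & ~15) + 16 + 12 = 44 = 16*2 + 12
// Either way the result has the form 16n + 12, so pushing the 4 byte return
// address leaves the stack 16 byte aligned again at the callee's entry.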
3309 /// MatchingStackOffset - Return true if the given stack call argument is
3310 /// already available in the same position (relatively) of the caller's
3311 /// incoming argument stack.
3313 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
3314 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
3315 const X86InstrInfo *TII) {
3316 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
3318 if (Arg.getOpcode() == ISD::CopyFromReg) {
3319 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
3320 if (!TargetRegisterInfo::isVirtualRegister(VR))
3322 MachineInstr *Def = MRI->getVRegDef(VR);
3325 if (!Flags.isByVal()) {
3326 if (!TII->isLoadFromStackSlot(Def, FI))
3329 unsigned Opcode = Def->getOpcode();
3330 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
3331 Opcode == X86::LEA64_32r) &&
3332 Def->getOperand(1).isFI()) {
3333 FI = Def->getOperand(1).getIndex();
3334 Bytes = Flags.getByValSize();
3338 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
3339 if (Flags.isByVal())
3340 // ByVal argument is passed in as a pointer but it's now being
3341 // dereferenced. e.g.
3342 // define @foo(%struct.X* %A) {
3343 // tail call @bar(%struct.X* byval %A)
3346 SDValue Ptr = Ld->getBasePtr();
3347 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
3350 FI = FINode->getIndex();
3351 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
3352 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
3353 FI = FINode->getIndex();
3354 Bytes = Flags.getByValSize();
3358 assert(FI != INT_MAX);
3359 if (!MFI->isFixedObjectIndex(FI))
3361 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
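// For illustration: if the caller received an i32 argument in a fixed stack
// object at offset 8, and the outgoing tail-call argument at LocMemOffset 8 is
// simply that incoming value reloaded from the same slot, MatchingStackOffset
// returns true and the value can stay where it already is; no store is needed
// for that slot.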
3364 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3365 /// for tail call optimization. Targets which want to do tail call
3366 /// optimization should implement this function.
3368 X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
3369 CallingConv::ID CalleeCC,
3371 bool isCalleeStructRet,
3372 bool isCallerStructRet,
3374 const SmallVectorImpl<ISD::OutputArg> &Outs,
3375 const SmallVectorImpl<SDValue> &OutVals,
3376 const SmallVectorImpl<ISD::InputArg> &Ins,
3377 SelectionDAG &DAG) const {
3378 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
3381 // If -tailcallopt is specified, make fastcc functions tail-callable.
3382 const MachineFunction &MF = DAG.getMachineFunction();
3383 const Function *CallerF = MF.getFunction();
3385 // If the function return type is x86_fp80 and the callee return type is not,
3386 // then the FP_EXTEND of the call result is not a nop. It's not safe to
3387 // perform a tailcall optimization here.
3388 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
3391 CallingConv::ID CallerCC = CallerF->getCallingConv();
3392 bool CCMatch = CallerCC == CalleeCC;
3393 bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
3394 bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
3396 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
3397 if (IsTailCallConvention(CalleeCC) && CCMatch)
3402 // Look for obvious safe cases to perform tail call optimization that do not
3403 // require ABI changes. This is what gcc calls sibcall.
3405 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
3406 // emit a special epilogue.
3407 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3408 if (RegInfo->needsStackRealignment(MF))
3411 // Also avoid sibcall optimization if either caller or callee uses struct
3412 // return semantics.
3413 if (isCalleeStructRet || isCallerStructRet)
3416 // An stdcall/thiscall caller is expected to clean up its arguments; the
3417 // callee isn't going to do that.
3418 // FIXME: this is more restrictive than needed. We could produce a tailcall
3419 // when the stack adjustment matches. For example, with a thiscall that takes
3420 // only one argument.
3421 if (!CCMatch && (CallerCC == CallingConv::X86_StdCall ||
3422 CallerCC == CallingConv::X86_ThisCall))
3425 // Do not sibcall optimize vararg calls unless all arguments are passed via
3427 if (isVarArg && !Outs.empty()) {
3429 // Optimizing for varargs on Win64 is unlikely to be safe without
3430 // additional testing.
3431 if (IsCalleeWin64 || IsCallerWin64)
3434 SmallVector<CCValAssign, 16> ArgLocs;
3435 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3438 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3439 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
3440 if (!ArgLocs[i].isRegLoc())
3444 // If the call result is in ST0 / ST1, it needs to be popped off the x87
3445 // stack. Therefore, if it's not used by the call it is not safe to optimize
3446 // this into a sibcall.
3447 bool Unused = false;
3448 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3455 SmallVector<CCValAssign, 16> RVLocs;
3456 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), RVLocs,
3458 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3459 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
3460 CCValAssign &VA = RVLocs[i];
3461 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
3466 // If the calling conventions do not match, then we'd better make sure the
3467 // results are returned in the same way as what the caller expects.
3469 SmallVector<CCValAssign, 16> RVLocs1;
3470 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
3472 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
3474 SmallVector<CCValAssign, 16> RVLocs2;
3475 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
3477 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
3479 if (RVLocs1.size() != RVLocs2.size())
3481 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
3482 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
3484 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
3486 if (RVLocs1[i].isRegLoc()) {
3487 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
3490 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
3496 // If the callee takes no arguments then go on to check the results of the
3498 if (!Outs.empty()) {
3499 // Check if stack adjustment is needed. For now, do not do this if any
3500 // argument is passed on the stack.
3501 SmallVector<CCValAssign, 16> ArgLocs;
3502 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3505 // Allocate shadow area for Win64
3507 CCInfo.AllocateStack(32, 8);
3509 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3510 if (CCInfo.getNextStackOffset()) {
3511 MachineFunction &MF = DAG.getMachineFunction();
3512 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
3515 // Check if the arguments are already laid out in the right way as
3516 // the caller's fixed stack objects.
3517 MachineFrameInfo *MFI = MF.getFrameInfo();
3518 const MachineRegisterInfo *MRI = &MF.getRegInfo();
3519 const X86InstrInfo *TII = Subtarget->getInstrInfo();
3520 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3521 CCValAssign &VA = ArgLocs[i];
3522 SDValue Arg = OutVals[i];
3523 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3524 if (VA.getLocInfo() == CCValAssign::Indirect)
3526 if (!VA.isRegLoc()) {
3527 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
3534 // If the tailcall address may be in a register, then make sure it's
3535 // possible to register allocate for it. In 32-bit, the call address can
3536 // only target EAX, EDX, or ECX since the tail call must be scheduled after
3537 // callee-saved registers are restored. These happen to be the same
3538 // registers used to pass 'inreg' arguments so watch out for those.
3539 if (!Subtarget->is64Bit() &&
3540 ((!isa<GlobalAddressSDNode>(Callee) &&
3541 !isa<ExternalSymbolSDNode>(Callee)) ||
3542 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
3543 unsigned NumInRegs = 0;
3544 // In PIC we need an extra register to formulate the address computation
3546 unsigned MaxInRegs =
3547 (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
3549 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3550 CCValAssign &VA = ArgLocs[i];
3553 unsigned Reg = VA.getLocReg();
3556 case X86::EAX: case X86::EDX: case X86::ECX:
3557 if (++NumInRegs == MaxInRegs)
3569 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
3570 const TargetLibraryInfo *libInfo) const {
3571 return X86::createFastISel(funcInfo, libInfo);
3574 //===----------------------------------------------------------------------===//
3575 // Other Lowering Hooks
3576 //===----------------------------------------------------------------------===//
3578 static bool MayFoldLoad(SDValue Op) {
3579 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
3582 static bool MayFoldIntoStore(SDValue Op) {
3583 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
3586 static bool isTargetShuffle(unsigned Opcode) {
3588 default: return false;
3589 case X86ISD::BLENDI:
3590 case X86ISD::PSHUFB:
3591 case X86ISD::PSHUFD:
3592 case X86ISD::PSHUFHW:
3593 case X86ISD::PSHUFLW:
3595 case X86ISD::PALIGNR:
3596 case X86ISD::MOVLHPS:
3597 case X86ISD::MOVLHPD:
3598 case X86ISD::MOVHLPS:
3599 case X86ISD::MOVLPS:
3600 case X86ISD::MOVLPD:
3601 case X86ISD::MOVSHDUP:
3602 case X86ISD::MOVSLDUP:
3603 case X86ISD::MOVDDUP:
3606 case X86ISD::UNPCKL:
3607 case X86ISD::UNPCKH:
3608 case X86ISD::VPERMILPI:
3609 case X86ISD::VPERM2X128:
3610 case X86ISD::VPERMI:
3615 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3616 SDValue V1, SelectionDAG &DAG) {
3618 default: llvm_unreachable("Unknown x86 shuffle node");
3619 case X86ISD::MOVSHDUP:
3620 case X86ISD::MOVSLDUP:
3621 case X86ISD::MOVDDUP:
3622 return DAG.getNode(Opc, dl, VT, V1);
3626 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3627 SDValue V1, unsigned TargetMask,
3628 SelectionDAG &DAG) {
3630 default: llvm_unreachable("Unknown x86 shuffle node");
3631 case X86ISD::PSHUFD:
3632 case X86ISD::PSHUFHW:
3633 case X86ISD::PSHUFLW:
3634 case X86ISD::VPERMILPI:
3635 case X86ISD::VPERMI:
3636 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
3640 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3641 SDValue V1, SDValue V2, unsigned TargetMask,
3642 SelectionDAG &DAG) {
3644 default: llvm_unreachable("Unknown x86 shuffle node");
3645 case X86ISD::PALIGNR:
3646 case X86ISD::VALIGN:
3648 case X86ISD::VPERM2X128:
3649 return DAG.getNode(Opc, dl, VT, V1, V2,
3650 DAG.getConstant(TargetMask, MVT::i8));
3654 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3655 SDValue V1, SDValue V2, SelectionDAG &DAG) {
3657 default: llvm_unreachable("Unknown x86 shuffle node");
3658 case X86ISD::MOVLHPS:
3659 case X86ISD::MOVLHPD:
3660 case X86ISD::MOVHLPS:
3661 case X86ISD::MOVLPS:
3662 case X86ISD::MOVLPD:
3665 case X86ISD::UNPCKL:
3666 case X86ISD::UNPCKH:
3667 return DAG.getNode(Opc, dl, VT, V1, V2);
3671 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
3672 MachineFunction &MF = DAG.getMachineFunction();
3673 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3674 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3675 int ReturnAddrIndex = FuncInfo->getRAIndex();
3677 if (ReturnAddrIndex == 0) {
3678 // Set up a frame object for the return address.
3679 unsigned SlotSize = RegInfo->getSlotSize();
3680 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize,
3683 FuncInfo->setRAIndex(ReturnAddrIndex);
3686 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
3689 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
3690 bool hasSymbolicDisplacement) {
3691 // Offset should fit into 32 bit immediate field.
3692 if (!isInt<32>(Offset))
3695 // If we don't have a symbolic displacement - we don't have any extra
3697 if (!hasSymbolicDisplacement)
3700 // FIXME: Some tweaks might be needed for medium code model.
3701 if (M != CodeModel::Small && M != CodeModel::Kernel)
3704 // For the small code model we assume that the latest object is 16MB before the
3705 // end of the 31-bit boundary. We may also accept pretty large negative constants
3706 // knowing that all objects are in the positive half of the address space.
3707 if (M == CodeModel::Small && Offset < 16*1024*1024)
3710 // For the kernel code model we know that all objects reside in the negative
3711 // half of the 32-bit address space. We may not accept negative offsets, since
3712 // they may be just off, but we may accept pretty large positive ones.
3713 if (M == CodeModel::Kernel && Offset >= 0)
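// For illustration: with a symbolic displacement present, an offset of
// 4*1024*1024 (4MB) is accepted under the small code model since it is below
// the 16MB cushion, while 20*1024*1024 is rejected there; under the kernel
// code model the same 20MB offset would be accepted because it is
// non-negative. Without a symbolic displacement only the isInt<32> check
// above matters.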
3719 /// isCalleePop - Determines whether the callee is required to pop its
3720 /// own arguments. Callee pop is necessary to support tail calls.
3721 bool X86::isCalleePop(CallingConv::ID CallingConv,
3722 bool is64Bit, bool IsVarArg, bool TailCallOpt) {
3723 switch (CallingConv) {
3726 case CallingConv::X86_StdCall:
3727 case CallingConv::X86_FastCall:
3728 case CallingConv::X86_ThisCall:
3730 case CallingConv::Fast:
3731 case CallingConv::GHC:
3732 case CallingConv::HiPE:
3739 /// \brief Return true if the condition is an unsigned comparison operation.
3740 static bool isX86CCUnsigned(unsigned X86CC) {
3742 default: llvm_unreachable("Invalid integer condition!");
3743 case X86::COND_E: return true;
3744 case X86::COND_G: return false;
3745 case X86::COND_GE: return false;
3746 case X86::COND_L: return false;
3747 case X86::COND_LE: return false;
3748 case X86::COND_NE: return true;
3749 case X86::COND_B: return true;
3750 case X86::COND_A: return true;
3751 case X86::COND_BE: return true;
3752 case X86::COND_AE: return true;
3754 llvm_unreachable("covered switch fell through?!");
3757 /// TranslateX86CC - do a one to one translation of a ISD::CondCode to the X86
3758 /// specific condition code, returning the condition code and the LHS/RHS of the
3759 /// comparison to make.
3760 static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
3761 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
3763 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
3764 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
3765 // X > -1 -> X == 0, jump !sign.
3766 RHS = DAG.getConstant(0, RHS.getValueType());
3767 return X86::COND_NS;
3769 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
3770 // X < 0 -> X == 0, jump on sign.
3773 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
3775 RHS = DAG.getConstant(0, RHS.getValueType());
3776 return X86::COND_LE;
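  // For illustration, (setgt X, -1) is rewritten above into a compare against
  // zero followed by COND_NS, i.e. roughly:
  //
  //   cmpl $0, %eax
  //   jns  taken          ; sign clear <=> X >= 0 <=> X > -1
  //
  // Likewise (setlt X, 0) becomes a jump on the sign flag, and (setlt X, 1)
  // becomes X <= 0 via COND_LE with the RHS rewritten to 0.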
3780 switch (SetCCOpcode) {
3781 default: llvm_unreachable("Invalid integer condition!");
3782 case ISD::SETEQ: return X86::COND_E;
3783 case ISD::SETGT: return X86::COND_G;
3784 case ISD::SETGE: return X86::COND_GE;
3785 case ISD::SETLT: return X86::COND_L;
3786 case ISD::SETLE: return X86::COND_LE;
3787 case ISD::SETNE: return X86::COND_NE;
3788 case ISD::SETULT: return X86::COND_B;
3789 case ISD::SETUGT: return X86::COND_A;
3790 case ISD::SETULE: return X86::COND_BE;
3791 case ISD::SETUGE: return X86::COND_AE;
3795 // First determine if it is required or is profitable to flip the operands.
3797 // If LHS is a foldable load, but RHS is not, flip the condition.
3798 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
3799 !ISD::isNON_EXTLoad(RHS.getNode())) {
3800 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
3801 std::swap(LHS, RHS);
3804 switch (SetCCOpcode) {
3810 std::swap(LHS, RHS);
3814 // On a floating point condition, the flags are set as follows:
3815 //  ZF | PF | CF | result
3816 // 0 | 0 | 0 | X > Y
3817 // 0 | 0 | 1 | X < Y
3818 // 1 | 0 | 0 | X == Y
3819 // 1 | 1 | 1 | unordered
3820 switch (SetCCOpcode) {
3821 default: llvm_unreachable("Condcode should be pre-legalized away");
3823 case ISD::SETEQ: return X86::COND_E;
3824 case ISD::SETOLT: // flipped
3826 case ISD::SETGT: return X86::COND_A;
3827 case ISD::SETOLE: // flipped
3829 case ISD::SETGE: return X86::COND_AE;
3830 case ISD::SETUGT: // flipped
3832 case ISD::SETLT: return X86::COND_B;
3833 case ISD::SETUGE: // flipped
3835 case ISD::SETLE: return X86::COND_BE;
3837 case ISD::SETNE: return X86::COND_NE;
3838 case ISD::SETUO: return X86::COND_P;
3839 case ISD::SETO: return X86::COND_NP;
3841 case ISD::SETUNE: return X86::COND_INVALID;
3845 /// hasFPCMov - is there a floating point cmov for the specific X86 condition
3846 /// code. The current x86 ISA includes the following FP cmov instructions:
3847 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
3848 static bool hasFPCMov(unsigned X86CC) {
3864 /// isFPImmLegal - Returns true if the target can instruction select the
3865 /// specified FP immediate natively. If false, the legalizer will
3866 /// materialize the FP immediate as a load from a constant pool.
3867 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
3868 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
3869 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
3875 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
3876 ISD::LoadExtType ExtTy,
3878 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
3879 // relocation target a movq or addq instruction: don't let the load shrink.
3880 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
3881 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
3882 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
3883 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
3887 /// \brief Returns true if it is beneficial to convert a load of a constant
3888 /// to just the constant itself.
3889 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
3891 assert(Ty->isIntegerTy());
3893 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3894 if (BitSize == 0 || BitSize > 64)
3899 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
3900 unsigned Index) const {
3901 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
3904 return (Index == 0 || Index == ResVT.getVectorNumElements());
3907 bool X86TargetLowering::isCheapToSpeculateCttz() const {
3908 // Speculate cttz only if we can directly use TZCNT.
3909 return Subtarget->hasBMI();
3912 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
3913 // Speculate ctlz only if we can directly use LZCNT.
3914 return Subtarget->hasLZCNT();
3917 /// isUndefOrInRange - Return true if Val is undef or if its value falls within
3918 /// the specified range [Low, Hi).
3919 static bool isUndefOrInRange(int Val, int Low, int Hi) {
3920 return (Val < 0) || (Val >= Low && Val < Hi);
3923 /// isUndefOrEqual - Val is either less than zero (undef) or equal to the
3924 /// specified value.
3925 static bool isUndefOrEqual(int Val, int CmpVal) {
3926 return (Val < 0 || Val == CmpVal);
3929 /// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
3930 /// from position Pos and ending in Pos+Size, falls within the specified
3931 /// sequential range [Low, Low+Size), or is undef.
3932 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
3933 unsigned Pos, unsigned Size, int Low) {
3934 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
3935 if (!isUndefOrEqual(Mask[i], Low))
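// For illustration:
//   isSequentialOrUndefInRange({4, -1, 6, 7}, /*Pos=*/0, /*Size=*/4, /*Low=*/4)
// returns true: element 0 matches 4, element 1 is undef, and elements 2 and 3
// continue the sequence with 6 and 7.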
3940 /// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
3941 /// is suitable for input to PSHUFD. That is, it doesn't reference the other
3942 /// operand - by default will match for first operand.
3943 static bool isPSHUFDMask(ArrayRef<int> Mask, MVT VT,
3944 bool TestSecondOperand = false) {
3945 if (VT != MVT::v4f32 && VT != MVT::v4i32 &&
3946 VT != MVT::v2f64 && VT != MVT::v2i64)
3949 unsigned NumElems = VT.getVectorNumElements();
3950 unsigned Lo = TestSecondOperand ? NumElems : 0;
3951 unsigned Hi = Lo + NumElems;
3953 for (unsigned i = 0; i < NumElems; ++i)
3954 if (!isUndefOrInRange(Mask[i], (int)Lo, (int)Hi))
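// For illustration: a v4i32 mask such as <2, 1, -1, 0> is a valid PSHUFD of
// the first operand because every defined index lies in [0, 4), whereas
// <0, 4, 1, 5> is rejected since it also references the second operand.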
3960 /// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
3961 /// is suitable for input to PSHUFHW.
3962 static bool isPSHUFHWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3963 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3966 // Lower quadword copied in order or undef.
3967 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0))
3970 // Upper quadword shuffled.
3971 for (unsigned i = 4; i != 8; ++i)
3972 if (!isUndefOrInRange(Mask[i], 4, 8))
3975 if (VT == MVT::v16i16) {
3976 // Lower quadword copied in order or undef.
3977 if (!isSequentialOrUndefInRange(Mask, 8, 4, 8))
3980 // Upper quadword shuffled.
3981 for (unsigned i = 12; i != 16; ++i)
3982 if (!isUndefOrInRange(Mask[i], 12, 16))
3989 /// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
3990 /// is suitable for input to PSHUFLW.
3991 static bool isPSHUFLWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3992 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3995 // Upper quadword copied in order.
3996 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4))
3999 // Lower quadword shuffled.
4000 for (unsigned i = 0; i != 4; ++i)
4001 if (!isUndefOrInRange(Mask[i], 0, 4))
4004 if (VT == MVT::v16i16) {
4005 // Upper quadword copied in order.
4006 if (!isSequentialOrUndefInRange(Mask, 12, 4, 12))
4009 // Lower quadword shuffled.
4010 for (unsigned i = 8; i != 12; ++i)
4011 if (!isUndefOrInRange(Mask[i], 8, 12))
4018 /// \brief Return true if the mask specifies a shuffle of elements that is
4019 /// suitable for input to intralane (palignr) or interlane (valign) vector
4021 static bool isAlignrMask(ArrayRef<int> Mask, MVT VT, bool InterLane) {
4022 unsigned NumElts = VT.getVectorNumElements();
4023 unsigned NumLanes = InterLane ? 1: VT.getSizeInBits()/128;
4024 unsigned NumLaneElts = NumElts/NumLanes;
4026 // Do not handle 64-bit element shuffles with palignr.
4027 if (NumLaneElts == 2)
4030 for (unsigned l = 0; l != NumElts; l+=NumLaneElts) {
4032 for (i = 0; i != NumLaneElts; ++i) {
4037 // Lane is all undef, go to next lane
4038 if (i == NumLaneElts)
4041 int Start = Mask[i+l];
4043 // Make sure it's in this lane in one of the sources
4044 if (!isUndefOrInRange(Start, l, l+NumLaneElts) &&
4045 !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts))
4048 // If not lane 0, then we must match lane 0
4049 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l))
4052 // Correct second source to be contiguous with first source
4053 if (Start >= (int)NumElts)
4054 Start -= NumElts - NumLaneElts;
4056 // Make sure we're shifting in the right direction.
4057 if (Start <= (int)(i+l))
4062 // Check the rest of the elements to see if they are consecutive.
4063 for (++i; i != NumLaneElts; ++i) {
4064 int Idx = Mask[i+l];
4066 // Make sure it's in this lane
4067 if (!isUndefOrInRange(Idx, l, l+NumLaneElts) &&
4068 !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts))
4071 // If not lane 0, then we must match lane 0
4072 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l))
4075 if (Idx >= (int)NumElts)
4076 Idx -= NumElts - NumLaneElts;
4078 if (!isUndefOrEqual(Idx, Start+i))
4087 /// \brief Return true if the node specifies a shuffle of elements that is
4088 /// suitable for input to PALIGNR.
4089 static bool isPALIGNRMask(ArrayRef<int> Mask, MVT VT,
4090 const X86Subtarget *Subtarget) {
4091 if ((VT.is128BitVector() && !Subtarget->hasSSSE3()) ||
4092 (VT.is256BitVector() && !Subtarget->hasInt256()) ||
4093 VT.is512BitVector())
4094 // FIXME: Add AVX512BW.
4097 return isAlignrMask(Mask, VT, false);
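// For illustration: for v8i16 the mask <1, 2, 3, 4, 5, 6, 7, 8> is accepted.
// It reads elements 1..7 of the first source followed by element 0 of the
// second source, i.e. a PALIGNR of the concatenated sources shifted by one
// 16-bit element (a byte shift amount of 2).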
4100 /// \brief Return true if the node specifies a shuffle of elements that is
4101 /// suitable for input to VALIGN.
4102 static bool isVALIGNMask(ArrayRef<int> Mask, MVT VT,
4103 const X86Subtarget *Subtarget) {
4104 // FIXME: Add AVX512VL.
4105 if (!VT.is512BitVector() || !Subtarget->hasAVX512())
4107 return isAlignrMask(Mask, VT, true);
4110 /// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
4111 /// the two vector operands have swapped position.
4112 static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
4113 unsigned NumElems) {
4114 for (unsigned i = 0; i != NumElems; ++i) {
4118 else if (idx < (int)NumElems)
4119 Mask[i] = idx + NumElems;
4121 Mask[i] = idx - NumElems;
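// For illustration, with NumElems = 4 the mask <0, 5, 2, 7> becomes
// <4, 1, 6, 3>: indices below NumElems move into the second operand's range
// and vice versa, while negative (undef) entries are left alone.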
4125 /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
4126 /// specifies a shuffle of elements that is suitable for input to 128/256-bit
4127 /// SHUFPS and SHUFPD. If Commuted is true, then it checks for sources to be
4128 /// reverse of what x86 shuffles want.
4129 static bool isSHUFPMask(ArrayRef<int> Mask, MVT VT, bool Commuted = false) {
4131 unsigned NumElems = VT.getVectorNumElements();
4132 unsigned NumLanes = VT.getSizeInBits()/128;
4133 unsigned NumLaneElems = NumElems/NumLanes;
4135 if (NumLaneElems != 2 && NumLaneElems != 4)
4138 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4139 bool symmetricMaskRequired =
4140 (VT.getSizeInBits() >= 256) && (EltSize == 32);
4142 // VSHUFPSY divides the resulting vector into 4 chunks.
4143 // The sources are also split into 4 chunks, and each destination
4144 // chunk must come from a different source chunk.
4146 // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0
4147 // SRC2 => Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
4149 // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4,
4150 // Y3..Y0, Y3..Y0, X3..X0, X3..X0
4152 // VSHUFPDY divides the resulting vector into 4 chunks.
4153 // The sources are also split into 4 chunks, and each destination
4154 // chunk must come from a different source chunk.
4156 // SRC1 => X3 X2 X1 X0
4157 // SRC2 => Y3 Y2 Y1 Y0
4159 // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0
4161 SmallVector<int, 4> MaskVal(NumLaneElems, -1);
4162 unsigned HalfLaneElems = NumLaneElems/2;
4163 for (unsigned l = 0; l != NumElems; l += NumLaneElems) {
4164 for (unsigned i = 0; i != NumLaneElems; ++i) {
4165 int Idx = Mask[i+l];
4166 unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0);
4167 if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems))
4169 // For VSHUFPSY, the mask of the second half must be the same as the
4170 // first but with the appropriate offsets. This works in the same way as
4171 // VPERMILPS works with masks.
4172 if (!symmetricMaskRequired || Idx < 0)
4174 if (MaskVal[i] < 0) {
4175 MaskVal[i] = Idx - l;
4178 if ((signed)(Idx - l) != MaskVal[i])
4186 /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
4187 /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
4188 static bool isMOVHLPSMask(ArrayRef<int> Mask, MVT VT) {
4189 if (!VT.is128BitVector())
4192 unsigned NumElems = VT.getVectorNumElements();
4197 // Expect bit0 == 6, bit1 == 7, bit2 == 2, bit3 == 3
4198 return isUndefOrEqual(Mask[0], 6) &&
4199 isUndefOrEqual(Mask[1], 7) &&
4200 isUndefOrEqual(Mask[2], 2) &&
4201 isUndefOrEqual(Mask[3], 3);
4204 /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
4205 /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef,
4207 static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, MVT VT) {
4208 if (!VT.is128BitVector())
4211 unsigned NumElems = VT.getVectorNumElements();
4216 return isUndefOrEqual(Mask[0], 2) &&
4217 isUndefOrEqual(Mask[1], 3) &&
4218 isUndefOrEqual(Mask[2], 2) &&
4219 isUndefOrEqual(Mask[3], 3);
4222 /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
4223 /// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
4224 static bool isMOVLPMask(ArrayRef<int> Mask, MVT VT) {
4225 if (!VT.is128BitVector())
4228 unsigned NumElems = VT.getVectorNumElements();
4230 if (NumElems != 2 && NumElems != 4)
4233 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4234 if (!isUndefOrEqual(Mask[i], i + NumElems))
4237 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
4238 if (!isUndefOrEqual(Mask[i], i))
4244 /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
4245 /// specifies a shuffle of elements that is suitable for input to MOVLHPS.
4246 static bool isMOVLHPSMask(ArrayRef<int> Mask, MVT VT) {
4247 if (!VT.is128BitVector())
4250 unsigned NumElems = VT.getVectorNumElements();
4252 if (NumElems != 2 && NumElems != 4)
4255 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4256 if (!isUndefOrEqual(Mask[i], i))
4259 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4260 if (!isUndefOrEqual(Mask[i + e], i + NumElems))
4266 /// isINSERTPSMask - Return true if the specified VECTOR_SHUFFLE operand
4267 /// specifies a shuffle of elements that is suitable for input to INSERTPS.
4268 /// i. e: If all but one element come from the same vector.
4269 static bool isINSERTPSMask(ArrayRef<int> Mask, MVT VT) {
4270 // TODO: Deal with AVX's VINSERTPS
4271 if (!VT.is128BitVector() || (VT != MVT::v4f32 && VT != MVT::v4i32))
4274 unsigned CorrectPosV1 = 0;
4275 unsigned CorrectPosV2 = 0;
4276 for (int i = 0, e = (int)VT.getVectorNumElements(); i != e; ++i) {
4277 if (Mask[i] == -1) {
4285 else if (Mask[i] == i + 4)
4289 if (CorrectPosV1 == 3 || CorrectPosV2 == 3)
4290 // We have 3 elements (undefs count as elements from any vector) from one
4291 // vector, and one from another.
4298 // Some special combinations that can be optimized.
4301 SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp,
4302 SelectionDAG &DAG) {
4303 MVT VT = SVOp->getSimpleValueType(0);
4306 if (VT != MVT::v8i32 && VT != MVT::v8f32)
4309 ArrayRef<int> Mask = SVOp->getMask();
4311 // These are the special masks that may be optimized.
4312 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14};
4313 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15};
4314 bool MatchEvenMask = true;
4315 bool MatchOddMask = true;
4316 for (int i=0; i<8; ++i) {
4317 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i]))
4318 MatchEvenMask = false;
4319 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i]))
4320 MatchOddMask = false;
4323 if (!MatchEvenMask && !MatchOddMask)
4326 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT);
4328 SDValue Op0 = SVOp->getOperand(0);
4329 SDValue Op1 = SVOp->getOperand(1);
4331 if (MatchEvenMask) {
4332 // Shift the second operand right to 32 bits.
4333 static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 };
4334 Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask);
4336 // Shift the first operand left to 32 bits.
4337 static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 };
4338 Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask);
4340 static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15};
4341 return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask);
4344 /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
4345 /// specifies a shuffle of elements that is suitable for input to UNPCKL.
4346 static bool isUNPCKLMask(ArrayRef<int> Mask, MVT VT,
4347 bool HasInt256, bool V2IsSplat = false) {
4349 assert(VT.getSizeInBits() >= 128 &&
4350 "Unsupported vector type for unpckl");
4352 unsigned NumElts = VT.getVectorNumElements();
4353 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4354 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4357 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4358 "Unsupported vector type for unpckh");
4360 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4361 unsigned NumLanes = VT.getSizeInBits()/128;
4362 unsigned NumLaneElts = NumElts/NumLanes;
4364 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4365 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4366 int BitI = Mask[l+i];
4367 int BitI1 = Mask[l+i+1];
4368 if (!isUndefOrEqual(BitI, j))
4371 if (!isUndefOrEqual(BitI1, NumElts))
4374 if (!isUndefOrEqual(BitI1, j + NumElts))
4383 /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
4384 /// specifies a shuffle of elements that is suitable for input to UNPCKH.
4385 static bool isUNPCKHMask(ArrayRef<int> Mask, MVT VT,
4386 bool HasInt256, bool V2IsSplat = false) {
4387 assert(VT.getSizeInBits() >= 128 &&
4388 "Unsupported vector type for unpckh");
4390 unsigned NumElts = VT.getVectorNumElements();
4391 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4392 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4395 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4396 "Unsupported vector type for unpckh");
4398 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4399 unsigned NumLanes = VT.getSizeInBits()/128;
4400 unsigned NumLaneElts = NumElts/NumLanes;
4402 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4403 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4404 int BitI = Mask[l+i];
4405 int BitI1 = Mask[l+i+1];
4406 if (!isUndefOrEqual(BitI, j))
4409 if (isUndefOrEqual(BitI1, NumElts))
4412 if (!isUndefOrEqual(BitI1, j+NumElts))
4420 /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
4421 /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef,
4423 static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4424 unsigned NumElts = VT.getVectorNumElements();
4425 bool Is256BitVec = VT.is256BitVector();
4427 if (VT.is512BitVector())
4429 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4430 "Unsupported vector type for unpckh");
4432 if (Is256BitVec && NumElts != 4 && NumElts != 8 &&
4433 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4436 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern
4437 // FIXME: Need a better way to get rid of this, there's no latency difference
4438 // between UNPCKLPD and MOVDDUP; the latter should always be checked first and
4439 // the former later. We should also remove the "_undef" special mask.
4440 if (NumElts == 4 && Is256BitVec)
4443 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4444 // independently on 128-bit lanes.
4445 unsigned NumLanes = VT.getSizeInBits()/128;
4446 unsigned NumLaneElts = NumElts/NumLanes;
4448 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4449 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4450 int BitI = Mask[l+i];
4451 int BitI1 = Mask[l+i+1];
4453 if (!isUndefOrEqual(BitI, j))
4455 if (!isUndefOrEqual(BitI1, j))
4463 /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
4464 /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef,
4466 static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4467 unsigned NumElts = VT.getVectorNumElements();
4469 if (VT.is512BitVector())
4472 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4473 "Unsupported vector type for unpckh");
4475 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4476 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4479 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4480 // independently on 128-bit lanes.
4481 unsigned NumLanes = VT.getSizeInBits()/128;
4482 unsigned NumLaneElts = NumElts/NumLanes;
4484 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4485 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4486 int BitI = Mask[l+i];
4487 int BitI1 = Mask[l+i+1];
4488 if (!isUndefOrEqual(BitI, j))
4490 if (!isUndefOrEqual(BitI1, j))
4497 // Match for INSERTI64x4 INSERTF64x4 instructions (src0[0], src1[0]) or
4498 // (src1[0], src0[1]), manipulation with 256-bit sub-vectors
4499 static bool isINSERT64x4Mask(ArrayRef<int> Mask, MVT VT, unsigned int *Imm) {
4500 if (!VT.is512BitVector())
4503 unsigned NumElts = VT.getVectorNumElements();
4504 unsigned HalfSize = NumElts/2;
4505 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, 0)) {
4506 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, NumElts)) {
4511 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, NumElts)) {
4512 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, HalfSize)) {
4520 /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
4521 /// specifies a shuffle of elements that is suitable for input to MOVSS,
4522 /// MOVSD, and MOVD, i.e. setting the lowest element.
4523 static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
4524 if (VT.getVectorElementType().getSizeInBits() < 32)
4526 if (!VT.is128BitVector())
4529 unsigned NumElts = VT.getVectorNumElements();
4531 if (!isUndefOrEqual(Mask[0], NumElts))
4534 for (unsigned i = 1; i != NumElts; ++i)
4535 if (!isUndefOrEqual(Mask[i], i))
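// For illustration: for v4f32 the mask <4, 1, 2, 3> matches - element 0 is the
// low element of the second vector and the rest come from the first vector in
// place, exactly the MOVSS pattern - while <4, 5, 2, 3> does not.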
4541 /// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered
4542 /// as permutations between 128-bit chunks or halves. As an example: this
4544 /// vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
4545 /// The first half comes from the second half of V1 and the second half from the
4546 /// second half of V2.
4547 static bool isVPERM2X128Mask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4548 if (!HasFp256 || !VT.is256BitVector())
4551 // The shuffle result is divided into half A and half B. In total the two
4552 // sources have 4 halves, namely: C, D, E, F. The final values of A and
4553 // B must come from C, D, E or F.
4554 unsigned HalfSize = VT.getVectorNumElements()/2;
4555 bool MatchA = false, MatchB = false;
4557 // Check if A comes from one of C, D, E, F.
4558 for (unsigned Half = 0; Half != 4; ++Half) {
4559 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
4565 // Check if B comes from one of C, D, E, F.
4566 for (unsigned Half = 0; Half != 4; ++Half) {
4567 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
4573 return MatchA && MatchB;
4576 /// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
4577 /// the specified VECTOR_MASK mask with VPERM2F128/VPERM2I128 instructions.
4578 static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
4579 MVT VT = SVOp->getSimpleValueType(0);
4581 unsigned HalfSize = VT.getVectorNumElements()/2;
4583 unsigned FstHalf = 0, SndHalf = 0;
4584 for (unsigned i = 0; i < HalfSize; ++i) {
4585 if (SVOp->getMaskElt(i) > 0) {
4586 FstHalf = SVOp->getMaskElt(i)/HalfSize;
4590 for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
4591 if (SVOp->getMaskElt(i) > 0) {
4592 SndHalf = SVOp->getMaskElt(i)/HalfSize;
4597 return (FstHalf | (SndHalf << 4));
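// For illustration: for a v8i32 shuffle with mask <4, 5, 6, 7, 12, 13, 14, 15>
// HalfSize is 4, the low result half comes from half 1 (the high half of V1)
// and the high result half from half 3 (the high half of V2), so the returned
// immediate is 1 | (3 << 4) = 0x31.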
4600 // Symmetric in-lane mask. Each lane has 4 elements (for imm8)
4601 static bool isPermImmMask(ArrayRef<int> Mask, MVT VT, unsigned& Imm8) {
4602 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4606 unsigned NumElts = VT.getVectorNumElements();
4608 if (VT.is128BitVector() || (VT.is256BitVector() && EltSize == 64)) {
4609 for (unsigned i = 0; i != NumElts; ++i) {
4612 Imm8 |= Mask[i] << (i*2);
4617 unsigned LaneSize = 4;
4618 SmallVector<int, 4> MaskVal(LaneSize, -1);
4620 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4621 for (unsigned i = 0; i != LaneSize; ++i) {
4622 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4626 if (MaskVal[i] < 0) {
4627 MaskVal[i] = Mask[i+l] - l;
4628 Imm8 |= MaskVal[i] << (i*2);
4631 if (Mask[i+l] != (signed)(MaskVal[i]+l))
4638 /// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
4639 /// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
4640 /// Note that VPERMIL mask matching differs depending on whether the underlying
4641 /// type is 32- or 64-bit. For VPERMILPS the high half of the mask should point
4642 /// to the same elements as the low half, but within the higher half of the source.
4643 /// In VPERMILPD the two lanes could be shuffled independently of each other
4644 /// with the same restriction that lanes can't be crossed. Also handles PSHUFDY.
4645 static bool isVPERMILPMask(ArrayRef<int> Mask, MVT VT) {
4646 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4647 if (VT.getSizeInBits() < 256 || EltSize < 32)
4649 bool symmetricMaskRequired = (EltSize == 32);
4650 unsigned NumElts = VT.getVectorNumElements();
4652 unsigned NumLanes = VT.getSizeInBits()/128;
4653 unsigned LaneSize = NumElts/NumLanes;
4654 // 2 or 4 elements in one lane
4656 SmallVector<int, 4> ExpectedMaskVal(LaneSize, -1);
4657 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4658 for (unsigned i = 0; i != LaneSize; ++i) {
4659 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4661 if (symmetricMaskRequired) {
4662 if (ExpectedMaskVal[i] < 0 && Mask[i+l] >= 0) {
4663 ExpectedMaskVal[i] = Mask[i+l] - l;
4666 if (!isUndefOrEqual(Mask[i+l], ExpectedMaskVal[i]+l))
4674 /// isCommutedMOVLMask - Returns true if the shuffle mask is the reverse
4675 /// (commuted form) of what x86 movss wants. X86 movss requires the lowest
4676 /// element to be the lowest element of vector 2 and the other elements to come from vector 1 in order.
4677 static bool isCommutedMOVLMask(ArrayRef<int> Mask, MVT VT,
4678 bool V2IsSplat = false, bool V2IsUndef = false) {
4679 if (!VT.is128BitVector())
4682 unsigned NumOps = VT.getVectorNumElements();
4683 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
4686 if (!isUndefOrEqual(Mask[0], 0))
4689 for (unsigned i = 1; i != NumOps; ++i)
4690 if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
4691 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
4692 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
4698 /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4699 /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
4700 /// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
4701 static bool isMOVSHDUPMask(ArrayRef<int> Mask, MVT VT,
4702 const X86Subtarget *Subtarget) {
4703 if (!Subtarget->hasSSE3())
4706 unsigned NumElems = VT.getVectorNumElements();
4708 if ((VT.is128BitVector() && NumElems != 4) ||
4709 (VT.is256BitVector() && NumElems != 8) ||
4710 (VT.is512BitVector() && NumElems != 16))
4713 // "i+1" is the value the indexed mask element must have
4714 for (unsigned i = 0; i != NumElems; i += 2)
4715 if (!isUndefOrEqual(Mask[i], i+1) ||
4716 !isUndefOrEqual(Mask[i+1], i+1))
4722 /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4723 /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
4724 /// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
4725 static bool isMOVSLDUPMask(ArrayRef<int> Mask, MVT VT,
4726 const X86Subtarget *Subtarget) {
4727 if (!Subtarget->hasSSE3())
4730 unsigned NumElems = VT.getVectorNumElements();
4732 if ((VT.is128BitVector() && NumElems != 4) ||
4733 (VT.is256BitVector() && NumElems != 8) ||
4734 (VT.is512BitVector() && NumElems != 16))
4737 // "i" is the value the indexed mask element must have
4738 for (unsigned i = 0; i != NumElems; i += 2)
4739 if (!isUndefOrEqual(Mask[i], i) ||
4740 !isUndefOrEqual(Mask[i+1], i))
4746 /// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand
4747 /// specifies a shuffle of elements that is suitable for input to 256-bit
4748 /// version of MOVDDUP.
4749 static bool isMOVDDUPYMask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4750 if (!HasFp256 || !VT.is256BitVector())
4753 unsigned NumElts = VT.getVectorNumElements();
4757 for (unsigned i = 0; i != NumElts/2; ++i)
4758 if (!isUndefOrEqual(Mask[i], 0))
4760 for (unsigned i = NumElts/2; i != NumElts; ++i)
4761 if (!isUndefOrEqual(Mask[i], NumElts/2))
4766 /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4767 /// specifies a shuffle of elements that is suitable for input to 128-bit
4768 /// version of MOVDDUP.
4769 static bool isMOVDDUPMask(ArrayRef<int> Mask, MVT VT) {
4770 if (!VT.is128BitVector())
4773 unsigned e = VT.getVectorNumElements() / 2;
4774 for (unsigned i = 0; i != e; ++i)
4775 if (!isUndefOrEqual(Mask[i], i))
4777 for (unsigned i = 0; i != e; ++i)
4778 if (!isUndefOrEqual(Mask[e+i], i))
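// Editorial note, for illustration only: the 128-bit form above matches
// <0, 0> for v2f64, while the 256-bit form (isMOVDDUPYMask) matches
// <0, 0, 2, 2> for v4f64, duplicating the low element of each 128-bit lane.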
4783 /// isVEXTRACTIndex - Return true if the specified
4784 /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
4785 /// suitable for instructions that extract 128- or 256-bit vectors
4786 static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
4787 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4788 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4791 // The index should be aligned on a vecWidth-bit boundary.
4793 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4795 MVT VT = N->getSimpleValueType(0);
4796 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4797 bool Result = (Index * ElSize) % vecWidth == 0;
4802 /// isVINSERTIndex - Return true if the specified INSERT_SUBVECTOR
4803 /// operand specifies a subvector insert that is suitable for input to
4804 /// insertion of 128 or 256-bit subvectors
4805 static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) {
4806 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4807 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4809 // The index should be aligned on a vecWidth-bit boundary.
4811 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4813 MVT VT = N->getSimpleValueType(0);
4814 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4815 bool Result = (Index * ElSize) % vecWidth == 0;
4820 bool X86::isVINSERT128Index(SDNode *N) {
4821 return isVINSERTIndex(N, 128);
4824 bool X86::isVINSERT256Index(SDNode *N) {
4825 return isVINSERTIndex(N, 256);
4828 bool X86::isVEXTRACT128Index(SDNode *N) {
4829 return isVEXTRACTIndex(N, 128);
4832 bool X86::isVEXTRACT256Index(SDNode *N) {
4833 return isVEXTRACTIndex(N, 256);
4836 /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
4837 /// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
4838 /// Handles 128-bit and 256-bit.
4839 static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
4840 MVT VT = N->getSimpleValueType(0);
4842 assert((VT.getSizeInBits() >= 128) &&
4843 "Unsupported vector type for PSHUF/SHUFP");
4845 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate
4846 // independently on 128-bit lanes.
4847 unsigned NumElts = VT.getVectorNumElements();
4848 unsigned NumLanes = VT.getSizeInBits()/128;
4849 unsigned NumLaneElts = NumElts/NumLanes;
4851 assert((NumLaneElts == 2 || NumLaneElts == 4 || NumLaneElts == 8) &&
4852 "Only supports 2, 4 or 8 elements per lane");
4854 unsigned Shift = (NumLaneElts >= 4) ? 1 : 0;
4856 for (unsigned i = 0; i != NumElts; ++i) {
4857 int Elt = N->getMaskElt(i);
4858 if (Elt < 0) continue;
4859 Elt &= NumLaneElts - 1;
4860 unsigned ShAmt = (i << Shift) % 8;
4861 Mask |= Elt << ShAmt;
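// Editorial note: a worked example of the immediate computation above, for
// illustration only. For a v4f32 mask <3, 1, 0, 2>, Shift is 1 and each
// element occupies 2 bits, so the SHUFPS immediate is
// 3 | (1 << 2) | (0 << 4) | (2 << 6) = 0x87.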
4867 /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
4868 /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
4869 static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) {
4870 MVT VT = N->getSimpleValueType(0);
4872 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4873 "Unsupported vector type for PSHUFHW");
4875 unsigned NumElts = VT.getVectorNumElements();
4878 for (unsigned l = 0; l != NumElts; l += 8) {
4879 // 8 nodes per lane, but we only care about the last 4.
4880 for (unsigned i = 0; i < 4; ++i) {
4881 int Elt = N->getMaskElt(l+i+4);
4882 if (Elt < 0) continue;
4883 Elt &= 0x3; // only 2-bits.
4884 Mask |= Elt << (i * 2);
4891 /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
4892 /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
4893 static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) {
4894 MVT VT = N->getSimpleValueType(0);
4896 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4897 "Unsupported vector type for PSHUFHW");
4899 unsigned NumElts = VT.getVectorNumElements();
4902 for (unsigned l = 0; l != NumElts; l += 8) {
4903 // 8 nodes per lane, but we only care about the first 4.
4904 for (unsigned i = 0; i < 4; ++i) {
4905 int Elt = N->getMaskElt(l+i);
4906 if (Elt < 0) continue;
4907 Elt &= 0x3; // only 2-bits
4908 Mask |= Elt << (i * 2);
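// Editorial note: a worked example, for illustration only. For a v8i16 mask
// <2, 3, 0, 1, 4, 5, 6, 7> only the low four elements are encoded, giving a
// PSHUFLW immediate of 2 | (3 << 2) | (0 << 4) | (1 << 6) = 0x4E; the high
// half of each lane is left untouched.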
4915 /// \brief Return the appropriate immediate to shuffle the specified
4916 /// VECTOR_SHUFFLE mask with the PALIGNR (if InterLane is false) or the
4917 /// VALIGN (if InterLane is true) instruction.
4918 static unsigned getShuffleAlignrImmediate(ShuffleVectorSDNode *SVOp,
4920 MVT VT = SVOp->getSimpleValueType(0);
4921 unsigned EltSize = InterLane ? 1 :
4922 VT.getVectorElementType().getSizeInBits() >> 3;
4924 unsigned NumElts = VT.getVectorNumElements();
4925 unsigned NumLanes = VT.is512BitVector() ? 1 : VT.getSizeInBits()/128;
4926 unsigned NumLaneElts = NumElts/NumLanes;
4930 for (i = 0; i != NumElts; ++i) {
4931 Val = SVOp->getMaskElt(i);
4935 if (Val >= (int)NumElts)
4936 Val -= NumElts - NumLaneElts;
4938 assert(Val - i > 0 && "PALIGNR imm should be positive");
4939 return (Val - i) * EltSize;
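// Editorial note: a worked example, for illustration only. For a v16i8 mask
// <5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20> the first
// defined element is 5 at position 0, so the PALIGNR byte-shift immediate is
// (5 - 0) * 1 = 5. For VALIGN the element size is taken as 1, so the
// immediate counts elements rather than bytes.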
4942 /// \brief Return the appropriate immediate to shuffle the specified
4943 /// VECTOR_SHUFFLE mask with the PALIGNR instruction.
4944 static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) {
4945 return getShuffleAlignrImmediate(SVOp, false);
4948 /// \brief Return the appropriate immediate to shuffle the specified
4949 /// VECTOR_SHUFFLE mask with the VALIGN instruction.
4950 static unsigned getShuffleVALIGNImmediate(ShuffleVectorSDNode *SVOp) {
4951 return getShuffleAlignrImmediate(SVOp, true);
4955 static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) {
4956 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4957 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4958 llvm_unreachable("Illegal extract subvector for VEXTRACT");
4961 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4963 MVT VecVT = N->getOperand(0).getSimpleValueType();
4964 MVT ElVT = VecVT.getVectorElementType();
4966 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4967 return Index / NumElemsPerChunk;
4970 static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) {
4971 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4972 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4973 llvm_unreachable("Illegal insert subvector for VINSERT");
4976 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4978 MVT VecVT = N->getSimpleValueType(0);
4979 MVT ElVT = VecVT.getVectorElementType();
4981 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4982 return Index / NumElemsPerChunk;
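// Editorial note: a worked example, for illustration only. Inserting a
// 128-bit subvector into a v8f32 at element index 4 gives NumElemsPerChunk =
// 128 / 32 = 4 and an immediate of 4 / 4 = 1 (the upper lane for
// VINSERTF128); the extract immediate above is computed the same way.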
4985 /// getExtractVEXTRACT128Immediate - Return the appropriate immediate
4986 /// to extract the specified EXTRACT_SUBVECTOR index with the VEXTRACTF128
4987 /// and VEXTRACTI128 instructions.
4988 unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) {
4989 return getExtractVEXTRACTImmediate(N, 128);
4992 /// getExtractVEXTRACT256Immediate - Return the appropriate immediate
4993 /// to extract the specified EXTRACT_SUBVECTOR index with the VEXTRACTF64x4
4994 /// and VEXTRACTI64x4 instructions.
4995 unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) {
4996 return getExtractVEXTRACTImmediate(N, 256);
4999 /// getInsertVINSERT128Immediate - Return the appropriate immediate
5000 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
5001 /// and VINSERTI128 instructions.
5002 unsigned X86::getInsertVINSERT128Immediate(SDNode *N) {
5003 return getInsertVINSERTImmediate(N, 128);
5006 /// getInsertVINSERT256Immediate - Return the appropriate immediate
5007 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF64x4
5008 /// and VINSERTI64x4 instructions.
5009 unsigned X86::getInsertVINSERT256Immediate(SDNode *N) {
5010 return getInsertVINSERTImmediate(N, 256);
5013 /// isZero - Returns true if V is a constant integer zero
5014 static bool isZero(SDValue V) {
5015 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
5016 return C && C->isNullValue();
5019 /// isZeroNode - Returns true if Elt is a constant zero or a floating-point constant +0.0.
5021 bool X86::isZeroNode(SDValue Elt) {
5024 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Elt))
5025 return CFP->getValueAPF().isPosZero();
5029 /// ShouldXformToMOVHLPS - Return true if the node should be transformed to
5030 /// match movhlps. The lower half elements should come from upper half of
5031 /// V1 (and in order), and the upper half elements should come from the upper
5032 /// half of V2 (and in order).
5033 static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, MVT VT) {
5034 if (!VT.is128BitVector())
5036 if (VT.getVectorNumElements() != 4)
5038 for (unsigned i = 0, e = 2; i != e; ++i)
5039 if (!isUndefOrEqual(Mask[i], i+2))
5041 for (unsigned i = 2; i != 4; ++i)
5042 if (!isUndefOrEqual(Mask[i], i+4))
5047 /// isScalarLoadToVector - Returns true if the node is a scalar load that
5048 /// is promoted to a vector. It also returns the LoadSDNode by reference if required.
5050 static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = nullptr) {
5051 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
5053 N = N->getOperand(0).getNode();
5054 if (!ISD::isNON_EXTLoad(N))
5057 *LD = cast<LoadSDNode>(N);
5061 // Test whether the given value is a vector value which will be legalized into a load.
5063 static bool WillBeConstantPoolLoad(SDNode *N) {
5064 if (N->getOpcode() != ISD::BUILD_VECTOR)
5067 // Check for any non-constant elements.
5068 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
5069 switch (N->getOperand(i).getNode()->getOpcode()) {
5071 case ISD::ConstantFP:
5078 // Vectors of all-zeros and all-ones are materialized with special
5079 // instructions rather than being loaded.
5080 return !ISD::isBuildVectorAllZeros(N) &&
5081 !ISD::isBuildVectorAllOnes(N);
5084 /// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
5085 /// match movlp{s|d}. The lower half elements should come from lower half of
5086 /// V1 (and in order), and the upper half elements should come from the upper
5087 /// half of V2 (and in order). And since V1 will become the source of the
5088 /// MOVLP, it must be either a vector load or a scalar load to vector.
5089 static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
5090 ArrayRef<int> Mask, MVT VT) {
5091 if (!VT.is128BitVector())
5094 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
5096 // If V2 is a vector load, don't do this transformation. We will try to use
5097 // a load-folding shufps op instead.
5098 if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2))
5101 unsigned NumElems = VT.getVectorNumElements();
5103 if (NumElems != 2 && NumElems != 4)
5105 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
5106 if (!isUndefOrEqual(Mask[i], i))
5108 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
5109 if (!isUndefOrEqual(Mask[i], i+NumElems))
5114 /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
5115 /// to a zero vector.
5116 /// FIXME: move to dag combiner / method on ShuffleVectorSDNode
5117 static bool isZeroShuffle(ShuffleVectorSDNode *N) {
5118 SDValue V1 = N->getOperand(0);
5119 SDValue V2 = N->getOperand(1);
5120 unsigned NumElems = N->getValueType(0).getVectorNumElements();
5121 for (unsigned i = 0; i != NumElems; ++i) {
5122 int Idx = N->getMaskElt(i);
5123 if (Idx >= (int)NumElems) {
5124 unsigned Opc = V2.getOpcode();
5125 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
5127 if (Opc != ISD::BUILD_VECTOR ||
5128 !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
5130 } else if (Idx >= 0) {
5131 unsigned Opc = V1.getOpcode();
5132 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
5134 if (Opc != ISD::BUILD_VECTOR ||
5135 !X86::isZeroNode(V1.getOperand(Idx)))
5142 /// getZeroVector - Returns a vector of specified type with all zero elements.
5144 static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
5145 SelectionDAG &DAG, SDLoc dl) {
5146 assert(VT.isVector() && "Expected a vector type");
5148 // Always build SSE zero vectors as <4 x i32> bitcasted
5149 // to their dest type. This ensures they get CSE'd.
5151 if (VT.is128BitVector()) { // SSE
5152 if (Subtarget->hasSSE2()) { // SSE2
5153 SDValue Cst = DAG.getConstant(0, MVT::i32);
5154 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5156 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5157 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
5159 } else if (VT.is256BitVector()) { // AVX
5160 if (Subtarget->hasInt256()) { // AVX2
5161 SDValue Cst = DAG.getConstant(0, MVT::i32);
5162 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5163 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5165 // 256-bit logic and arithmetic instructions in AVX are all
5166 // floating-point, no support for integer ops. Emit fp zeroed vectors.
5167 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5168 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5169 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops);
5171 } else if (VT.is512BitVector()) { // AVX-512
5172 SDValue Cst = DAG.getConstant(0, MVT::i32);
5173 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
5174 Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5175 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
5176 } else if (VT.getScalarType() == MVT::i1) {
5177 assert(VT.getVectorNumElements() <= 16 && "Unexpected vector type");
5178 SDValue Cst = DAG.getConstant(0, MVT::i1);
5179 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
5180 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
5182 llvm_unreachable("Unexpected vector type");
5184 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5187 /// getOnesVector - Returns a vector of specified type with all bits set.
5188 /// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
5189 /// no AVX2 support, use two <4 x i32>s inserted into an <8 x i32> appropriately.
5190 /// Then bitcast to their original type, ensuring they get CSE'd.
5191 static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG,
5193 assert(VT.isVector() && "Expected a vector type");
5195 SDValue Cst = DAG.getConstant(~0U, MVT::i32);
5197 if (VT.is256BitVector()) {
5198 if (HasInt256) { // AVX2
5199 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5200 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5202 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5203 Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
5205 } else if (VT.is128BitVector()) {
5206 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5208 llvm_unreachable("Unexpected vector type");
5210 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5213 /// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
5214 /// that point to V2 point to its first element.
5215 static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) {
5216 for (unsigned i = 0; i != NumElems; ++i) {
5217 if (Mask[i] > (int)NumElems) {
5223 /// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd
5224 /// operation of specified width.
5225 static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
5227 unsigned NumElems = VT.getVectorNumElements();
5228 SmallVector<int, 8> Mask;
5229 Mask.push_back(NumElems);
5230 for (unsigned i = 1; i != NumElems; ++i)
5232 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5235 /// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
5236 static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5238 unsigned NumElems = VT.getVectorNumElements();
5239 SmallVector<int, 8> Mask;
5240 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
5242 Mask.push_back(i + NumElems);
5244 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5247 /// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
5248 static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5250 unsigned NumElems = VT.getVectorNumElements();
5251 SmallVector<int, 8> Mask;
5252 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
5253 Mask.push_back(i + Half);
5254 Mask.push_back(i + NumElems + Half);
5256 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
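// Editorial note, for illustration only: on v4i32, getUnpackl builds the
// mask <0, 4, 1, 5> and getUnpackh builds <2, 6, 3, 7>, interleaving the low
// and high halves of V1 and V2 respectively.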
5259 // PromoteSplati8i16 - All i16 and i8 vector types can't be used directly by
5260 // a generic shuffle instruction because the target has no such instructions.
5261 // Generate shuffles which repeat i16 and i8 several times until they can be
5262 // represented by v4f32 and then be manipulated by target supported shuffles.
5263 static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
5264 MVT VT = V.getSimpleValueType();
5265 int NumElems = VT.getVectorNumElements();
5268 while (NumElems > 4) {
5269 if (EltNo < NumElems/2) {
5270 V = getUnpackl(DAG, dl, VT, V, V);
5272 V = getUnpackh(DAG, dl, VT, V, V);
5273 EltNo -= NumElems/2;
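// Editorial walk-through, for illustration only: splatting element 13 of a
// v16i8 first unpacks the high half (EltNo becomes 13 - 8 = 5 with 8 logical
// elements remaining), then the high half again (5 - 4 = 1), at which point
// four 32-bit chunks remain and getLegalSplat can finish with a v4f32 shuffle.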
5280 /// getLegalSplat - Generate a legal splat with supported x86 shuffles
5281 static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
5282 MVT VT = V.getSimpleValueType();
5285 if (VT.is128BitVector()) {
5286 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
5287 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
5288 V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32),
5290 } else if (VT.is256BitVector()) {
5291 // To use VPERMILPS to splat scalars, the second half of the indices must
5292 // refer to the higher part, which is a duplication of the lower one,
5293 // because VPERMILPS can only handle in-lane permutations.
5294 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo,
5295 EltNo+4, EltNo+4, EltNo+4, EltNo+4 };
5297 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V);
5298 V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32),
5301 llvm_unreachable("Vector size not supported");
5303 return DAG.getNode(ISD::BITCAST, dl, VT, V);
5306 /// PromoteSplat - Splat is promoted to target supported vector shuffles.
5307 static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
5308 MVT SrcVT = SV->getSimpleValueType(0);
5309 SDValue V1 = SV->getOperand(0);
5312 int EltNo = SV->getSplatIndex();
5313 int NumElems = SrcVT.getVectorNumElements();
5314 bool Is256BitVec = SrcVT.is256BitVector();
5316 assert(((SrcVT.is128BitVector() && NumElems > 4) || Is256BitVec) &&
5317 "Unknown how to promote splat for type");
5319 // Extract the 128-bit part containing the splat element and update
5320 // the splat element index when it refers to the higher register.
5322 V1 = Extract128BitVector(V1, EltNo, DAG, dl);
5323 if (EltNo >= NumElems/2)
5324 EltNo -= NumElems/2;
5327 // All i16 and i8 vector types can't be used directly by a generic shuffle
5328 // instruction because the target has no such instruction. Generate shuffles
5329 // which repeat i16 and i8 several times until they fit in i32, and then can
5330 // be manipulated by target supported shuffles.
5331 MVT EltVT = SrcVT.getVectorElementType();
5332 if (EltVT == MVT::i8 || EltVT == MVT::i16)
5333 V1 = PromoteSplati8i16(V1, DAG, EltNo);
5335 // Recreate the 256-bit vector and place the same 128-bit vector
5336 // into the low and high part. This is necessary because we want
5337 // to use VPERM* to shuffle the vectors
5339 V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1);
5342 return getLegalSplat(DAG, V1, EltNo);
5345 /// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
5346 /// vector and a zero or undef vector. This produces a shuffle where the low
5347 /// element of V2 is swizzled into the zero/undef vector, landing at element
5348 /// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
5349 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
5351 const X86Subtarget *Subtarget,
5352 SelectionDAG &DAG) {
5353 MVT VT = V2.getSimpleValueType();
5355 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
5356 unsigned NumElems = VT.getVectorNumElements();
5357 SmallVector<int, 16> MaskVec;
5358 for (unsigned i = 0; i != NumElems; ++i)
5359 // If this is the insertion idx, put the low elt of V2 here.
5360 MaskVec.push_back(i == Idx ? NumElems : i);
5361 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]);
5364 /// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
5365 /// target specific opcode. Returns true if the Mask could be calculated. Sets
5366 /// IsUnary to true if it only uses one source. Note that this will set IsUnary for
5367 /// shuffles which use a single input multiple times, and in those cases it will
5368 /// adjust the mask to only have indices within that single input.
5369 static bool getTargetShuffleMask(SDNode *N, MVT VT,
5370 SmallVectorImpl<int> &Mask, bool &IsUnary) {
5371 unsigned NumElems = VT.getVectorNumElements();
5375 bool IsFakeUnary = false;
5376 switch(N->getOpcode()) {
5377 case X86ISD::BLENDI:
5378 ImmN = N->getOperand(N->getNumOperands()-1);
5379 DecodeBLENDMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5382 ImmN = N->getOperand(N->getNumOperands()-1);
5383 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5384 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5386 case X86ISD::UNPCKH:
5387 DecodeUNPCKHMask(VT, Mask);
5388 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5390 case X86ISD::UNPCKL:
5391 DecodeUNPCKLMask(VT, Mask);
5392 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5394 case X86ISD::MOVHLPS:
5395 DecodeMOVHLPSMask(NumElems, Mask);
5396 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5398 case X86ISD::MOVLHPS:
5399 DecodeMOVLHPSMask(NumElems, Mask);
5400 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5402 case X86ISD::PALIGNR:
5403 ImmN = N->getOperand(N->getNumOperands()-1);
5404 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5406 case X86ISD::PSHUFD:
5407 case X86ISD::VPERMILPI:
5408 ImmN = N->getOperand(N->getNumOperands()-1);
5409 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5412 case X86ISD::PSHUFHW:
5413 ImmN = N->getOperand(N->getNumOperands()-1);
5414 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5417 case X86ISD::PSHUFLW:
5418 ImmN = N->getOperand(N->getNumOperands()-1);
5419 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5422 case X86ISD::PSHUFB: {
5424 SDValue MaskNode = N->getOperand(1);
5425 while (MaskNode->getOpcode() == ISD::BITCAST)
5426 MaskNode = MaskNode->getOperand(0);
5428 if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
5429 // If we have a build-vector, then things are easy.
5430 EVT VT = MaskNode.getValueType();
5431 assert(VT.isVector() &&
5432 "Can't produce a non-vector with a build_vector!");
5433 if (!VT.isInteger())
5436 int NumBytesPerElement = VT.getVectorElementType().getSizeInBits() / 8;
5438 SmallVector<uint64_t, 32> RawMask;
5439 for (int i = 0, e = MaskNode->getNumOperands(); i < e; ++i) {
5440 SDValue Op = MaskNode->getOperand(i);
5441 if (Op->getOpcode() == ISD::UNDEF) {
5442 RawMask.push_back((uint64_t)SM_SentinelUndef);
5445 auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
5448 APInt MaskElement = CN->getAPIntValue();
5450 // We now have to decode the element which could be any integer size and
5451 // extract each byte of it.
5452 for (int j = 0; j < NumBytesPerElement; ++j) {
5453 // Note that this is x86 and so always little endian: the low byte is
5454 // the first byte of the mask.
5455 RawMask.push_back(MaskElement.getLoBits(8).getZExtValue());
5456 MaskElement = MaskElement.lshr(8);
5459 DecodePSHUFBMask(RawMask, Mask);
5463 auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
5467 SDValue Ptr = MaskLoad->getBasePtr();
5468 if (Ptr->getOpcode() == X86ISD::Wrapper)
5469 Ptr = Ptr->getOperand(0);
5471 auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
5472 if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
5475 if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
5476 DecodePSHUFBMask(C, Mask);
5484 case X86ISD::VPERMI:
5485 ImmN = N->getOperand(N->getNumOperands()-1);
5486 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5491 DecodeScalarMoveMask(VT, /* IsLoad */ false, Mask);
5493 case X86ISD::VPERM2X128:
5494 ImmN = N->getOperand(N->getNumOperands()-1);
5495 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5496 if (Mask.empty()) return false;
5498 case X86ISD::MOVSLDUP:
5499 DecodeMOVSLDUPMask(VT, Mask);
5502 case X86ISD::MOVSHDUP:
5503 DecodeMOVSHDUPMask(VT, Mask);
5506 case X86ISD::MOVDDUP:
5507 DecodeMOVDDUPMask(VT, Mask);
5510 case X86ISD::MOVLHPD:
5511 case X86ISD::MOVLPD:
5512 case X86ISD::MOVLPS:
5513 // Not yet implemented
5515 default: llvm_unreachable("unknown target shuffle node");
5518 // If we have a fake unary shuffle, the shuffle mask is spread across two
5519 // inputs that are actually the same node. Re-map the mask to always point
5520 // into the first input.
5523 if (M >= (int)Mask.size())
5529 /// getShuffleScalarElt - Returns the scalar element that will make up the ith
5530 /// element of the result of the vector shuffle.
5531 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
5534 return SDValue(); // Limit search depth.
5536 SDValue V = SDValue(N, 0);
5537 EVT VT = V.getValueType();
5538 unsigned Opcode = V.getOpcode();
5540 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
5541 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
5542 int Elt = SV->getMaskElt(Index);
5545 return DAG.getUNDEF(VT.getVectorElementType());
5547 unsigned NumElems = VT.getVectorNumElements();
5548 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
5549 : SV->getOperand(1);
5550 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
5553 // Recurse into target specific vector shuffles to find scalars.
5554 if (isTargetShuffle(Opcode)) {
5555 MVT ShufVT = V.getSimpleValueType();
5556 unsigned NumElems = ShufVT.getVectorNumElements();
5557 SmallVector<int, 16> ShuffleMask;
5560 if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
5563 int Elt = ShuffleMask[Index];
5565 return DAG.getUNDEF(ShufVT.getVectorElementType());
5567 SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
5569 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
5573 // Actual nodes that may contain scalar elements
5574 if (Opcode == ISD::BITCAST) {
5575 V = V.getOperand(0);
5576 EVT SrcVT = V.getValueType();
5577 unsigned NumElems = VT.getVectorNumElements();
5579 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
5583 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
5584 return (Index == 0) ? V.getOperand(0)
5585 : DAG.getUNDEF(VT.getVectorElementType());
5587 if (V.getOpcode() == ISD::BUILD_VECTOR)
5588 return V.getOperand(Index);
5593 /// getNumOfConsecutiveZeros - Return the number of elements of a vector
5594 /// shuffle operation which consecutively come from zero. The
5595 /// search can start in two different directions, from left or right.
5596 /// We count undefs as zeros until PreferredNum is reached.
5597 static unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp,
5598 unsigned NumElems, bool ZerosFromLeft,
5600 unsigned PreferredNum = -1U) {
5601 unsigned NumZeros = 0;
5602 for (unsigned i = 0; i != NumElems; ++i) {
5603 unsigned Index = ZerosFromLeft ? i : NumElems - i - 1;
5604 SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
5608 if (X86::isZeroNode(Elt))
5610 else if (Elt.getOpcode() == ISD::UNDEF) // Undef as zero up to PreferredNum.
5611 NumZeros = std::min(NumZeros + 1, PreferredNum);
5619 /// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
5620 /// correspond consecutively to elements from one of the vector operands,
5621 /// starting from its index OpIdx. Also sets OpNum to that source vector operand.
5623 bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
5624 unsigned MaskI, unsigned MaskE, unsigned OpIdx,
5625 unsigned NumElems, unsigned &OpNum) {
5626 bool SeenV1 = false;
5627 bool SeenV2 = false;
5629 for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
5630 int Idx = SVOp->getMaskElt(i);
5631 // Ignore undef indices
5635 if (Idx < (int)NumElems)
5640 // Only accept consecutive elements from the same vector
5641 if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
5645 OpNum = SeenV1 ? 0 : 1;
5649 /// isVectorShiftRight - Returns true if the shuffle can be implemented as a
5650 /// logical right shift of a vector.
5651 static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5652 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5654 SVOp->getSimpleValueType(0).getVectorNumElements();
5655 unsigned NumZeros = getNumOfConsecutiveZeros(
5656 SVOp, NumElems, false /* check zeros from right */, DAG,
5657 SVOp->getMaskElt(0));
5663 // Considering the elements in the mask that are not consecutive zeros,
5664 // check if they consecutively come from only one of the source vectors.
5666 // V1 = {X, A, B, C} 0
5668 // vector_shuffle V1, V2 <1, 2, 3, X>
5670 if (!isShuffleMaskConsecutive(SVOp,
5671 0, // Mask Start Index
5672 NumElems-NumZeros, // Mask End Index(exclusive)
5673 NumZeros, // Where to start looking in the src vector
5674 NumElems, // Number of elements in vector
5675 OpSrc)) // Which source operand ?
5680 ShVal = SVOp->getOperand(OpSrc);
5684 /// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
5685 /// logical left shift of a vector.
5686 static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5687 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5689 SVOp->getSimpleValueType(0).getVectorNumElements();
5690 unsigned NumZeros = getNumOfConsecutiveZeros(
5691 SVOp, NumElems, true /* check zeros from left */, DAG,
5692 NumElems - SVOp->getMaskElt(NumElems - 1) - 1);
5698 // Considering the elements in the mask that are not consecutive zeros,
5699 // check if they consecutively come from only one of the source vectors.
5701 // 0 { A, B, X, X } = V2
5703 // vector_shuffle V1, V2 <X, X, 4, 5>
5705 if (!isShuffleMaskConsecutive(SVOp,
5706 NumZeros, // Mask Start Index
5707 NumElems, // Mask End Index(exclusive)
5708 0, // Where to start looking in the src vector
5709 NumElems, // Number of elements in vector
5710 OpSrc)) // Which source operand ?
5715 ShVal = SVOp->getOperand(OpSrc);
5719 /// isVectorShift - Returns true if the shuffle can be implemented as a
5720 /// logical left or right shift of a vector.
5721 static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5722 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5723 // Although the logic below supports any bitwidth size, there are no
5724 // shift instructions which handle more than 128-bit vectors.
5725 if (!SVOp->getSimpleValueType(0).is128BitVector())
5728 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
5729 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt))
5735 /// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
5737 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
5738 unsigned NumNonZero, unsigned NumZero,
5740 const X86Subtarget* Subtarget,
5741 const TargetLowering &TLI) {
5748 for (unsigned i = 0; i < 16; ++i) {
5749 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
5750 if (ThisIsNonZero && First) {
5752 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5754 V = DAG.getUNDEF(MVT::v8i16);
5759 SDValue ThisElt, LastElt;
5760 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
5761 if (LastIsNonZero) {
5762 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
5763 MVT::i16, Op.getOperand(i-1));
5765 if (ThisIsNonZero) {
5766 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
5767 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
5768 ThisElt, DAG.getConstant(8, MVT::i8));
5770 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
5774 if (ThisElt.getNode())
5775 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
5776 DAG.getIntPtrConstant(i/2));
5780 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
5783 /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
5785 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
5786 unsigned NumNonZero, unsigned NumZero,
5788 const X86Subtarget* Subtarget,
5789 const TargetLowering &TLI) {
5796 for (unsigned i = 0; i < 8; ++i) {
5797 bool isNonZero = (NonZeros & (1 << i)) != 0;
5801 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5803 V = DAG.getUNDEF(MVT::v8i16);
5806 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
5807 MVT::v8i16, V, Op.getOperand(i),
5808 DAG.getIntPtrConstant(i));
5815 /// LowerBuildVectorv4x32 - Custom lower build_vector of v4i32 or v4f32.
5816 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
5817 const X86Subtarget *Subtarget,
5818 const TargetLowering &TLI) {
5819 // Find all zeroable elements.
5821 for (int i=0; i < 4; ++i) {
5822 SDValue Elt = Op->getOperand(i);
5823 Zeroable[i] = (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt));
5825 assert(std::count_if(&Zeroable[0], &Zeroable[4],
5826 [](bool M) { return !M; }) > 1 &&
5827 "We expect at least two non-zero elements!");
5829 // We only know how to deal with build_vector nodes where elements are either
5830 // zeroable or extract_vector_elt with constant index.
5831 SDValue FirstNonZero;
5832 unsigned FirstNonZeroIdx;
5833 for (unsigned i=0; i < 4; ++i) {
5836 SDValue Elt = Op->getOperand(i);
5837 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5838 !isa<ConstantSDNode>(Elt.getOperand(1)))
5840 // Make sure that this node is extracting from a 128-bit vector.
5841 MVT VT = Elt.getOperand(0).getSimpleValueType();
5842 if (!VT.is128BitVector())
5844 if (!FirstNonZero.getNode()) {
5846 FirstNonZeroIdx = i;
5850 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
5851 SDValue V1 = FirstNonZero.getOperand(0);
5852 MVT VT = V1.getSimpleValueType();
5854 // See if this build_vector can be lowered as a blend with zero.
5856 unsigned EltMaskIdx, EltIdx;
5858 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
5859 if (Zeroable[EltIdx]) {
5860 // The zero vector will be on the right hand side.
5861 Mask[EltIdx] = EltIdx+4;
5865 Elt = Op->getOperand(EltIdx);
5866 // By construction, Elt is a EXTRACT_VECTOR_ELT with constant index.
5867 EltMaskIdx = cast<ConstantSDNode>(Elt.getOperand(1))->getZExtValue();
5868 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
5870 Mask[EltIdx] = EltIdx;
5874 // Let the shuffle legalizer deal with blend operations.
5875 SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
5876 if (V1.getSimpleValueType() != VT)
5877 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), VT, V1);
5878 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, &Mask[0]);
5881 // See if we can lower this build_vector to an INSERTPS.
5882 if (!Subtarget->hasSSE41())
5885 SDValue V2 = Elt.getOperand(0);
5886 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
5889 bool CanFold = true;
5890 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
5894 SDValue Current = Op->getOperand(i);
5895 SDValue SrcVector = Current->getOperand(0);
5898 CanFold = SrcVector == V1 &&
5899 cast<ConstantSDNode>(Current.getOperand(1))->getZExtValue() == i;
5905 assert(V1.getNode() && "Expected at least two non-zero elements!");
5906 if (V1.getSimpleValueType() != MVT::v4f32)
5907 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), MVT::v4f32, V1);
5908 if (V2.getSimpleValueType() != MVT::v4f32)
5909 V2 = DAG.getNode(ISD::BITCAST, SDLoc(V2), MVT::v4f32, V2);
5911 // Ok, we can emit an INSERTPS instruction.
5913 for (int i = 0; i < 4; ++i)
5917 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
5918 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
5919 SDValue Result = DAG.getNode(X86ISD::INSERTPS, SDLoc(Op), MVT::v4f32, V1, V2,
5920 DAG.getIntPtrConstant(InsertPSMask));
5921 return DAG.getNode(ISD::BITCAST, SDLoc(Op), VT, Result);
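// Editorial note, for illustration only: the INSERTPS immediate built above
// packs the source element index into bits [7:6], the destination slot into
// bits [5:4], and the zero mask into bits [3:0]; e.g. copying V2[2] into slot
// 1 while zeroing element 3 encodes as (2 << 6) | (1 << 4) | 0x8 = 0x98.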
5924 /// Return a vector logical shift node.
5925 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
5926 unsigned NumBits, SelectionDAG &DAG,
5927 const TargetLowering &TLI, SDLoc dl) {
5928 assert(VT.is128BitVector() && "Unknown type for VShift");
5929 MVT ShVT = MVT::v2i64;
5930 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
5931 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
5932 MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(SrcOp.getValueType());
5933 SDValue ShiftVal = DAG.getConstant(NumBits, ScalarShiftTy);
5934 return DAG.getNode(ISD::BITCAST, dl, VT,
5935 DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
5939 LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
5941 // Check if the scalar load can be widened into a vector load. And if
5942 // the address is "base + cst" see if the cst can be "absorbed" into
5943 // the shuffle mask.
5944 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
5945 SDValue Ptr = LD->getBasePtr();
5946 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
5948 EVT PVT = LD->getValueType(0);
5949 if (PVT != MVT::i32 && PVT != MVT::f32)
5954 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
5955 FI = FINode->getIndex();
5957 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
5958 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
5959 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5960 Offset = Ptr.getConstantOperandVal(1);
5961 Ptr = Ptr.getOperand(0);
5966 // FIXME: 256-bit vector instructions don't require a strict alignment,
5967 // improve this code to support it better.
5968 unsigned RequiredAlign = VT.getSizeInBits()/8;
5969 SDValue Chain = LD->getChain();
5970 // Make sure the stack object alignment is at least 16 or 32.
5971 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
5972 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
5973 if (MFI->isFixedObjectIndex(FI)) {
5974 // Can't change the alignment. FIXME: It's possible to compute
5975 // the exact stack offset and reference FI + adjust offset instead.
5976 // If someone *really* cares about this. That's the way to implement it.
5979 MFI->setObjectAlignment(FI, RequiredAlign);
5983 // (Offset % 16 or 32) must be a multiple of 4. The address is then
5984 // Ptr + (Offset & ~15).
5987 if ((Offset % RequiredAlign) & 3)
5989 int64_t StartOffset = Offset & ~(RequiredAlign-1);
5991 Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(),
5992 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType()));
5994 int EltNo = (Offset - StartOffset) >> 2;
5995 unsigned NumElems = VT.getVectorNumElements();
5997 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
5998 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
5999 LD->getPointerInfo().getWithOffset(StartOffset),
6000 false, false, false, 0);
6002 SmallVector<int, 8> Mask;
6003 for (unsigned i = 0; i != NumElems; ++i)
6004 Mask.push_back(EltNo);
6006 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
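// Editorial note, for illustration only: widening a 4-byte load at stack
// offset 20 into a 16-byte aligned v4f32 load rewrites the pointer to offset
// 20 & ~15 = 16 and splats element (20 - 16) / 4 = 1 of the widened load.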
6012 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
6013 /// elements can be replaced by a single large load which has the same value as
6014 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
6016 /// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
6018 /// FIXME: we'd also like to handle the case where the last elements are zero
6019 /// rather than undef via VZEXT_LOAD, but we do not detect that case today.
6020 /// There's even a handy isZeroNode for that purpose.
6021 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
6022 SDLoc &DL, SelectionDAG &DAG,
6023 bool isAfterLegalize) {
6024 unsigned NumElems = Elts.size();
6026 LoadSDNode *LDBase = nullptr;
6027 unsigned LastLoadedElt = -1U;
6029 // For each element in the initializer, see if we've found a load or an undef.
6030 // If we don't find an initial load element, or later load elements are
6031 // non-consecutive, bail out.
6032 for (unsigned i = 0; i < NumElems; ++i) {
6033 SDValue Elt = Elts[i];
6034 // Look through a bitcast.
6035 if (Elt.getNode() && Elt.getOpcode() == ISD::BITCAST)
6036 Elt = Elt.getOperand(0);
6037 if (!Elt.getNode() ||
6038 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
6041 if (Elt.getNode()->getOpcode() == ISD::UNDEF)
6043 LDBase = cast<LoadSDNode>(Elt.getNode());
6047 if (Elt.getOpcode() == ISD::UNDEF)
6050 LoadSDNode *LD = cast<LoadSDNode>(Elt);
6051 EVT LdVT = Elt.getValueType();
6052 // Each loaded element must be the correct fractional portion of the
6053 // requested vector load.
6054 if (LdVT.getSizeInBits() != VT.getSizeInBits() / NumElems)
6056 if (!DAG.isConsecutiveLoad(LD, LDBase, LdVT.getSizeInBits() / 8, i))
6061 // If we have found an entire vector of loads and undefs, then return a large
6062 // load of the entire vector width starting at the base pointer. If we found
6063 // consecutive loads for the low half, generate a vzext_load node.
6064 if (LastLoadedElt == NumElems - 1) {
6065 assert(LDBase && "Did not find base load for merging consecutive loads");
6066 EVT EltVT = LDBase->getValueType(0);
6067 // Ensure that the input vector size for the merged loads matches the
6068 // cumulative size of the input elements.
6069 if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems)
6072 if (isAfterLegalize &&
6073 !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
6076 SDValue NewLd = SDValue();
6078 NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
6079 LDBase->getPointerInfo(), LDBase->isVolatile(),
6080 LDBase->isNonTemporal(), LDBase->isInvariant(),
6081 LDBase->getAlignment());
6083 if (LDBase->hasAnyUseOfValue(1)) {
6084 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6086 SDValue(NewLd.getNode(), 1));
6087 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6088 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6089 SDValue(NewLd.getNode(), 1));
6095 // TODO: The code below fires only for loading the low v2i32 / v2f32
6096 //of a v4i32 / v4f32. It's probably worth generalizing.
6097 EVT EltVT = VT.getVectorElementType();
6098 if (NumElems == 4 && LastLoadedElt == 1 && (EltVT.getSizeInBits() == 32) &&
6099 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
6100 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
6101 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
6103 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::i64,
6104 LDBase->getPointerInfo(),
6105 LDBase->getAlignment(),
6106 false/*isVolatile*/, true/*ReadMem*/,
6109 // Make sure the newly-created LOAD is in the same position as LDBase in
6110 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and
6111 // update uses of LDBase's output chain to use the TokenFactor.
6112 if (LDBase->hasAnyUseOfValue(1)) {
6113 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6114 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1));
6115 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6116 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6117 SDValue(ResNode.getNode(), 1));
6120 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
6125 /// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
6126 /// to generate a splat value for the following cases:
6127 /// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
6128 /// 2. A splat shuffle which uses a scalar_to_vector node which comes from
6129 /// a scalar load, or a constant.
6130 /// The VBROADCAST node is returned when a pattern is found,
6131 /// or SDValue() otherwise.
6132 static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
6133 SelectionDAG &DAG) {
6134 // VBROADCAST requires AVX.
6135 // TODO: Splats could be generated for non-AVX CPUs using SSE
6136 // instructions, but there's less potential gain for only 128-bit vectors.
6137 if (!Subtarget->hasAVX())
6140 MVT VT = Op.getSimpleValueType();
6143 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
6144 "Unsupported vector type for broadcast.");
6149 switch (Op.getOpcode()) {
6151 // Unknown pattern found.
6154 case ISD::BUILD_VECTOR: {
6155 auto *BVOp = cast<BuildVectorSDNode>(Op.getNode());
6156 BitVector UndefElements;
6157 SDValue Splat = BVOp->getSplatValue(&UndefElements);
6159 // We need a splat of a single value to use broadcast, and it doesn't
6160 // make any sense if the value is only in one element of the vector.
6161 if (!Splat || (VT.getVectorNumElements() - UndefElements.count()) <= 1)
6165 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6166 Ld.getOpcode() == ISD::ConstantFP);
6168 // Make sure that all of the users of a non-constant load are from the
6169 // BUILD_VECTOR node.
6170 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
6175 case ISD::VECTOR_SHUFFLE: {
6176 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
6178 // Shuffles must have a splat mask where the first element of the mask is zero.
6180 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0)
6183 SDValue Sc = Op.getOperand(0);
6184 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
6185 Sc.getOpcode() != ISD::BUILD_VECTOR) {
6187 if (!Subtarget->hasInt256())
6190 // Use the register form of the broadcast instruction available on AVX2.
6191 if (VT.getSizeInBits() >= 256)
6192 Sc = Extract128BitVector(Sc, 0, DAG, dl);
6193 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
6196 Ld = Sc.getOperand(0);
6197 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6198 Ld.getOpcode() == ISD::ConstantFP);
6200 // The scalar_to_vector node and the suspected
6201 // load node must have exactly one user.
6202 // Constants may have multiple users.
6204 // AVX-512 has register version of the broadcast
6205 bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() &&
6206 Ld.getValueType().getSizeInBits() >= 32;
6207 if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
6214 unsigned ScalarSize = Ld.getValueType().getSizeInBits();
6215 bool IsGE256 = (VT.getSizeInBits() >= 256);
6217 // When optimizing for size, generate up to 5 extra bytes for a broadcast
6218 // instruction to save 8 or more bytes of constant pool data.
6219 // TODO: If multiple splats are generated to load the same constant,
6220 // it may be detrimental to overall size. There needs to be a way to detect
6221 // that condition to know if this is truly a size win.
6222 const Function *F = DAG.getMachineFunction().getFunction();
6223 bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize);
6225 // Handle broadcasting a single constant scalar from the constant pool into a vector.
6227 // On Sandybridge (no AVX2), it is still better to load a constant vector
6228 // from the constant pool and not to broadcast it from a scalar.
6229 // But override that restriction when optimizing for size.
6230 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
6231 if (ConstSplatVal && (Subtarget->hasAVX2() || OptForSize)) {
6232 EVT CVT = Ld.getValueType();
6233 assert(!CVT.isVector() && "Must not broadcast a vector type");
6235 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
6236 // For size optimization, also splat v2f64 and v2i64, and for size opt
6237 // with AVX2, also splat i8 and i16.
6238 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
6239 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6240 (OptForSize && (ScalarSize == 64 || Subtarget->hasAVX2()))) {
6241 const Constant *C = nullptr;
6242 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
6243 C = CI->getConstantIntValue();
6244 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
6245 C = CF->getConstantFPValue();
6247 assert(C && "Invalid constant type");
6249 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6250 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
6251 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
6252 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP,
6253 MachinePointerInfo::getConstantPool(),
6254 false, false, false, Alignment);
6256 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6260 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
6262 // Handle AVX2 in-register broadcasts.
6263 if (!IsLoad && Subtarget->hasInt256() &&
6264 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
6265 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6267 // The scalar source must be a normal load.
6271 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6272 (Subtarget->hasVLX() && ScalarSize == 64))
6273 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6275 // The integer check is needed for the 64-bit into 128-bit case, so it doesn't
6276 // match double, since there is no vbroadcastsd xmm instruction.
6277 if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) {
6278 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
6279 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6282 // Unsupported broadcast.
6286 /// \brief For an EXTRACT_VECTOR_ELT with a constant index return the real
6287 /// underlying vector and index.
6289 /// Modifies \p ExtractedFromVec to the real vector and returns the real index.
6291 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
6293 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
6294 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
6297 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
6299 // (extract_vector_elt (v8f32 %vreg1), Constant<6>)
6301 // (extract_vector_elt (vector_shuffle<2,u,u,u>
6302 // (extract_subvector (v8f32 %vreg0), Constant<4>),
6305 // In this case the vector is the extract_subvector expression and the index
6306 // is 2, as specified by the shuffle.
6307 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
6308 SDValue ShuffleVec = SVOp->getOperand(0);
6309 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
6310 assert(ShuffleVecVT.getVectorElementType() ==
6311 ExtractedFromVec.getSimpleValueType().getVectorElementType());
6313 int ShuffleIdx = SVOp->getMaskElt(Idx);
6314 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
6315 ExtractedFromVec = ShuffleVec;
6321 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
6322 MVT VT = Op.getSimpleValueType();
6324 // Skip if insert_vec_elt is not supported.
6325 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6326 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
6330 unsigned NumElems = Op.getNumOperands();
6334 SmallVector<unsigned, 4> InsertIndices;
6335 SmallVector<int, 8> Mask(NumElems, -1);
6337 for (unsigned i = 0; i != NumElems; ++i) {
6338 unsigned Opc = Op.getOperand(i).getOpcode();
6340 if (Opc == ISD::UNDEF)
6343 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
6344 // Quit if more than one element needs inserting.
6345 if (InsertIndices.size() > 1)
6348 InsertIndices.push_back(i);
6352 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
6353 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
6354 // Quit if non-constant index.
6355 if (!isa<ConstantSDNode>(ExtIdx))
6357 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
6359 // Quit if extracted from vector of different type.
6360 if (ExtractedFromVec.getValueType() != VT)
6363 if (!VecIn1.getNode())
6364 VecIn1 = ExtractedFromVec;
6365 else if (VecIn1 != ExtractedFromVec) {
6366 if (!VecIn2.getNode())
6367 VecIn2 = ExtractedFromVec;
6368 else if (VecIn2 != ExtractedFromVec)
6369 // Quit if more than 2 vectors to shuffle
6373 if (ExtractedFromVec == VecIn1)
6375 else if (ExtractedFromVec == VecIn2)
6376 Mask[i] = Idx + NumElems;
6379 if (!VecIn1.getNode())
6382 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
6383 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]);
6384 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
6385 unsigned Idx = InsertIndices[i];
6386 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
6387 DAG.getIntPtrConstant(Idx));
6393 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
6395 X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
6397 MVT VT = Op.getSimpleValueType();
6398 assert((VT.getVectorElementType() == MVT::i1) && (VT.getSizeInBits() <= 16) &&
6399 "Unexpected type in LowerBUILD_VECTORvXi1!");
6402 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6403 SDValue Cst = DAG.getTargetConstant(0, MVT::i1);
6404 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6405 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6408 if (ISD::isBuildVectorAllOnes(Op.getNode())) {
6409 SDValue Cst = DAG.getTargetConstant(1, MVT::i1);
6410 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6411 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6414 bool AllContants = true;
6415 uint64_t Immediate = 0;
6416 int NonConstIdx = -1;
6417 bool IsSplat = true;
6418 unsigned NumNonConsts = 0;
6419 unsigned NumConsts = 0;
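// Walk the operands, collecting the constant lanes into an immediate bit mask.
// Illustrative example: a constant v8i1 <1,0,0,1,0,0,0,0> yields
// Immediate == 0b00001001 (one bit set per true lane, indexed by operand).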
6420 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
6421 SDValue In = Op.getOperand(idx);
6422 if (In.getOpcode() == ISD::UNDEF)
6424 if (!isa<ConstantSDNode>(In)) {
6425 AllContants = false;
6430 if (cast<ConstantSDNode>(In)->getZExtValue())
6431 Immediate |= (1ULL << idx);
6433 if (In != Op.getOperand(0))
6438 SDValue FullMask = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1,
6439 DAG.getConstant(Immediate, MVT::i16));
6440 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, FullMask,
6441 DAG.getIntPtrConstant(0));
6444 if (NumNonConsts == 1 && NonConstIdx != 0) {
6447 SDValue VecAsImm = DAG.getConstant(Immediate,
6448 MVT::getIntegerVT(VT.getSizeInBits()));
6449 DstVec = DAG.getNode(ISD::BITCAST, dl, VT, VecAsImm);
6452 DstVec = DAG.getUNDEF(VT);
6453 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
6454 Op.getOperand(NonConstIdx),
6455 DAG.getIntPtrConstant(NonConstIdx));
6457 if (!IsSplat && (NonConstIdx != 0))
6458 llvm_unreachable("Unsupported BUILD_VECTOR operation");
6459 MVT SelectVT = (VT == MVT::v16i1)? MVT::i16 : MVT::i8;
6462 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6463 DAG.getConstant(-1, SelectVT),
6464 DAG.getConstant(0, SelectVT));
6466 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6467 DAG.getConstant((Immediate | 1), SelectVT),
6468 DAG.getConstant(Immediate, SelectVT));
6469 return DAG.getNode(ISD::BITCAST, dl, VT, Select);
6472 /// \brief Return true if \p N implements a horizontal binop, and place the
6473 /// operands of that binop into V0 and V1.
6475 /// This is a helper function of PerformBUILD_VECTORCombine.
6476 /// This function checks that the build_vector \p N in input implements a
6477 /// horizontal operation. Parameter \p Opcode defines the kind of horizontal
6478 /// operation to match.
6479 /// For example, if \p Opcode is equal to ISD::ADD, then this function
6480 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
6481 /// is equal to ISD::SUB, then this function checks if this is a horizontal arithmetic sub.
6484 /// This function only analyzes elements of \p N whose indices are
6485 /// in range [BaseIdx, LastIdx).
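///
/// Illustrative example (with made-up inputs A and B): analyzed with
/// \p Opcode == ISD::FADD over the range [0, 4), a v4f32 build_vector of
///   (fadd (extract_vector_elt A, 0), (extract_vector_elt A, 1)),
///   (fadd (extract_vector_elt A, 2), (extract_vector_elt A, 3)),
///   (fadd (extract_vector_elt B, 0), (extract_vector_elt B, 1)),
///   (fadd (extract_vector_elt B, 2), (extract_vector_elt B, 3))
/// is matched with V0 == A and V1 == B, which is exactly what a single HADDPS
/// computes.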
6486 static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
6488 unsigned BaseIdx, unsigned LastIdx,
6489 SDValue &V0, SDValue &V1) {
6490 EVT VT = N->getValueType(0);
6492 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
6493 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
6494 "Invalid Vector in input!");
6496 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
6497 bool CanFold = true;
6498 unsigned ExpectedVExtractIdx = BaseIdx;
6499 unsigned NumElts = LastIdx - BaseIdx;
6500 V0 = DAG.getUNDEF(VT);
6501 V1 = DAG.getUNDEF(VT);
6503 // Check if N implements a horizontal binop.
6504 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
6505 SDValue Op = N->getOperand(i + BaseIdx);
6508 if (Op->getOpcode() == ISD::UNDEF) {
6509 // Update the expected vector extract index.
6510 if (i * 2 == NumElts)
6511 ExpectedVExtractIdx = BaseIdx;
6512 ExpectedVExtractIdx += 2;
6516 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
6521 SDValue Op0 = Op.getOperand(0);
6522 SDValue Op1 = Op.getOperand(1);
6524 // Try to match the following pattern:
6525 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
6526 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6527 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6528 Op0.getOperand(0) == Op1.getOperand(0) &&
6529 isa<ConstantSDNode>(Op0.getOperand(1)) &&
6530 isa<ConstantSDNode>(Op1.getOperand(1)));
6534 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6535 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
6537 if (i * 2 < NumElts) {
6538 if (V0.getOpcode() == ISD::UNDEF)
6539 V0 = Op0.getOperand(0);
6541 if (V1.getOpcode() == ISD::UNDEF)
6542 V1 = Op0.getOperand(0);
6543 if (i * 2 == NumElts)
6544 ExpectedVExtractIdx = BaseIdx;
6547 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
6548 if (I0 == ExpectedVExtractIdx)
6549 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
6550 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
6551 // Try to match the following dag sequence:
6552 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
6553 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
6557 ExpectedVExtractIdx += 2;
6563 /// \brief Emit a sequence of two 128-bit horizontal add/sub followed by
6564 /// a concat_vector.
6566 /// This is a helper function of PerformBUILD_VECTORCombine.
6567 /// This function expects two 256-bit vectors called V0 and V1.
6568 /// At first, each vector is split into two separate 128-bit vectors.
6569 /// Then, the resulting 128-bit vectors are used to implement two
6570 /// horizontal binary operations.
6572 /// The kind of horizontal binary operation is defined by \p X86Opcode.
6574 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed as input to
6575 /// the two new horizontal binops.
6576 /// When Mode is set, the first horizontal binop dag node takes as input the
6577 /// lower 128 bits of V0 and the upper 128 bits of V0. The second
6578 /// horizontal binop dag node takes as input the lower 128 bits of V1
6579 /// and the upper 128 bits of V1.
6581 /// HADD V0_LO, V0_HI
6582 /// HADD V1_LO, V1_HI
6584 /// Otherwise, the first horizontal binop dag node takes as input the lower
6585 /// 128 bits of V0 and the lower 128 bits of V1, and the second horizontal binop
6586 /// dag node takes the upper 128 bits of V0 and the upper 128 bits of V1.
6588 /// HADD V0_LO, V1_LO
6589 /// HADD V0_HI, V1_HI
6591 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
6592 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
6593 /// the upper 128-bits of the result.
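///
/// Illustrative example: with \p X86Opcode == X86ISD::HADD, \p Mode unset, and
/// v8i32 inputs, the node built here is
///   (concat_vectors (HADD V0_LO, V1_LO), (HADD V0_HI, V1_HI))
/// modulo the UNDEF propagation controlled by \p isUndefLO and \p isUndefHI.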
6594 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
6595 SDLoc DL, SelectionDAG &DAG,
6596 unsigned X86Opcode, bool Mode,
6597 bool isUndefLO, bool isUndefHI) {
6598 EVT VT = V0.getValueType();
6599 assert(VT.is256BitVector() && VT == V1.getValueType() &&
6600 "Invalid nodes in input!");
6602 unsigned NumElts = VT.getVectorNumElements();
6603 SDValue V0_LO = Extract128BitVector(V0, 0, DAG, DL);
6604 SDValue V0_HI = Extract128BitVector(V0, NumElts/2, DAG, DL);
6605 SDValue V1_LO = Extract128BitVector(V1, 0, DAG, DL);
6606 SDValue V1_HI = Extract128BitVector(V1, NumElts/2, DAG, DL);
6607 EVT NewVT = V0_LO.getValueType();
6609 SDValue LO = DAG.getUNDEF(NewVT);
6610 SDValue HI = DAG.getUNDEF(NewVT);
6613 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6614 if (!isUndefLO && V0->getOpcode() != ISD::UNDEF)
6615 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
6616 if (!isUndefHI && V1->getOpcode() != ISD::UNDEF)
6617 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
6619 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6620 if (!isUndefLO && (V0_LO->getOpcode() != ISD::UNDEF ||
6621 V1_LO->getOpcode() != ISD::UNDEF))
6622 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
6624 if (!isUndefHI && (V0_HI->getOpcode() != ISD::UNDEF ||
6625 V1_HI->getOpcode() != ISD::UNDEF))
6626 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
6629 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
6632 /// \brief Try to fold a build_vector that performs an 'addsub' into the
6633 /// sequence of 'vadd + vsub + blendi'.
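///
/// Illustrative example (with made-up inputs A and B): a v4f32 build_vector of
///   (fsub (extract_vector_elt A, 0), (extract_vector_elt B, 0)),
///   (fadd (extract_vector_elt A, 1), (extract_vector_elt B, 1)),
///   (fsub (extract_vector_elt A, 2), (extract_vector_elt B, 2)),
///   (fadd (extract_vector_elt A, 3), (extract_vector_elt B, 3))
/// is folded into (X86ISD::ADDSUB A, B), which selects to ADDSUBPS.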
6634 static SDValue matchAddSub(const BuildVectorSDNode *BV, SelectionDAG &DAG,
6635 const X86Subtarget *Subtarget) {
6637 EVT VT = BV->getValueType(0);
6638 unsigned NumElts = VT.getVectorNumElements();
6639 SDValue InVec0 = DAG.getUNDEF(VT);
6640 SDValue InVec1 = DAG.getUNDEF(VT);
6642 assert((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
6643 VT == MVT::v2f64) && "build_vector with an invalid type found!");
6645 // Odd-numbered elements in the input build vector are obtained from
6646 // adding two integer/float elements.
6647 // Even-numbered elements in the input build vector are obtained from
6648 // subtracting two integer/float elements.
6649 unsigned ExpectedOpcode = ISD::FSUB;
6650 unsigned NextExpectedOpcode = ISD::FADD;
6651 bool AddFound = false;
6652 bool SubFound = false;
6654 for (unsigned i = 0, e = NumElts; i != e; ++i) {
6655 SDValue Op = BV->getOperand(i);
6657 // Skip 'undef' values.
6658 unsigned Opcode = Op.getOpcode();
6659 if (Opcode == ISD::UNDEF) {
6660 std::swap(ExpectedOpcode, NextExpectedOpcode);
6664 // Early exit if we found an unexpected opcode.
6665 if (Opcode != ExpectedOpcode)
6668 SDValue Op0 = Op.getOperand(0);
6669 SDValue Op1 = Op.getOperand(1);
6671 // Try to match the following pattern:
6672 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
6673 // Early exit if we cannot match that sequence.
6674 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6675 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6676 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
6677 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
6678 Op0.getOperand(1) != Op1.getOperand(1))
6681 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6685 // We found a valid add/sub node. Update the information accordingly.
6691 // Update InVec0 and InVec1.
6692 if (InVec0.getOpcode() == ISD::UNDEF)
6693 InVec0 = Op0.getOperand(0);
6694 if (InVec1.getOpcode() == ISD::UNDEF)
6695 InVec1 = Op1.getOperand(0);
6697 // Make sure that the operands of each add/sub node always
6698 // come from the same pair of vectors.
6699 if (InVec0 != Op0.getOperand(0)) {
6700 if (ExpectedOpcode == ISD::FSUB)
6703 // FADD is commutable. Try to commute the operands
6704 // and then test again.
6705 std::swap(Op0, Op1);
6706 if (InVec0 != Op0.getOperand(0))
6710 if (InVec1 != Op1.getOperand(0))
6713 // Update the pair of expected opcodes.
6714 std::swap(ExpectedOpcode, NextExpectedOpcode);
6717 // Don't try to fold this build_vector into an ADDSUB if the inputs are undef.
6718 if (AddFound && SubFound && InVec0.getOpcode() != ISD::UNDEF &&
6719 InVec1.getOpcode() != ISD::UNDEF)
6720 return DAG.getNode(X86ISD::ADDSUB, DL, VT, InVec0, InVec1);
6725 static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
6726 const X86Subtarget *Subtarget) {
6728 EVT VT = N->getValueType(0);
6729 unsigned NumElts = VT.getVectorNumElements();
6730 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);
6731 SDValue InVec0, InVec1;
6733 // Try to match an ADDSUB.
6734 if ((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
6735 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
6736 SDValue Value = matchAddSub(BV, DAG, Subtarget);
6737 if (Value.getNode())
6741 // Try to match horizontal ADD/SUB.
6742 unsigned NumUndefsLO = 0;
6743 unsigned NumUndefsHI = 0;
6744 unsigned Half = NumElts/2;
6746 // Count the number of UNDEF operands in the input build_vector.
6747 for (unsigned i = 0, e = Half; i != e; ++i)
6748 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6751 for (unsigned i = Half, e = NumElts; i != e; ++i)
6752 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6755 // Early exit if this is either a build_vector of all UNDEFs or if all but
6756 // one of the operands are UNDEF.
6757 if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
6760 if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget->hasSSE3()) {
6761 // Try to match an SSE3 float HADD/HSUB.
6762 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6763 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6765 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6766 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6767 } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) {
6768 // Try to match an SSSE3 integer HADD/HSUB.
6769 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6770 return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
6772 if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6773 return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
6776 if (!Subtarget->hasAVX())
6779 if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
6780 // Try to match an AVX horizontal add/sub of packed single/double
6781 // precision floating point values from 256-bit vectors.
6782 SDValue InVec2, InVec3;
6783 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
6784 isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
6785 ((InVec0.getOpcode() == ISD::UNDEF ||
6786 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6787 ((InVec1.getOpcode() == ISD::UNDEF ||
6788 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6789 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6791 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
6792 isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
6793 ((InVec0.getOpcode() == ISD::UNDEF ||
6794 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6795 ((InVec1.getOpcode() == ISD::UNDEF ||
6796 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6797 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6798 } else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
6799 // Try to match an AVX2 horizontal add/sub of signed integers.
6800 SDValue InVec2, InVec3;
6802 bool CanFold = true;
6804 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
6805 isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
6806 ((InVec0.getOpcode() == ISD::UNDEF ||
6807 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6808 ((InVec1.getOpcode() == ISD::UNDEF ||
6809 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6810 X86Opcode = X86ISD::HADD;
6811 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
6812 isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
6813 ((InVec0.getOpcode() == ISD::UNDEF ||
6814 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6815 ((InVec1.getOpcode() == ISD::UNDEF ||
6816 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6817 X86Opcode = X86ISD::HSUB;
6822 // Fold this build_vector into a single horizontal add/sub.
6823 // Do this only if the target has AVX2.
6824 if (Subtarget->hasAVX2())
6825 return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
6827 // Do not try to expand this build_vector into a pair of horizontal
6828 // add/sub if we can emit a pair of scalar add/sub.
6829 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6832 // Convert this build_vector into a pair of horizontal binops followed by a concat vector.
6834 bool isUndefLO = NumUndefsLO == Half;
6835 bool isUndefHI = NumUndefsHI == Half;
6836 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
6837 isUndefLO, isUndefHI);
6841 if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
6842 VT == MVT::v16i16) && Subtarget->hasAVX()) {
6844 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6845 X86Opcode = X86ISD::HADD;
6846 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6847 X86Opcode = X86ISD::HSUB;
6848 else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6849 X86Opcode = X86ISD::FHADD;
6850 else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6851 X86Opcode = X86ISD::FHSUB;
6855 // Don't try to expand this build_vector into a pair of horizontal add/sub
6856 // if we can simply emit a pair of scalar add/sub.
6857 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6860 // Convert this build_vector into two horizontal add/sub operations followed by a concat vector.
6862 bool isUndefLO = NumUndefsLO == Half;
6863 bool isUndefHI = NumUndefsHI == Half;
6864 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
6865 isUndefLO, isUndefHI);
6872 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
6875 MVT VT = Op.getSimpleValueType();
6876 MVT ExtVT = VT.getVectorElementType();
6877 unsigned NumElems = Op.getNumOperands();
6879 // Handle build_vectors of i1 predicate vectors separately.
6880 if (VT.getScalarType() == MVT::i1 && Subtarget->hasAVX512())
6881 return LowerBUILD_VECTORvXi1(Op, DAG);
6883 // Vectors containing all zeros can be matched by pxor and xorps later
6884 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6885 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
6886 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
6887 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
6890 return getZeroVector(VT, Subtarget, DAG, dl);
6893 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
6894 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
6895 // vpcmpeqd on 256-bit vectors.
6896 if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
6897 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256()))
6900 if (!VT.is512BitVector())
6901 return getOnesVector(VT, Subtarget->hasInt256(), DAG, dl);
6904 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
6905 if (Broadcast.getNode())
6908 unsigned EVTBits = ExtVT.getSizeInBits();
6910 unsigned NumZero = 0;
6911 unsigned NumNonZero = 0;
6912 unsigned NonZeros = 0;
6913 bool IsAllConstants = true;
6914 SmallSet<SDValue, 8> Values;
6915 for (unsigned i = 0; i < NumElems; ++i) {
6916 SDValue Elt = Op.getOperand(i);
6917 if (Elt.getOpcode() == ISD::UNDEF)
6920 if (Elt.getOpcode() != ISD::Constant &&
6921 Elt.getOpcode() != ISD::ConstantFP)
6922 IsAllConstants = false;
6923 if (X86::isZeroNode(Elt))
6926 NonZeros |= (1 << i);
6931 // All undef vector. Return an UNDEF. All zero vectors were handled above.
6932 if (NumNonZero == 0)
6933 return DAG.getUNDEF(VT);
6935 // Special case for a single non-zero, non-undef element.
6936 if (NumNonZero == 1) {
6937 unsigned Idx = countTrailingZeros(NonZeros);
6938 SDValue Item = Op.getOperand(Idx);
6940 // If this is an insertion of an i64 value on x86-32, and if the top bits of
6941 // the value are obviously zero, truncate the value to i32 and do the
6942 // insertion that way. Only do this if the value is non-constant or if the
6943 // value is a constant being inserted into element 0. It is cheaper to do
6944 // a constant pool load than it is to do a movd + shuffle.
6945 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
6946 (!IsAllConstants || Idx == 0)) {
6947 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
6949 assert(VT == MVT::v2i64 && "Expected an SSE value type!");
6950 EVT VecVT = MVT::v4i32;
6951 unsigned VecElts = 4;
6953 // Truncate the value (which may itself be a constant) to i32, and
6954 // convert it to a vector with movd (S2V+shuffle to zero extend).
6955 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
6956 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
6958 // If using the new shuffle lowering, just directly insert this.
6959 if (ExperimentalVectorShuffleLowering)
6961 ISD::BITCAST, dl, VT,
6962 getShuffleVectorZeroOrUndef(Item, Idx * 2, true, Subtarget, DAG));
6964 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6966 // Now we have our 32-bit value zero extended in the low element of
6967 // a vector. If Idx != 0, swizzle it into place.
6969 SmallVector<int, 4> Mask;
6970 Mask.push_back(Idx);
6971 for (unsigned i = 1; i != VecElts; ++i)
6973 Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT),
6976 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
6980 // If we have a constant or non-constant insertion into the low element of
6981 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
6982 // the rest of the elements. This will be matched as movd/movq/movss/movsd
6983 // depending on what the source datatype is.
6986 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6988 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
6989 (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
6990 if (VT.is256BitVector() || VT.is512BitVector()) {
6991 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
6992 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
6993 Item, DAG.getIntPtrConstant(0));
6995 assert(VT.is128BitVector() && "Expected an SSE value type!");
6996 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6997 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
6998 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7001 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
7002 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
7003 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
7004 if (VT.is256BitVector()) {
7005 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
7006 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
7008 assert(VT.is128BitVector() && "Expected an SSE value type!");
7009 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7011 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
7015 // Is it a vector logical left shift?
7016 if (NumElems == 2 && Idx == 1 &&
7017 X86::isZeroNode(Op.getOperand(0)) &&
7018 !X86::isZeroNode(Op.getOperand(1))) {
7019 unsigned NumBits = VT.getSizeInBits();
7020 return getVShift(true, VT,
7021 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
7022 VT, Op.getOperand(1)),
7023 NumBits/2, DAG, *this, dl);
7026 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
7029 // Otherwise, if this is a vector with i32 or f32 elements, and the element
7030 // is a non-constant being inserted into an element other than the low one,
7031 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
7032 // movd/movss) to move this into the low element, then shuffle it into place.
7034 if (EVTBits == 32) {
7035 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7037 // If using the new shuffle lowering, just directly insert this.
7038 if (ExperimentalVectorShuffleLowering)
7039 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
7041 // Turn it into a shuffle of zero and zero-extended scalar to vector.
7042 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG);
7043 SmallVector<int, 8> MaskVec;
7044 for (unsigned i = 0; i != NumElems; ++i)
7045 MaskVec.push_back(i == Idx ? 0 : 1);
7046 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
7050 // Splat is obviously ok. Let legalizer expand it to a shuffle.
7051 if (Values.size() == 1) {
7052 if (EVTBits == 32) {
7053 // Instead of a shuffle like this:
7054 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
7055 // Check if it's possible to issue this instead.
7056 // shuffle (vload ptr), undef, <1, 1, 1, 1>
7057 unsigned Idx = countTrailingZeros(NonZeros);
7058 SDValue Item = Op.getOperand(Idx);
7059 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
7060 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
7065 // A vector full of immediates; various special cases are already
7066 // handled, so this is best done with a single constant-pool load.
7070 // For AVX-length vectors, see if we can use a vector load to get all of the
7071 // elements, otherwise build the individual 128-bit pieces and use
7072 // shuffles to put them in place.
7073 if (VT.is256BitVector() || VT.is512BitVector()) {
7074 SmallVector<SDValue, 64> V;
7075 for (unsigned i = 0; i != NumElems; ++i)
7076 V.push_back(Op.getOperand(i));
7078 // Check for a build vector of consecutive loads.
7079 if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
7082 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
7084 // Build both the lower and upper subvector.
7085 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7086 makeArrayRef(&V[0], NumElems/2));
7087 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7088 makeArrayRef(&V[NumElems / 2], NumElems/2));
7090 // Recreate the wider vector with the lower and upper part.
7091 if (VT.is256BitVector())
7092 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7093 return Concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7096 // Let legalizer expand 2-wide build_vectors.
7097 if (EVTBits == 64) {
7098 if (NumNonZero == 1) {
7099 // One half is zero or undef.
7100 unsigned Idx = countTrailingZeros(NonZeros);
7101 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
7102 Op.getOperand(Idx));
7103 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
7108 // If element VT is < 32 bits, convert it to inserts into a zero vector.
7109 if (EVTBits == 8 && NumElems == 16) {
7110 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
7112 if (V.getNode()) return V;
7115 if (EVTBits == 16 && NumElems == 8) {
7116 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
7118 if (V.getNode()) return V;
7121 // If the element VT is 32 bits and the vector has 4 elements, try to generate an INSERTPS.
7122 if (EVTBits == 32 && NumElems == 4) {
7123 SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget, *this);
7128 // If element VT is == 32 bits, turn it into a number of shuffles.
7129 SmallVector<SDValue, 8> V(NumElems);
7130 if (NumElems == 4 && NumZero > 0) {
7131 for (unsigned i = 0; i < 4; ++i) {
7132 bool isZero = !(NonZeros & (1 << i));
7134 V[i] = getZeroVector(VT, Subtarget, DAG, dl);
7136 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7139 for (unsigned i = 0; i < 2; ++i) {
7140 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
7143 V[i] = V[i*2]; // Must be a zero vector.
7146 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
7149 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
7152 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
7157 bool Reverse1 = (NonZeros & 0x3) == 2;
7158 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
7162 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
7163 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
7165 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
7168 if (Values.size() > 1 && VT.is128BitVector()) {
7169 // Check for a build vector of consecutive loads.
7170 for (unsigned i = 0; i < NumElems; ++i)
7171 V[i] = Op.getOperand(i);
7173 // Check for elements which are consecutive loads.
7174 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false);
7178 // Check for a build vector from mostly shuffle plus few inserting.
7179 SDValue Sh = buildFromShuffleMostly(Op, DAG);
7183 // For SSE 4.1, use insertps to put the high elements into the low element.
7184 if (Subtarget->hasSSE41()) {
7186 if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
7187 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
7189 Result = DAG.getUNDEF(VT);
7191 for (unsigned i = 1; i < NumElems; ++i) {
7192 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
7193 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
7194 Op.getOperand(i), DAG.getIntPtrConstant(i));
7199 // Otherwise, expand into a number of unpckl*. Start by extending each of
7200 // our (non-undef) elements to the full vector width with the element in the
7201 // bottom slot of the vector (which generates no code for SSE).
7202 for (unsigned i = 0; i < NumElems; ++i) {
7203 if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
7204 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7206 V[i] = DAG.getUNDEF(VT);
7209 // Next, we iteratively mix elements, e.g. for v4f32:
7210 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
7211 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
7212 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
7213 unsigned EltStride = NumElems >> 1;
7214 while (EltStride != 0) {
7215 for (unsigned i = 0; i < EltStride; ++i) {
7216 // If V[i+EltStride] is undef and this is the first round of mixing,
7217 // then it is safe to just drop this shuffle: V[i] is already in the
7218 // right place, the one element (since it's the first round) being
7219 // inserted as undef can be dropped. This isn't safe for successive
7220 // rounds because they will permute elements within both vectors.
7221 if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
7222 EltStride == NumElems/2)
7225 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
7234 // LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction
7235 // to create 256-bit vectors from two other 128-bit ones.
7236 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7238 MVT ResVT = Op.getSimpleValueType();
7240 assert((ResVT.is256BitVector() ||
7241 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
7243 SDValue V1 = Op.getOperand(0);
7244 SDValue V2 = Op.getOperand(1);
7245 unsigned NumElems = ResVT.getVectorNumElements();
7246 if(ResVT.is256BitVector())
7247 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7249 if (Op.getNumOperands() == 4) {
7250 MVT HalfVT = MVT::getVectorVT(ResVT.getScalarType(),
7251 ResVT.getVectorNumElements()/2);
7252 SDValue V3 = Op.getOperand(2);
7253 SDValue V4 = Op.getOperand(3);
7254 return Concat256BitVectors(Concat128BitVectors(V1, V2, HalfVT, NumElems/2, DAG, dl),
7255 Concat128BitVectors(V3, V4, HalfVT, NumElems/2, DAG, dl), ResVT, NumElems, DAG, dl);
7257 return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7260 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7261 MVT LLVM_ATTRIBUTE_UNUSED VT = Op.getSimpleValueType();
7262 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
7263 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
7264 Op.getNumOperands() == 4)));
7266 // AVX can use the vinsertf128 instruction to create 256-bit vectors
7267 // from two other 128-bit ones.
7269 // A 512-bit vector may be built from 2 256-bit vectors or 4 128-bit vectors.
7270 return LowerAVXCONCAT_VECTORS(Op, DAG);
7274 //===----------------------------------------------------------------------===//
7275 // Vector shuffle lowering
7277 // This is an experimental code path for lowering vector shuffles on x86. It is
7278 // designed to handle arbitrary vector shuffles and blends, gracefully
7279 // degrading performance as necessary. It works hard to recognize idiomatic
7280 // shuffles and lower them to optimal instruction patterns without leaving
7281 // a framework that allows reasonably efficient handling of all vector shuffle patterns.
7283 //===----------------------------------------------------------------------===//
7285 /// \brief Tiny helper function to identify a no-op mask.
7287 /// This is a somewhat boring predicate function. It checks whether the mask
7288 /// array input, which is assumed to be a single-input shuffle mask of the kind
7289 /// used by the X86 shuffle instructions (not a fully general
7290 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
7291 /// in-place shuffle are 'no-op's.
7292 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
7293 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7294 if (Mask[i] != -1 && Mask[i] != i)
7299 /// \brief Helper function to classify a mask as a single-input mask.
7301 /// This isn't a generic single-input test because in the vector shuffle
7302 /// lowering we canonicalize single inputs to be the first input operand. This
7303 /// means we can more quickly test for a single input by only checking whether
7304 /// an input from the second operand exists. We also assume that the size of
7305 /// the mask corresponds to the size of the input vectors, which isn't true in the
7306 /// fully general case.
7307 static bool isSingleInputShuffleMask(ArrayRef<int> Mask) {
7309 if (M >= (int)Mask.size())
7314 /// \brief Test whether there are elements crossing 128-bit lanes in this shuffle mask.
7317 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
7318 /// and we routinely test for these.
7319 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
7320 int LaneSize = 128 / VT.getScalarSizeInBits();
7321 int Size = Mask.size();
7322 for (int i = 0; i < Size; ++i)
7323 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
7328 /// \brief Test whether a shuffle mask is equivalent within each 128-bit lane.
7330 /// This checks a shuffle mask to see if it is performing the same
7331 /// 128-bit lane-relative shuffle in each 128-bit lane. This trivially implies
7332 /// that it is also not lane-crossing. It may however involve a blend from the
7333 /// same lane of a second vector.
7335 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
7336 /// non-trivial to compute in the face of undef lanes. The representation is
7337 /// *not* suitable for use with existing 128-bit shuffles as it will contain
7338 /// entries from both V1 and V2 inputs to the wider mask.
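///
/// Illustrative example: the v8i32 mask <0, 9, 2, 11, 4, 13, 6, 15> repeats
/// across its two 128-bit lanes and produces RepeatedMask == <0, 9, 2, 11>.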
7340 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
7341 SmallVectorImpl<int> &RepeatedMask) {
7342 int LaneSize = 128 / VT.getScalarSizeInBits();
7343 RepeatedMask.resize(LaneSize, -1);
7344 int Size = Mask.size();
7345 for (int i = 0; i < Size; ++i) {
7348 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
7349 // This entry crosses lanes, so there is no way to model this shuffle.
7352 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
7353 if (RepeatedMask[i % LaneSize] == -1)
7354 // This is the first non-undef entry in this slot of a 128-bit lane.
7355 RepeatedMask[i % LaneSize] =
7356 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + Size;
7357 else if (RepeatedMask[i % LaneSize] + (i / LaneSize) * LaneSize != Mask[i])
7358 // Found a mismatch with the repeated mask.
7364 /// \brief Base case helper for testing a single mask element.
7365 static bool isShuffleEquivalentImpl(SDValue V1, SDValue V2,
7366 BuildVectorSDNode *BV1,
7367 BuildVectorSDNode *BV2, ArrayRef<int> Mask,
7369 int Size = Mask.size();
7370 if (Mask[i] != -1 && Mask[i] != Arg) {
7371 auto *MaskBV = Mask[i] < Size ? BV1 : BV2;
7372 auto *ArgsBV = Arg < Size ? BV1 : BV2;
7373 if (!MaskBV || !ArgsBV ||
7374 MaskBV->getOperand(Mask[i] % Size) != ArgsBV->getOperand(Arg % Size))
7380 /// \brief Recursive helper to peel off and test each mask element.
7381 template <typename... Ts>
7382 static bool isShuffleEquivalentImpl(SDValue V1, SDValue V2,
7383 BuildVectorSDNode *BV1,
7384 BuildVectorSDNode *BV2, ArrayRef<int> Mask,
7385 int i, int Arg, Ts... Args) {
7386 if (!isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, i, Arg))
7389 return isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, i + 1, Args...);
7392 /// \brief Checks whether a shuffle mask is equivalent to an explicit list of arguments.
7395 /// This is a fast way to test a shuffle mask against a fixed pattern:
7397 ///   if (isShuffleEquivalent(V1, V2, Mask, 3, 2, 1, 0)) { ... }
7399 /// It returns true if the mask is exactly as wide as the argument list, and
7400 /// each element of the mask is either -1 (signifying undef) or the value given
7401 /// in the argument.
7402 template <typename... Ts>
7403 static bool isShuffleEquivalent(SDValue V1, SDValue V2, ArrayRef<int> Mask,
7405 if (Mask.size() != sizeof...(Args))
7408 // If the values are build vectors, we can look through them to find
7409 // equivalent inputs that make the shuffles equivalent.
7410 auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
7411 auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
7413 // Recursively peel off arguments and test them against the mask.
7414 return isShuffleEquivalentImpl(V1, V2, BV1, BV2, Mask, 0, Args...);
7417 /// \brief Get a 4-lane 8-bit shuffle immediate for a mask.
7419 /// This helper function produces an 8-bit shuffle immediate corresponding to
7420 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
7421 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions.
7424 /// NB: We rely heavily on "undef" masks preserving the input lane.
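///
/// Illustrative example: the mask <2, 3, 0, 1> is encoded as
/// (2 << 0) | (3 << 2) | (0 << 4) | (1 << 6) == 0x4E.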
7425 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask,
7426 SelectionDAG &DAG) {
7427 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
7428 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
7429 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
7430 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
7431 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
7434 Imm |= (Mask[0] == -1 ? 0 : Mask[0]) << 0;
7435 Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2;
7436 Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4;
7437 Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6;
7438 return DAG.getConstant(Imm, MVT::i8);
7441 /// \brief Try to emit a blend instruction for a shuffle.
7443 /// This doesn't do any checks for the availability of instructions for blending
7444 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
7445 /// be matched in the backend with the type given. What it does check for is
7446 /// that the shuffle mask is in fact a blend.
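///
/// Illustrative example: the v4i32 mask <0, 5, 2, 7> takes elements 1 and 3
/// from V2 and the rest from V1, so the blend immediate computed below is
/// 0b1010.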
7447 static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
7448 SDValue V2, ArrayRef<int> Mask,
7449 const X86Subtarget *Subtarget,
7450 SelectionDAG &DAG) {
7452 unsigned BlendMask = 0;
7453 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7454 if (Mask[i] >= Size) {
7455 if (Mask[i] != i + Size)
7456 return SDValue(); // Shuffled V2 input!
7457 BlendMask |= 1u << i;
7460 if (Mask[i] >= 0 && Mask[i] != i)
7461 return SDValue(); // Shuffled V1 input!
7463 switch (VT.SimpleTy) {
7468 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
7469 DAG.getConstant(BlendMask, MVT::i8));
7473 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7477 // If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
7478 // that instruction.
7479 if (Subtarget->hasAVX2()) {
7480 // Scale the blend by the number of 32-bit dwords per element.
7481 int Scale = VT.getScalarSizeInBits() / 32;
7483 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7484 if (Mask[i] >= Size)
7485 for (int j = 0; j < Scale; ++j)
7486 BlendMask |= 1u << (i * Scale + j);
7488 MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
7489 V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
7490 V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
7491 return DAG.getNode(ISD::BITCAST, DL, VT,
7492 DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
7493 DAG.getConstant(BlendMask, MVT::i8)));
7497 // For integer shuffles we need to expand the mask and cast the inputs to
7498 // v8i16s prior to blending.
7499 int Scale = 8 / VT.getVectorNumElements();
7501 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7502 if (Mask[i] >= Size)
7503 for (int j = 0; j < Scale; ++j)
7504 BlendMask |= 1u << (i * Scale + j);
7506 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
7507 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
7508 return DAG.getNode(ISD::BITCAST, DL, VT,
7509 DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
7510 DAG.getConstant(BlendMask, MVT::i8)));
7514 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7515 SmallVector<int, 8> RepeatedMask;
7516 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
7517 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
7518 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
7520 for (int i = 0; i < 8; ++i)
7521 if (RepeatedMask[i] >= 16)
7522 BlendMask |= 1u << i;
7523 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
7524 DAG.getConstant(BlendMask, MVT::i8));
7530 // Scale the blend by the number of bytes per element.
7531 int Scale = VT.getScalarSizeInBits() / 8;
7533 // This form of blend is always done on bytes. Compute the byte vector type.
7535 MVT BlendVT = MVT::getVectorVT(MVT::i8, VT.getSizeInBits() / 8);
7537 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
7538 // mix of LLVM's code generator and the x86 backend. We tell the code
7539 // generator that boolean values in the elements of an x86 vector register
7540 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
7541 // mapping a select to operand #1, and 'false' mapping to operand #2. The
7542 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
7543 // of the element (the remaining are ignored) and 0 in that high bit would
7544 // mean operand #1 while 1 in the high bit would mean operand #2. So while
7545 // the LLVM model for boolean values in vector elements gets the relevant
7546 // bit set, it is set backwards and over-constrained relative to x86's actual model.
7548 SmallVector<SDValue, 32> VSELECTMask;
7549 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7550 for (int j = 0; j < Scale; ++j)
7551 VSELECTMask.push_back(
7552 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
7553 : DAG.getConstant(Mask[i] < Size ? -1 : 0, MVT::i8));
7555 V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
7556 V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
7558 ISD::BITCAST, DL, VT,
7559 DAG.getNode(ISD::VSELECT, DL, BlendVT,
7560 DAG.getNode(ISD::BUILD_VECTOR, DL, BlendVT, VSELECTMask),
7565 llvm_unreachable("Not a supported integer vector type!");
7569 /// \brief Try to lower as a blend of elements from two inputs followed by
7570 /// a single-input permutation.
7572 /// This matches the pattern where we can blend elements from two inputs and
7573 /// then reduce the shuffle to a single-input permutation.
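///
/// Illustrative example: the v4i32 mask <2, 7, 0, 5> is not itself a blend,
/// but it can be emitted as the blend <0, 5, 2, 7> followed by the
/// single-input permute <2, 3, 0, 1>.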
7574 static SDValue lowerVectorShuffleAsBlendAndPermute(SDLoc DL, MVT VT, SDValue V1,
7577 SelectionDAG &DAG) {
7578 // We build up the blend mask while checking whether a blend is a viable way
7579 // to reduce the shuffle.
7580 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7581 SmallVector<int, 32> PermuteMask(Mask.size(), -1);
7583 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7587 assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
7589 if (BlendMask[Mask[i] % Size] == -1)
7590 BlendMask[Mask[i] % Size] = Mask[i];
7591 else if (BlendMask[Mask[i] % Size] != Mask[i])
7592 return SDValue(); // Can't blend in the needed input!
7594 PermuteMask[i] = Mask[i] % Size;
7597 SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7598 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
7601 /// \brief Generic routine to decompose a shuffle and blend into independent
7602 /// blends and permutes.
7604 /// This matches the extremely common pattern for handling combined
7605 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
7606 /// operations. It will try to pick the best arrangement of shuffles and blends.
7608 static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
7612 SelectionDAG &DAG) {
7613 // Shuffle the input elements into the desired positions in V1 and V2 and
7614 // blend them together.
7615 SmallVector<int, 32> V1Mask(Mask.size(), -1);
7616 SmallVector<int, 32> V2Mask(Mask.size(), -1);
7617 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7618 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7619 if (Mask[i] >= 0 && Mask[i] < Size) {
7620 V1Mask[i] = Mask[i];
7622 } else if (Mask[i] >= Size) {
7623 V2Mask[i] = Mask[i] - Size;
7624 BlendMask[i] = i + Size;
7627 // Try to lower with the simpler initial blend strategy unless one of the
7628 // input shuffles would be a no-op. We prefer to shuffle inputs because the
7629 // shuffle may be able to fold with a load or provide some other benefit.
7630 // However, when we would have to do twice as many shuffles to achieve this,
7631 // blending first is a better strategy.
7632 if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask))
7633 if (SDValue BlendPerm =
7634 lowerVectorShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask, DAG))
7637 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
7638 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
7639 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7642 /// \brief Try to lower a vector shuffle as a byte rotation.
7644 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
7645 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
7646 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
7647 /// try to generically lower a vector shuffle through such a pattern. It
7648 /// does not check for the profitability of lowering either as PALIGNR or
7649 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
7650 /// This matches shuffle vectors that look like:
7652 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
7654 /// Essentially it concatenates V1 and V2, shifts right by some number of
7655 /// elements, and takes the low elements as the result. Note that while this is
7656 /// specified as a *right shift* because x86 is little-endian, it is a *left
7657 /// rotate* of the vector lanes.
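///
/// Illustrative example: for the v8i16 mask [11, 12, 13, 14, 15, 0, 1, 2] the
/// code below finds Rotation == 3 with Lo == V1 and Hi == V2; with SSSE3 this
/// becomes a PALIGNR with a byte immediate of 3 * 2 == 6, and without SSSE3 it
/// becomes (Hi shifted right by 6 bytes) OR'ed with (Lo shifted left by 10
/// bytes).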
7658 static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
7661 const X86Subtarget *Subtarget,
7662 SelectionDAG &DAG) {
7663 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7665 int NumElts = Mask.size();
7666 int NumLanes = VT.getSizeInBits() / 128;
7667 int NumLaneElts = NumElts / NumLanes;
7669 // We need to detect various ways of spelling a rotation:
7670 // [11, 12, 13, 14, 15, 0, 1, 2]
7671 // [-1, 12, 13, 14, -1, -1, 1, -1]
7672 // [-1, -1, -1, -1, -1, -1, 1, 2]
7673 // [ 3, 4, 5, 6, 7, 8, 9, 10]
7674 // [-1, 4, 5, 6, -1, -1, 9, -1]
7675 // [-1, 4, 5, 6, -1, -1, -1, -1]
7678 for (int l = 0; l < NumElts; l += NumLaneElts) {
7679 for (int i = 0; i < NumLaneElts; ++i) {
7680 if (Mask[l + i] == -1)
7682 assert(Mask[l + i] >= 0 && "Only -1 is a valid negative mask element!");
7684 // Get the mod-Size index and correct it for the lane.
7685 int LaneIdx = (Mask[l + i] % NumElts) - l;
7686 // Make sure it was in this lane.
7687 if (LaneIdx < 0 || LaneIdx >= NumLaneElts)
7690 // Determine where a rotated vector would have started.
7691 int StartIdx = i - LaneIdx;
7693 // The identity rotation isn't interesting, stop.
7696 // If we found the tail of a vector, the rotation must be the missing
7697 // front. If we found the head of a vector, it must be how much of the head.
7699 int CandidateRotation = StartIdx < 0 ? -StartIdx : NumLaneElts - StartIdx;
7702 Rotation = CandidateRotation;
7703 else if (Rotation != CandidateRotation)
7704 // The rotations don't match, so we can't match this mask.
7707 // Compute which value this mask is pointing at.
7708 SDValue MaskV = Mask[l + i] < NumElts ? V1 : V2;
7710 // Compute which of the two target values this index should be assigned
7711 // to. This reflects whether the high elements are remaining or the low
7712 // elements are remaining.
7713 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
7715 // Either set up this value if we've not encountered it before, or check
7716 // that it remains consistent.
7719 else if (TargetV != MaskV)
7720 // This may be a rotation, but it pulls from the inputs in some
7721 // unsupported interleaving.
7726 // Check that we successfully analyzed the mask, and normalize the results.
7727 assert(Rotation != 0 && "Failed to locate a viable rotation!");
7728 assert((Lo || Hi) && "Failed to find a rotated input vector!");
7734 // The actual rotate instruction rotates bytes, so we need to scale the
7735 // rotation based on how many bytes are in the vector lane.
7736 int Scale = 16 / NumLaneElts;
7738 // SSSE3 targets can use the palignr instruction.
7739 if (Subtarget->hasSSSE3()) {
7740 // Cast the inputs to i8 vector of correct length to match PALIGNR.
7741 MVT AlignVT = MVT::getVectorVT(MVT::i8, 16 * NumLanes);
7742 Lo = DAG.getNode(ISD::BITCAST, DL, AlignVT, Lo);
7743 Hi = DAG.getNode(ISD::BITCAST, DL, AlignVT, Hi);
7745 return DAG.getNode(ISD::BITCAST, DL, VT,
7746 DAG.getNode(X86ISD::PALIGNR, DL, AlignVT, Hi, Lo,
7747 DAG.getConstant(Rotation * Scale, MVT::i8)));
7750 assert(VT.getSizeInBits() == 128 &&
7751 "Rotate-based lowering only supports 128-bit lowering!");
7752 assert(Mask.size() <= 16 &&
7753 "Can shuffle at most 16 bytes in a 128-bit vector!");
7755 // Default SSE2 implementation
7756 int LoByteShift = 16 - Rotation * Scale;
7757 int HiByteShift = Rotation * Scale;
7759 // Cast the inputs to v2i64 to match PSLLDQ/PSRLDQ.
7760 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Lo);
7761 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Hi);
7763 SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo,
7764 DAG.getConstant(8 * LoByteShift, MVT::i8));
7765 SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi,
7766 DAG.getConstant(8 * HiByteShift, MVT::i8));
7767 return DAG.getNode(ISD::BITCAST, DL, VT,
7768 DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift));
7771 /// \brief Compute whether each element of a shuffle is zeroable.
7773 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
7774 /// Either it is an undef element in the shuffle mask, the element of the input
7775 /// referenced is undef, or the element of the input referenced is known to be
7776 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
7778 /// as many lanes with this technique as possible to simplify the remaining shuffle.
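///
/// Illustrative example: with Mask == <0, -1, 6, 7>, an arbitrary V1, and
/// V2 == (build_vector X, Y, (i32 0), undef), elements 1, 2, and 3 of the
/// result are zeroable: 1 from the undef mask entry, 2 because V2's element 2
/// is a zero constant, and 3 because V2's element 3 is undef.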
7779 static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
7780 SDValue V1, SDValue V2) {
7781 SmallBitVector Zeroable(Mask.size(), false);
7783 while (V1.getOpcode() == ISD::BITCAST)
7784 V1 = V1->getOperand(0);
7785 while (V2.getOpcode() == ISD::BITCAST)
7786 V2 = V2->getOperand(0);
7788 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
7789 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
7791 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7793 // Handle the easy cases.
7794 if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
7799 // If this is an index into a build_vector node (which has the same number
7800 // of elements), dig out the input value and use it.
7801 SDValue V = M < Size ? V1 : V2;
7802 if (V.getOpcode() != ISD::BUILD_VECTOR || Size != (int)V.getNumOperands())
7805 SDValue Input = V.getOperand(M % Size);
7806 // The UNDEF opcode check really should be dead code here, but not quite
7807 // worth asserting on (it isn't invalid, just unexpected).
7808 if (Input.getOpcode() == ISD::UNDEF || X86::isZeroNode(Input))
7815 /// \brief Try to emit a bitmask instruction for a shuffle.
7817 /// This handles cases where we can model a blend exactly as a bitmask due to
7818 /// one of the inputs being zeroable.
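///
/// Illustrative example: a v4i32 shuffle with Mask == <0, 1, 6, 3> where V2 is
/// all zeros is equivalent to ANDing V1 with the constant vector
/// <-1, -1, 0, -1>, which is the mask vector built below.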
7819 static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1,
7820 SDValue V2, ArrayRef<int> Mask,
7821 SelectionDAG &DAG) {
7822 MVT EltVT = VT.getScalarType();
7823 int NumEltBits = EltVT.getSizeInBits();
7824 MVT IntEltVT = MVT::getIntegerVT(NumEltBits);
7825 SDValue Zero = DAG.getConstant(0, IntEltVT);
7826 SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), IntEltVT);
7827 if (EltVT.isFloatingPoint()) {
7828 Zero = DAG.getNode(ISD::BITCAST, DL, EltVT, Zero);
7829 AllOnes = DAG.getNode(ISD::BITCAST, DL, EltVT, AllOnes);
7831 SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
7832 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7834 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7837 if (Mask[i] % Size != i)
7838 return SDValue(); // Not a blend.
7840 V = Mask[i] < Size ? V1 : V2;
7841 else if (V != (Mask[i] < Size ? V1 : V2))
7842 return SDValue(); // Can only let one input through the mask.
7844 VMaskOps[i] = AllOnes;
7847 return SDValue(); // No non-zeroable elements!
7849 SDValue VMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, VMaskOps);
7850 V = DAG.getNode(VT.isFloatingPoint()
7851 ? (unsigned) X86ISD::FAND : (unsigned) ISD::AND,
7856 /// \brief Try to lower a vector shuffle as a byte shift (shifts in zeros).
7858 /// Attempts to match a shuffle mask against the PSRLDQ and PSLLDQ
7859 /// byte-shift instructions. The mask must consist of a shifted sequential
7860 /// shuffle from one of the input vectors and zeroable elements for the
7861 /// remaining 'shifted in' elements.
7862 static SDValue lowerVectorShuffleAsByteShift(SDLoc DL, MVT VT, SDValue V1,
7863 SDValue V2, ArrayRef<int> Mask,
7864 SelectionDAG &DAG) {
7865 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7867 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7869 int NumElts = VT.getVectorNumElements();
7870 int NumLanes = VT.getSizeInBits() / 128;
7871 int NumLaneElts = NumElts / NumLanes;
7872 int Scale = 16 / NumLaneElts;
7873 MVT ShiftVT = MVT::getVectorVT(MVT::i64, 2 * NumLanes);
7875 // PSLLDQ : (little-endian) left byte shift
7876 // [ zz, 0, 1, 2, 3, 4, 5, 6]
7877 // [ zz, zz, -1, -1, 2, 3, 4, -1]
7878 // [ zz, zz, zz, zz, zz, zz, -1, 1]
7879 // PSRLDQ : (little-endian) right byte shift
7880 // [ 5, 6, 7, zz, zz, zz, zz, zz]
7881 // [ -1, 5, 6, 7, zz, zz, zz, zz]
7882 // [ 1, 2, -1, -1, -1, -1, zz, zz]
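// Illustrative example: the v8i16 mask [zz, zz, 0, 1, 2, 3, 4, 5] (with the
// 'zz' entries zeroable) matches as a left shift with Shift == 2 and is
// emitted below as a VSHLDQ (PSLLDQ) of V1 by 2 * 2 == 4 bytes.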
7883 auto MatchByteShift = [&](int Shift) -> SDValue {
7884 bool MatchLeft = true, MatchRight = true;
7885 for (int l = 0; l < NumElts; l += NumLaneElts) {
7886 for (int i = 0; i < Shift; ++i)
7887 MatchLeft &= Zeroable[l + i];
7888 for (int i = NumLaneElts - Shift; i < NumLaneElts; ++i)
7889 MatchRight &= Zeroable[l + i];
7891 if (!(MatchLeft || MatchRight))
7894 bool MatchV1 = true, MatchV2 = true;
7895 for (int l = 0; l < NumElts; l += NumLaneElts) {
7896 unsigned Pos = MatchLeft ? Shift + l : l;
7897 unsigned Low = MatchLeft ? l : Shift + l;
7898 unsigned Len = NumLaneElts - Shift;
7899 MatchV1 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low);
7900 MatchV2 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low + NumElts);
7902 if (!(MatchV1 || MatchV2))
7905 int ByteShift = Shift * Scale;
7906 unsigned Op = MatchRight ? X86ISD::VSRLDQ : X86ISD::VSHLDQ;
7907 SDValue V = MatchV1 ? V1 : V2;
7908 V = DAG.getNode(ISD::BITCAST, DL, ShiftVT, V);
7909 V = DAG.getNode(Op, DL, ShiftVT, V,
7910 DAG.getConstant(ByteShift * 8, MVT::i8));
7911 return DAG.getNode(ISD::BITCAST, DL, VT, V);
7914 for (int Shift = 1; Shift < NumLaneElts; ++Shift)
7915 if (SDValue S = MatchByteShift(Shift))
7922 /// \brief Try to lower a vector shuffle as a bit shift (shifts in zeros).
7924 /// Attempts to match a shuffle mask against the PSRL(W/D/Q) and PSLL(W/D/Q)
7925 /// SSE2 and AVX2 logical bit-shift instructions. The function matches
7926 /// elements from one of the input vectors shuffled to the left or right
7927 /// with zeroable elements 'shifted in'.
7928 static SDValue lowerVectorShuffleAsBitShift(SDLoc DL, MVT VT, SDValue V1,
7929 SDValue V2, ArrayRef<int> Mask,
7930 SelectionDAG &DAG) {
7931 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7933 int Size = Mask.size();
7934 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
7936 // PSRL : (little-endian) right bit shift.
7939 // PSLL : (little-endian) left bit shift.
7941 // [ -1, 4, zz, -1 ]
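// Illustrative example: the v8i16 mask [zz, 0, zz, 2, zz, 4, zz, 6] matches
// with Scale == 2 and Shift == 1: the input is reinterpreted as v4i32 and each
// 32-bit element is shifted left by 16 bits with VSHLI.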
7942 auto MatchBitShift = [&](int Shift, int Scale) -> SDValue {
7943 MVT ShiftSVT = MVT::getIntegerVT(VT.getScalarSizeInBits() * Scale);
7944 MVT ShiftVT = MVT::getVectorVT(ShiftSVT, Size / Scale);
7945 assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
7946 "Illegal integer vector type");
7948 bool MatchLeft = true, MatchRight = true;
7949 for (int i = 0; i != Size; i += Scale) {
7950 for (int j = 0; j != Shift; ++j) {
7951 MatchLeft &= Zeroable[i + j];
7953 for (int j = Scale - Shift; j != Scale; ++j) {
7954 MatchRight &= Zeroable[i + j];
7957 if (!(MatchLeft || MatchRight))
7960 bool MatchV1 = true, MatchV2 = true;
7961 for (int i = 0; i != Size; i += Scale) {
7962 unsigned Pos = MatchLeft ? i + Shift : i;
7963 unsigned Low = MatchLeft ? i : i + Shift;
7964 unsigned Len = Scale - Shift;
7965 MatchV1 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low);
7966 MatchV2 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low + Size);
    }
    if (!(MatchV1 || MatchV2))
      return SDValue();
7971 // Cast the inputs to ShiftVT to match VSRLI/VSHLI and back again.
7972 unsigned OpCode = MatchLeft ? X86ISD::VSHLI : X86ISD::VSRLI;
7973 int ShiftAmt = Shift * VT.getScalarSizeInBits();
7974 SDValue V = MatchV1 ? V1 : V2;
7975 V = DAG.getNode(ISD::BITCAST, DL, ShiftVT, V);
7976 V = DAG.getNode(OpCode, DL, ShiftVT, V, DAG.getConstant(ShiftAmt, MVT::i8));
    return DAG.getNode(ISD::BITCAST, DL, VT, V);
  };
7980 // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
7981 // keep doubling the size of the integer elements up to that. We can
7982 // then shift the elements of the integer vector by whole multiples of
7983 // their width within the elements of the larger integer vector. Test each
7984 // multiple to see if we can find a match with the moved element indices
7985 // and that the shifted in elements are all zeroable.
7986 for (int Scale = 2; Scale * VT.getScalarSizeInBits() <= 64; Scale *= 2)
7987 for (int Shift = 1; Shift != Scale; ++Shift)
      if (SDValue BitShift = MatchBitShift(Shift, Scale))
        return BitShift;

  // no match
  return SDValue();
}
7995 /// \brief Lower a vector shuffle as a zero or any extension.
7997 /// Given a specific number of elements, element bit width, and extension
7998 /// stride, produce either a zero or any extension based on the available
7999 /// features of the subtarget.
8000 static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
8001 SDLoc DL, MVT VT, int Scale, bool AnyExt, SDValue InputV,
8002 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8003 assert(Scale > 1 && "Need a scale to extend.");
8004 int NumElements = VT.getVectorNumElements();
8005 int EltBits = VT.getScalarSizeInBits();
8006 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
8007 "Only 8, 16, and 32 bit elements can be extended.");
8008 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
8010 // Found a valid zext mask! Try various lowering strategies based on the
8011 // input type and available ISA extensions.
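  // Editor's note (illustrative, not from the original source): with SSE4.1
  // this emits a single X86ISD::VZEXT, e.g. a v16i8 zero extension with
  // Scale == 4 becomes a PMOVZX-style extension to v4i32
  // (EltBits * Scale == 32, NumElements / Scale == 4).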
8012 if (Subtarget->hasSSE41()) {
8013 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
8014 NumElements / Scale);
8015 return DAG.getNode(ISD::BITCAST, DL, VT,
                       DAG.getNode(X86ISD::VZEXT, DL, ExtVT, InputV));
  }
8019 // For any extends we can cheat for larger element sizes and use shuffle
8020 // instructions that can fold with a load and/or copy.
8021 if (AnyExt && EltBits == 32) {
8022 int PSHUFDMask[4] = {0, -1, 1, -1};
    return DAG.getNode(
        ISD::BITCAST, DL, VT,
8025 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
8026 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
8027 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
  }
  if (AnyExt && EltBits == 16 && Scale > 2) {
8030 int PSHUFDMask[4] = {0, -1, 0, -1};
8031 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
8032 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
8033 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG));
8034 int PSHUFHWMask[4] = {1, -1, -1, -1};
    return DAG.getNode(
        ISD::BITCAST, DL, VT,
8037 DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16,
8038 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, InputV),
                    getV4X86ShuffleImm8ForMask(PSHUFHWMask, DAG)));
  }
8042 // If this would require more than 2 unpack instructions to expand, use
8043 // pshufb when available. We can only use more than 2 unpack instructions
8044 // when zero extending i8 elements which also makes it easier to use pshufb.
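  // Editor's note (illustrative, not from the original source): for Scale == 8
  // on a v16i8 input the PSHUFB mask built below is [0, 0x80 x7, 1, 0x80 x7],
  // i.e. bytes 0 and 1 of the input land in bytes 0 and 8 of the result and
  // every other result byte is zeroed.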
8045 if (Scale > 4 && EltBits == 8 && Subtarget->hasSSSE3()) {
8046 assert(NumElements == 16 && "Unexpected byte vector width!");
8047 SDValue PSHUFBMask[16];
8048 for (int i = 0; i < 16; ++i)
      PSHUFBMask[i] =
          DAG.getConstant((i % Scale == 0) ? i / Scale : 0x80, MVT::i8);
8051 InputV = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, InputV);
8052 return DAG.getNode(ISD::BITCAST, DL, VT,
8053 DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
8054 DAG.getNode(ISD::BUILD_VECTOR, DL,
                                           MVT::v16i8, PSHUFBMask)));
  }
8058 // Otherwise emit a sequence of unpacks.
  do {
    MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
8061 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
8062 : getZeroVector(InputVT, Subtarget, DAG, DL);
8063 InputV = DAG.getNode(ISD::BITCAST, DL, InputVT, InputV);
8064 InputV = DAG.getNode(X86ISD::UNPCKL, DL, InputVT, InputV, Ext);
    Scale /= 2;
    EltBits *= 2;
    NumElements /= 2;
  } while (Scale > 1);
  return DAG.getNode(ISD::BITCAST, DL, VT, InputV);
}
8072 /// \brief Try to lower a vector shuffle as a zero extension on any microarch.
8074 /// This routine will try to do everything in its power to cleverly lower
8075 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
8076 /// check for the profitability of this lowering, it tries to aggressively
8077 /// match this pattern. It will use all of the micro-architectural details it
8078 /// can to emit an efficient lowering. It handles both blends with all-zero
8079 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
8080 /// masking out later).
8082 /// The reason we have dedicated lowering for zext-style shuffles is that they
8083 /// are both incredibly common and often quite performance sensitive.
8084 static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
8085 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8086 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8087 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8089 int Bits = VT.getSizeInBits();
8090 int NumElements = VT.getVectorNumElements();
8091 assert(VT.getScalarSizeInBits() <= 32 &&
8092 "Exceeds 32-bit integer zero extension limit");
8093 assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
  // Define a helper function to check a particular ext-scale and lower to it if
  // valid.
  auto Lower = [&](int Scale) -> SDValue {
    SDValue InputV;
    bool AnyExt = true;

    for (int i = 0; i < NumElements; ++i) {
      if (Mask[i] == -1)
        continue; // Valid anywhere but doesn't tell us anything.
      if (i % Scale != 0) {
        // Each of the extended elements need to be zeroable.
        if (!Zeroable[i])
          return SDValue();

        // We no longer are in the anyext case.
        AnyExt = false;
        continue;
      }

      // Each of the base elements needs to be consecutive indices into the
      // same input vector.
      SDValue V = Mask[i] < NumElements ? V1 : V2;
      if (!InputV)
        InputV = V;
      else if (InputV != V)
        return SDValue(); // Flip-flopping inputs.

      if (Mask[i] % NumElements != i / Scale)
        return SDValue(); // Non-consecutive strided elements.
    }

    // If we fail to find an input, we have a zero-shuffle which should always
    // have already been handled.
    // FIXME: Maybe handle this here in case during blending we end up with one?
    if (!InputV)
      return SDValue();

    return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
        DL, VT, Scale, AnyExt, InputV, Subtarget, DAG);
  };
8135 // The widest scale possible for extending is to a 64-bit integer.
8136 assert(Bits % 64 == 0 &&
8137 "The number of bits in a vector must be divisible by 64 on x86!");
8138 int NumExtElements = Bits / 64;
  // Each iteration, try extending the elements half as much, but into twice as
  // many elements.
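  // Editor's note (illustrative, not from the original source): for a 128-bit
  // vector this tries the widest extension first, e.g. for v8i16 it tries
  // Scale == 4 (extending to 2 x 64-bit elements) and then Scale == 2
  // (4 x 32-bit elements).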
8142 for (; NumExtElements < NumElements; NumExtElements *= 2) {
8143 assert(NumElements % NumExtElements == 0 &&
8144 "The input vector size must be divisible by the extended size.");
    if (SDValue V = Lower(NumElements / NumExtElements))
      return V;
  }
  // General extends failed, but 128-bit vectors may be able to use MOVQ.
  if (Bits != 128)
    return SDValue();
8153 // Returns one of the source operands if the shuffle can be reduced to a
8154 // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
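  // Editor's note (illustrative, not from the original source): e.g. a v4i32
  // mask [0, 1, zz, zz] (or [4, 5, zz, zz] drawing from V2) with a zeroable
  // upper half reduces to a single MOVQ-style VZEXT_MOVL on the vector viewed
  // as v2i64.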
8155 auto CanZExtLowHalf = [&]() {
    for (int i = NumElements / 2; i != NumElements; ++i)
      if (!Zeroable[i])
        return SDValue();
    if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
      return V1;
    if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
      return V2;
    return SDValue();
  };
8166 if (SDValue V = CanZExtLowHalf()) {
8167 V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V);
8168 V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
    return DAG.getNode(ISD::BITCAST, DL, VT, V);
  }

  // No viable ext lowering found.
  return SDValue();
}
8176 /// \brief Try to get a scalar value for a specific element of a vector.
8178 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
8179 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
8180 SelectionDAG &DAG) {
8181 MVT VT = V.getSimpleValueType();
8182 MVT EltVT = VT.getVectorElementType();
8183 while (V.getOpcode() == ISD::BITCAST)
8184 V = V.getOperand(0);
  // If the bitcasts shift the element size, we can't extract an equivalent
  // element from it.
8187 MVT NewVT = V.getSimpleValueType();
  if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
    return SDValue();
8191 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8192 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR))
    return DAG.getNode(ISD::BITCAST, SDLoc(V), EltVT, V.getOperand(Idx));

  return SDValue();
}
8198 /// \brief Helper to test for a load that can be folded with x86 shuffles.
8200 /// This is particularly important because the set of instructions varies
8201 /// significantly based on whether the operand is a load or not.
8202 static bool isShuffleFoldableLoad(SDValue V) {
8203 while (V.getOpcode() == ISD::BITCAST)
8204 V = V.getOperand(0);
  return ISD::isNON_EXTLoad(V.getNode());
}
8209 /// \brief Try to lower insertion of a single element into a zero vector.
8211 /// This is a common pattern that we have especially efficient patterns to lower
8212 /// across all subtarget feature sets.
8213 static SDValue lowerVectorShuffleAsElementInsertion(
8214 MVT VT, SDLoc DL, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8215 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8216 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
  MVT ExtVT = VT;
  MVT EltVT = VT.getVectorElementType();
8220 int V2Index = std::find_if(Mask.begin(), Mask.end(),
                             [&Mask](int M) { return M >= (int)Mask.size(); }) -
                 Mask.begin();
8223 bool IsV1Zeroable = true;
8224 for (int i = 0, Size = Mask.size(); i < Size; ++i)
8225 if (i != V2Index && !Zeroable[i]) {
      IsV1Zeroable = false;
      break;
    }
8230 // Check for a single input from a SCALAR_TO_VECTOR node.
8231 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
8232 // all the smarts here sunk into that routine. However, the current
8233 // lowering of BUILD_VECTOR makes that nearly impossible until the old
8234 // vector shuffle lowering is dead.
8235 if (SDValue V2S = getScalarValueForVectorElement(
8236 V2, Mask[V2Index] - Mask.size(), DAG)) {
8237 // We need to zext the scalar if it is smaller than an i32.
8238 V2S = DAG.getNode(ISD::BITCAST, DL, EltVT, V2S);
8239 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
      // Using zext to expand a narrow element won't work for non-zero
      // elements.
      if (!IsV1Zeroable)
        return SDValue();

      // Zero-extend directly to i32.
      ExtVT = MVT::v4i32;
      V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
    }
    V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
8250 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
8251 EltVT == MVT::i16) {
8252 // Either not inserting from the low element of the input or the input
    // element size is too small to use VZEXT_MOVL to clear the high bits.
    return SDValue();
  }
8257 if (!IsV1Zeroable) {
8258 // If V1 can't be treated as a zero vector we have fewer options to lower
8259 // this. We can't support integer vectors or non-zero targets cheaply, and
8260 // the V1 elements can't be permuted in any way.
8261 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
    if (!VT.isFloatingPoint() || V2Index != 0)
      return SDValue();
8264 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
8265 V1Mask[V2Index] = -1;
    if (!isNoopShuffleMask(V1Mask))
      return SDValue();
8268 // This is essentially a special case blend operation, but if we have
8269 // general purpose blend operations, they are always faster. Bail and let
8270 // the rest of the lowering handle these as blends.
    if (Subtarget->hasSSE41())
      return SDValue();
8274 // Otherwise, use MOVSD or MOVSS.
8275 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
8276 "Only two types of floating point element types to handle!");
    return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
                       ExtVT, V1, V2);
  }
8281 // This lowering only works for the low element with floating point vectors.
  if (VT.isFloatingPoint() && V2Index != 0)
    return SDValue();
8285 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
  if (ExtVT != VT)
    V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8290 // If we have 4 or fewer lanes we can cheaply shuffle the element into
8291 // the desired position. Otherwise it is more efficient to do a vector
8292 // shift left. We know that we can do a vector shift left because all
8293 // the inputs are zero.
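  // Editor's note (illustrative, not from the original source): e.g. inserting
  // into element 5 of a v8i16 takes the shift path below, moving the
  // zero-extended scalar up by V2Index * EltVT.getSizeInBits() == 80 bits
  // within the 128-bit register.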
8294 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
8295 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
8296 V2Shuffle[V2Index] = 0;
8297 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
  } else {
    V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V2);
    V2 = DAG.getNode(
        X86ISD::VSHLDQ, DL, MVT::v2i64, V2,
        DAG.getConstant(
            V2Index * EltVT.getSizeInBits(),
            DAG.getTargetLoweringInfo().getScalarShiftAmountTy(MVT::v2i64)));
    V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
  }
  return V2;
}
8311 /// \brief Try to lower broadcast of a single element.
8313 /// For convenience, this code also bundles all of the subtarget feature set
8314 /// filtering. While a little annoying to re-dispatch on type here, there isn't
8315 /// a convenient way to factor it out.
static SDValue lowerVectorShuffleAsBroadcast(MVT VT, SDLoc DL, SDValue V,
                                             ArrayRef<int> Mask,
8318 const X86Subtarget *Subtarget,
8319 SelectionDAG &DAG) {
  if (!Subtarget->hasAVX())
    return SDValue();
  if (VT.isInteger() && !Subtarget->hasAVX2())
    return SDValue();
8325 // Check that the mask is a broadcast.
8326 int BroadcastIdx = -1;
  for (int M : Mask)
    if (M >= 0 && BroadcastIdx == -1)
      BroadcastIdx = M;
    else if (M >= 0 && M != BroadcastIdx)
      return SDValue();
8333 assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
8334 "a sorted mask where the broadcast "
8337 // Go up the chain of (vector) values to try and find a scalar load that
8338 // we can combine with the broadcast.
  for (;;) {
    switch (V.getOpcode()) {
8341 case ISD::CONCAT_VECTORS: {
8342 int OperandSize = Mask.size() / V.getNumOperands();
8343 V = V.getOperand(BroadcastIdx / OperandSize);
      BroadcastIdx %= OperandSize;
      continue;
    }
8348 case ISD::INSERT_SUBVECTOR: {
8349 SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
      auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
      if (!ConstantIdx)
        break;

      int BeginIdx = (int)ConstantIdx->getZExtValue();
      int EndIdx =
          BeginIdx + (int)VInner.getValueType().getVectorNumElements();
8357 if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
        BroadcastIdx -= BeginIdx;
        V = VInner;
      } else {
        V = VOuter;
      }
      continue;
    }
    }
    break;
  }
8369 // Check if this is a broadcast of a scalar. We special case lowering
8370 // for scalars so that we can more effectively fold with loads.
8371 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8372 (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
8373 V = V.getOperand(BroadcastIdx);
    // If the scalar isn't a load we can't broadcast from it in AVX1, only with
    // AVX2.
    if (!Subtarget->hasAVX2() && !isShuffleFoldableLoad(V))
      return SDValue();
8379 } else if (BroadcastIdx != 0 || !Subtarget->hasAVX2()) {
8380 // We can't broadcast from a vector register w/o AVX2, and we can only
    // broadcast from the zero-element of a vector register.
    return SDValue();
  }

  return DAG.getNode(X86ISD::VBROADCAST, DL, VT, V);
}
8388 // Check for whether we can use INSERTPS to perform the shuffle. We only use
8389 // INSERTPS when the V1 elements are already in the correct locations
8390 // because otherwise we can just always use two SHUFPS instructions which
8391 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
8392 // perform INSERTPS if a single V1 element is out of place and all V2
8393 // elements are zeroable.
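// Editor's note (illustrative, not from the original source): e.g. the v4f32
// mask [0, 5, 2, 3] keeps V1 elements 0, 2 and 3 in place and inserts V2
// element 1 into lane 1, giving an InsertPS immediate of
// (1 << 6) | (1 << 4) == 0x50 (plus any zero mask bits).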
static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
                                            ArrayRef<int> Mask,
8396 SelectionDAG &DAG) {
8397 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8398 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8399 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8400 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8402 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
  unsigned ZMask = 0;
  int V1DstIndex = -1;
8406 int V2DstIndex = -1;
8407 bool V1UsedInPlace = false;
8409 for (int i = 0; i < 4; ++i) {
    // Synthesize a zero mask from the zeroable elements (includes undefs).
    if (Zeroable[i]) {
      ZMask |= 1 << i;
      continue;
    }

    // Flag if we use any V1 inputs in place.
    if (i == Mask[i]) {
      V1UsedInPlace = true;
      continue;
    }

    // We can only insert a single non-zeroable element.
    if (V1DstIndex != -1 || V2DstIndex != -1)
      return SDValue();

    if (Mask[i] < 4) {
      // V1 input out of place for insertion.
      V1DstIndex = i;
    } else {
      // V2 input for insertion.
      V2DstIndex = i;
    }
  }
8435 // Don't bother if we have no (non-zeroable) element for insertion.
  if (V1DstIndex == -1 && V2DstIndex == -1)
    return SDValue();
8439 // Determine element insertion src/dst indices. The src index is from the
8440 // start of the inserted vector, not the start of the concatenated vector.
8441 unsigned V2SrcIndex = 0;
8442 if (V1DstIndex != -1) {
8443 // If we have a V1 input out of place, we use V1 as the V2 element insertion
8444 // and don't use the original V2 at all.
8445 V2SrcIndex = Mask[V1DstIndex];
8446 V2DstIndex = V1DstIndex;
    V2 = V1;
  } else {
    V2SrcIndex = Mask[V2DstIndex] - 4;
  }
8452 // If no V1 inputs are used in place, then the result is created only from
8453 // the zero mask and the V2 insertion - so remove V1 dependency.
  if (!V1UsedInPlace)
    V1 = DAG.getUNDEF(MVT::v4f32);
8457 unsigned InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
8458 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
8460 // Insert the V2 element into the desired position.
  SDLoc DL(Op);
  return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
                     DAG.getConstant(InsertPSMask, MVT::i8));
}
8466 /// \brief Try to lower a shuffle as a permute of the inputs followed by an
8467 /// UNPCK instruction.
8469 /// This specifically targets cases where we end up with alternating between
8470 /// the two inputs, and so can permute them into something that feeds a single
8471 /// UNPCK instruction.
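///
/// Editor's note (illustrative, not from the original source): e.g. the v8i16
/// mask [1, 8, 3, 10, 5, 12, 7, 14] is handled by permuting V1 to
/// [1, 3, 5, 7, ...] and V2 to [0, 2, 4, 6, ...] and then emitting a single
/// UNPCKL of the two permuted inputs.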
8472 static SDValue lowerVectorShuffleAsUnpack(MVT VT, SDLoc DL, SDValue V1,
8473 SDValue V2, ArrayRef<int> Mask,
8474 SelectionDAG &DAG) {
8475 assert(!isSingleInputShuffleMask(Mask) &&
8476 "This routine should only be used when blending two inputs.");
8477 assert(Mask.size() >= 2 && "Single element masks are invalid.");
8479 int Size = Mask.size();
8481 int NumLoInputs = std::count_if(Mask.begin(), Mask.end(), [Size](int M) {
    return M >= 0 && M % Size < Size / 2;
  });
8484 int NumHiInputs = std::count_if(
8485 Mask.begin(), Mask.end(), [Size](int M) { return M % Size > Size / 2; });
8487 bool UnpackLo = NumLoInputs >= NumHiInputs;
8489 SmallVector<int, 32> V1Mask(Mask.size(), -1);
8490 SmallVector<int, 32> V2Mask(Mask.size(), -1);
8491 for (int i = 0; i < Size; ++i) {
    if (Mask[i] < 0)
      continue;

    // We only handle the case where V1 feeds even mask slots and V2 feeds odd
8496 // mask slots. We rely on canonicalization to ensure this is the case.
    if ((i % 2 == 0) != (Mask[i] < Size))
      return SDValue();
8500 SmallVectorImpl<int> &VMask = (i % 2 == 0) ? V1Mask : V2Mask;
    VMask[i / 2 + (UnpackLo ? 0 : Size / 2)] = Mask[i] % Size;
  }
8504 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
8505 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
  return DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL, VT, V1,
                     V2);
}
8510 /// \brief Handle lowering of 2-lane 64-bit floating point shuffles.
8512 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
8513 /// support for floating point shuffles but not integer shuffles. These
8514 /// instructions will incur a domain crossing penalty on some chips though so
/// it is better to avoid lowering through this for integer vectors where
/// possible.
8517 static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8518 const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
8521 assert(Op.getSimpleValueType() == MVT::v2f64 && "Bad shuffle type!");
8522 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8523 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8524 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8525 ArrayRef<int> Mask = SVOp->getMask();
8526 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8528 if (isSingleInputShuffleMask(Mask)) {
8529 // Use low duplicate instructions for masks that match their pattern.
8530 if (Subtarget->hasSSE3())
8531 if (isShuffleEquivalent(V1, V2, Mask, 0, 0))
8532 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, V1);
8534 // Straight shuffle of a single input vector. Simulate this by using the
8535 // single input as both of the "inputs" to this instruction..
8536 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
8538 if (Subtarget->hasAVX()) {
8539 // If we have AVX, we can use VPERMILPS which will allow folding a load
8540 // into the shuffle.
8541 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
8542 DAG.getConstant(SHUFPDMask, MVT::i8));
    }

    return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1,
                       DAG.getConstant(SHUFPDMask, MVT::i8));
  }
8548 assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
8549 assert(Mask[1] >= 2 && "Non-canonicalized blend!");
8551 // If we have a single input, insert that into V1 if we can do so cheaply.
8552 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8553 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
            MVT::v2f64, DL, V1, V2, Mask, Subtarget, DAG))
      return Insertion;
    // Try inverting the insertion since for v2 masks it is easy to do and we
8557 // can't reliably sort the mask one way or the other.
8558 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8559 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8560 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
            MVT::v2f64, DL, V2, V1, InverseMask, Subtarget, DAG))
      return Insertion;
  }
8565 // Try to use one of the special instruction patterns to handle two common
8566 // blend patterns if a zero-blend above didn't work.
8567 if (isShuffleEquivalent(V1, V2, Mask, 0, 3) || isShuffleEquivalent(V1, V2, Mask, 1, 3))
8568 if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
8569 // We can either use a special instruction to load over the low double or
8570 // to move just the low double.
      return DAG.getNode(
          isShuffleFoldableLoad(V1S) ? X86ISD::MOVLPD : X86ISD::MOVSD,
          DL, MVT::v2f64, V2,
          DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
8576 if (Subtarget->hasSSE41())
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;
8581 // Use dedicated unpack instructions for masks that match their pattern.
8582 if (isShuffleEquivalent(V1, V2, Mask, 0, 2))
8583 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2f64, V1, V2);
8584 if (isShuffleEquivalent(V1, V2, Mask, 1, 3))
8585 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2f64, V1, V2);
8587 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
8588 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V2,
                     DAG.getConstant(SHUFPDMask, MVT::i8));
}
8592 /// \brief Handle lowering of 2-lane 64-bit integer shuffles.
8594 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
8595 /// the integer unit to minimize domain crossing penalties. However, for blends
/// it falls back to the floating point shuffle operation with appropriate bit
/// casting.
8598 static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8599 const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
8602 assert(Op.getSimpleValueType() == MVT::v2i64 && "Bad shuffle type!");
8603 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8604 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8605 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8606 ArrayRef<int> Mask = SVOp->getMask();
8607 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8609 if (isSingleInputShuffleMask(Mask)) {
8610 // Check for being able to broadcast a single element.
8611 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v2i64, DL, V1,
                                                          Mask, Subtarget, DAG))
      return Broadcast;
8615 // Straight shuffle of a single input vector. For everything from SSE2
8616 // onward this has a single fast instruction with no scary immediates.
8617 // We have to map the mask as it is actually a v4i32 shuffle instruction.
8618 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V1);
8619 int WidenedMask[4] = {
8620 std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
8621 std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
    return DAG.getNode(
        ISD::BITCAST, DL, MVT::v2i64,
8624 DAG.getNode(X86ISD::PSHUFD, SDLoc(Op), MVT::v4i32, V1,
                    getV4X86ShuffleImm8ForMask(WidenedMask, DAG)));
  }
8628 // Try to use byte shift instructions.
8629 if (SDValue Shift = lowerVectorShuffleAsByteShift(
          DL, MVT::v2i64, V1, V2, Mask, DAG))
    return Shift;

  // If we have a single input from V2 insert that into V1 if we can do so
  // cheaply.
8635 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8636 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
            MVT::v2i64, DL, V1, V2, Mask, Subtarget, DAG))
      return Insertion;
8639 // Try inverting the insertion since for v2 masks it is easy to do and we
8640 // can't reliably sort the mask one way or the other.
8641 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8642 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8643 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
            MVT::v2i64, DL, V2, V1, InverseMask, Subtarget, DAG))
      return Insertion;
  }
8648 // We have different paths for blend lowering, but they all must use the
8649 // *exact* same predicate.
8650 bool IsBlendSupported = Subtarget->hasSSE41();
8651 if (IsBlendSupported)
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;
8656 // Use dedicated unpack instructions for masks that match their pattern.
8657 if (isShuffleEquivalent(V1, V2, Mask, 0, 2))
8658 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, V1, V2);
8659 if (isShuffleEquivalent(V1, V2, Mask, 1, 3))
8660 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2i64, V1, V2);
8662 // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
8664 if (Subtarget->hasSSSE3())
8665 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
            DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
      return Rotate;
8669 // If we have direct support for blends, we should lower by decomposing into
8670 // a permute. That will be faster than the domain cross.
8671 if (IsBlendSupported)
    return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v2i64, V1, V2,
                                                      Mask, DAG);
8675 // We implement this with SHUFPD which is pretty lame because it will likely
8676 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
8677 // However, all the alternatives are still more cycles and newer chips don't
8678 // have this problem. It would be really nice if x86 had better shuffles here.
8679 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V1);
8680 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V2);
8681 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
                     DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
}
8685 /// \brief Test whether this can be lowered with a single SHUFPS instruction.
8687 /// This is used to disable more specialized lowerings when the shufps lowering
8688 /// will happen to be efficient.
8689 static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
8690 // This routine only handles 128-bit shufps.
8691 assert(Mask.size() == 4 && "Unsupported mask size!");
8693 // To lower with a single SHUFPS we need to have the low half and high half
8694 // each requiring a single input.
8695 if (Mask[0] != -1 && Mask[1] != -1 && (Mask[0] < 4) != (Mask[1] < 4))
    return false;
  if (Mask[2] != -1 && Mask[3] != -1 && (Mask[2] < 4) != (Mask[3] < 4))
    return false;

  return true;
}
8703 /// \brief Lower a vector shuffle using the SHUFPS instruction.
8705 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
/// It makes no assumptions about whether this is the *best* lowering, it simply
/// uses it.
8708 static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
8709 ArrayRef<int> Mask, SDValue V1,
8710 SDValue V2, SelectionDAG &DAG) {
8711 SDValue LowV = V1, HighV = V2;
8712 int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
  int NumV2Elements =
      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8717 if (NumV2Elements == 1) {
    int V2Index =
        std::find_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; }) -
        Mask.begin();

    // Compute the index adjacent to V2Index and in the same half by toggling
    // the low bit.
8724 int V2AdjIndex = V2Index ^ 1;
8726 if (Mask[V2AdjIndex] == -1) {
8727 // Handles all the cases where we have a single V2 element and an undef.
8728 // This will only ever happen in the high lanes because we commute the
8729 // vector otherwise.
      if (V2Index < 2)
        std::swap(LowV, HighV);
8732 NewMask[V2Index] -= 4;
    } else {
      // Handle the case where the V2 element ends up adjacent to a V1 element.
8735 // To make this work, blend them together as the first step.
8736 int V1Index = V2AdjIndex;
8737 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
8738 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
8739 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8741 // Now proceed to reconstruct the final blend as we have the necessary
8742 // high or low half formed.
      if (V2Index < 2) {
        LowV = V2;
        HighV = V1;
      } else {
        HighV = V2;
      }
      NewMask[V1Index] = 2; // We put the V1 element in V2[2].
8750 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
    }
  } else if (NumV2Elements == 2) {
8753 if (Mask[0] < 4 && Mask[1] < 4) {
      // Handle the easy case where we have V1 in the low lanes and V2 in the
      // high lanes.
      NewMask[2] -= 4;
      NewMask[3] -= 4;
8758 } else if (Mask[2] < 4 && Mask[3] < 4) {
8759 // We also handle the reversed case because this utility may get called
8760 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
8761 // arrange things in the right direction.
      NewMask[0] -= 4;
      NewMask[1] -= 4;
      HighV = V1;
      LowV = V2;
    } else {
      // We have a mixture of V1 and V2 in both low and high lanes. Rather than
8768 // trying to place elements directly, just blend them and set up the final
8769 // shuffle to place them.
      // The first two blend mask elements are for V1, the second two are for
      // V2.
8773 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
8774 Mask[2] < 4 ? Mask[2] : Mask[3],
8775 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
8776 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
8777 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
8778 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8780 // Now we do a normal shuffle of V1 by giving V1 as both operands to
      // the shuffle.
      LowV = HighV = V1;
      NewMask[0] = Mask[0] < 4 ? 0 : 2;
8784 NewMask[1] = Mask[0] < 4 ? 2 : 0;
8785 NewMask[2] = Mask[2] < 4 ? 1 : 3;
8786 NewMask[3] = Mask[2] < 4 ? 3 : 1;
    }
  }
  return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
                     getV4X86ShuffleImm8ForMask(NewMask, DAG));
}
8793 /// \brief Lower 4-lane 32-bit floating point shuffles.
8795 /// Uses instructions exclusively from the floating point unit to minimize
/// domain crossing penalties, as these are sufficient to implement all v4f32
/// shuffles.
8798 static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8799 const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
8802 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8803 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8804 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8805 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8806 ArrayRef<int> Mask = SVOp->getMask();
8807 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
  int NumV2Elements =
      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8812 if (NumV2Elements == 0) {
8813 // Check for being able to broadcast a single element.
8814 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f32, DL, V1,
                                                          Mask, Subtarget, DAG))
      return Broadcast;
8818 // Use even/odd duplicate instructions for masks that match their pattern.
8819 if (Subtarget->hasSSE3()) {
8820 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2))
8821 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
8822 if (isShuffleEquivalent(V1, V2, Mask, 1, 1, 3, 3))
8823 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
    }

    if (Subtarget->hasAVX()) {
8827 // If we have AVX, we can use VPERMILPS which will allow folding a load
8828 // into the shuffle.
8829 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
8830 getV4X86ShuffleImm8ForMask(Mask, DAG));
    }

    // Otherwise, use a straight shuffle of a single input vector. We pass the
8834 // input vector to both operands to simulate this with a SHUFPS.
8835 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
8836 getV4X86ShuffleImm8ForMask(Mask, DAG));
  }

  // There are special ways we can lower some single-element blends. However, we
8840 // have custom ways we can lower more complex single-element blends below that
8841 // we defer to if both this and BLENDPS fail to match, so restrict this to
  // when the V2 input is targeting element 0 of the mask -- that is the fast
  // case here.
8844 if (NumV2Elements == 1 && Mask[0] >= 4)
8845 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4f32, DL, V1, V2,
                                                         Mask, Subtarget, DAG))
      return V;
8849 if (Subtarget->hasSSE41()) {
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;
8854 // Use INSERTPS if we can complete the shuffle efficiently.
    if (SDValue V = lowerVectorShuffleAsInsertPS(Op, V1, V2, Mask, DAG))
      return V;
8858 if (!isSingleSHUFPSMask(Mask))
8859 if (SDValue BlendPerm = lowerVectorShuffleAsBlendAndPermute(
              DL, MVT::v4f32, V1, V2, Mask, DAG))
        return BlendPerm;
  }
8864 // Use dedicated unpack instructions for masks that match their pattern.
8865 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 1, 5))
8866 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V1, V2);
8867 if (isShuffleEquivalent(V1, V2, Mask, 2, 6, 3, 7))
8868 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V1, V2);
8870 // Otherwise fall back to a SHUFPS lowering strategy.
  return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
}
8874 /// \brief Lower 4-lane i32 vector shuffles.
8876 /// We try to handle these with integer-domain shuffles where we can, but for
8877 /// blends we use the floating point domain blend instructions.
8878 static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8879 const X86Subtarget *Subtarget,
                                       SelectionDAG &DAG) {
  SDLoc DL(Op);
8882 assert(Op.getSimpleValueType() == MVT::v4i32 && "Bad shuffle type!");
8883 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8884 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8885 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8886 ArrayRef<int> Mask = SVOp->getMask();
8887 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8889 // Whenever we can lower this as a zext, that instruction is strictly faster
8890 // than any alternative. It also allows us to fold memory operands into the
8891 // shuffle in many cases.
8892 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2,
                                                         Mask, Subtarget, DAG))
    return ZExt;

  int NumV2Elements =
      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8899 if (NumV2Elements == 0) {
8900 // Check for being able to broadcast a single element.
8901 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i32, DL, V1,
                                                          Mask, Subtarget, DAG))
      return Broadcast;
8905 // Straight shuffle of a single input vector. For everything from SSE2
8906 // onward this has a single fast instruction with no scary immediates.
8907 // We coerce the shuffle pattern to be compatible with UNPCK instructions
8908 // but we aren't actually going to use the UNPCK instruction because doing
8909 // so prevents folding a load into this instruction or making a copy.
8910 const int UnpackLoMask[] = {0, 0, 1, 1};
8911 const int UnpackHiMask[] = {2, 2, 3, 3};
8912 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 1, 1))
8913 Mask = UnpackLoMask;
8914 else if (isShuffleEquivalent(V1, V2, Mask, 2, 2, 3, 3))
8915 Mask = UnpackHiMask;
8917 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
                       getV4X86ShuffleImm8ForMask(Mask, DAG));
  }
8921 // Try to use bit shift instructions.
8922 if (SDValue Shift = lowerVectorShuffleAsBitShift(
          DL, MVT::v4i32, V1, V2, Mask, DAG))
    return Shift;
8926 // Try to use byte shift instructions.
8927 if (SDValue Shift = lowerVectorShuffleAsByteShift(
          DL, MVT::v4i32, V1, V2, Mask, DAG))
    return Shift;
8931 // There are special ways we can lower some single-element blends.
8932 if (NumV2Elements == 1)
8933 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4i32, DL, V1, V2,
                                                         Mask, Subtarget, DAG))
      return V;
8937 // We have different paths for blend lowering, but they all must use the
8938 // *exact* same predicate.
8939 bool IsBlendSupported = Subtarget->hasSSE41();
8940 if (IsBlendSupported)
    if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
                                                  Subtarget, DAG))
      return Blend;
8945 if (SDValue Masked =
          lowerVectorShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask, DAG))
    return Masked;
8949 // Use dedicated unpack instructions for masks that match their pattern.
8950 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 1, 5))
8951 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V1, V2);
8952 if (isShuffleEquivalent(V1, V2, Mask, 2, 6, 3, 7))
8953 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V1, V2);
8955 // Try to use byte rotation instructions.
  // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
8957 if (Subtarget->hasSSSE3())
8958 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
            DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
      return Rotate;
8962 // If we have direct support for blends, we should lower by decomposing into
8963 // a permute. That will be faster than the domain cross.
8964 if (IsBlendSupported)
    return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i32, V1, V2,
                                                      Mask, DAG);
8968 // Try to lower by permuting the inputs into an unpack instruction.
8969 if (SDValue Unpack =
          lowerVectorShuffleAsUnpack(MVT::v4i32, DL, V1, V2, Mask, DAG))
    return Unpack;
8973 // We implement this with SHUFPS because it can blend from two vectors.
8974 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
  // up the inputs, bypassing domain shift penalties that we would incur if we
  // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
  // an issue.
8978 return DAG.getNode(ISD::BITCAST, DL, MVT::v4i32,
8979 DAG.getVectorShuffle(
                         MVT::v4f32, DL,
                         DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V1),
                         DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V2), Mask));
}
8985 /// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
8986 /// shuffle lowering, and the most complex part.
8988 /// The lowering strategy is to try to form pairs of input lanes which are
8989 /// targeted at the same half of the final vector, and then use a dword shuffle
8990 /// to place them onto the right half, and finally unpack the paired lanes into
8991 /// their final position.
8993 /// The exact breakdown of how to form these dword pairs and align them on the
8994 /// correct sides is really tricky. See the comments within the function for
8995 /// more of the details.
8996 static SDValue lowerV8I16SingleInputVectorShuffle(
8997 SDLoc DL, SDValue V, MutableArrayRef<int> Mask,
8998 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8999 assert(V.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9000 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
9001 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
9003 SmallVector<int, 4> LoInputs;
9004 std::copy_if(LoMask.begin(), LoMask.end(), std::back_inserter(LoInputs),
9005 [](int M) { return M >= 0; });
9006 std::sort(LoInputs.begin(), LoInputs.end());
9007 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
9008 SmallVector<int, 4> HiInputs;
9009 std::copy_if(HiMask.begin(), HiMask.end(), std::back_inserter(HiInputs),
9010 [](int M) { return M >= 0; });
9011 std::sort(HiInputs.begin(), HiInputs.end());
9012 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
  int NumLToL =
      std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
9015 int NumHToL = LoInputs.size() - NumLToL;
  int NumLToH =
      std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
9018 int NumHToH = HiInputs.size() - NumLToH;
9019 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
9020 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
9021 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
9022 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
9024 // Check for being able to broadcast a single element.
9025 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i16, DL, V,
                                                          Mask, Subtarget, DAG))
    return Broadcast;
9029 // Try to use bit shift instructions.
9030 if (SDValue Shift = lowerVectorShuffleAsBitShift(
          DL, MVT::v8i16, V, V, Mask, DAG))
    return Shift;
9034 // Try to use byte shift instructions.
9035 if (SDValue Shift = lowerVectorShuffleAsByteShift(
          DL, MVT::v8i16, V, V, Mask, DAG))
    return Shift;
9039 // Use dedicated unpack instructions for masks that match their pattern.
9040 if (isShuffleEquivalent(V, V, Mask, 0, 0, 1, 1, 2, 2, 3, 3))
9041 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V, V);
9042 if (isShuffleEquivalent(V, V, Mask, 4, 4, 5, 5, 6, 6, 7, 7))
9043 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V, V);
9045 // Try to use byte rotation instructions.
9046 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
          DL, MVT::v8i16, V, V, Mask, Subtarget, DAG))
    return Rotate;
9050 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
9051 // such inputs we can swap two of the dwords across the half mark and end up
9052 // with <=2 inputs to each half in each half. Once there, we can fall through
9053 // to the generic code below. For example:
9055 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
9056 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
9058 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
9059 // and an existing 2-into-2 on the other half. In this case we may have to
9060 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
9061 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
9062 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
9063 // because any other situation (including a 3-into-1 or 1-into-3 in the other
9064 // half than the one we target for fixing) will be fixed when we re-enter this
9065 // path. We will also combine away any sequence of PSHUFD instructions that
9066 // result into a single instruction. Here is an example of the tricky case:
9068 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
9069 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
9071 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
9073 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
9074 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
9076 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
9077 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
9079 // The result is fine to be handled by the generic logic.
9080 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
9081 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
9082 int AOffset, int BOffset) {
9083 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
9084 "Must call this with A having 3 or 1 inputs from the A half.");
9085 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
9086 "Must call this with B having 1 or 3 inputs from the B half.");
9087 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
9088 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
9090 // Compute the index of dword with only one word among the three inputs in
9091 // a half by taking the sum of the half with three inputs and subtracting
9092 // the sum of the actual three inputs. The difference is the remaining
    // word.
    int ADWord, BDWord;
    int &TripleDWord = AToAInputs.size() == 3 ? ADWord : BDWord;
9096 int &OneInputDWord = AToAInputs.size() == 3 ? BDWord : ADWord;
9097 int TripleInputOffset = AToAInputs.size() == 3 ? AOffset : BOffset;
9098 ArrayRef<int> TripleInputs = AToAInputs.size() == 3 ? AToAInputs : BToAInputs;
9099 int OneInput = AToAInputs.size() == 3 ? BToAInputs[0] : AToAInputs[0];
9100 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
9101 int TripleNonInputIdx =
9102 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
9103 TripleDWord = TripleNonInputIdx / 2;
    // We use xor with one to compute the adjacent DWord to whichever one the
    // OneInput word lives in.
9107 OneInputDWord = (OneInput / 2) ^ 1;
9109 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
9110 // and BToA inputs. If there is also such a problem with the BToB and AToB
9111 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
9112 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
9113 // is essential that we don't *create* a 3<-1 as then we might oscillate.
9114 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
      // Compute how many inputs will be flipped by swapping these DWords. We
      // need to balance this to ensure we don't form a 3-1 shuffle in the other
      // half.
9119 int NumFlippedAToBInputs =
9120 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
9121 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
9122 int NumFlippedBToBInputs =
9123 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
9124 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
9125 if ((NumFlippedAToBInputs == 1 &&
9126 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
9127 (NumFlippedBToBInputs == 1 &&
9128 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
9129 // We choose whether to fix the A half or B half based on whether that
9130 // half has zero flipped inputs. At zero, we may not be able to fix it
9131 // with that half. We also bias towards fixing the B half because that
9132 // will more commonly be the high half, and we have to bias one way.
9133 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
9134 ArrayRef<int> Inputs) {
9135 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
9136 bool IsFixIdxInput = std::find(Inputs.begin(), Inputs.end(),
9137 PinnedIdx ^ 1) != Inputs.end();
9138 // Determine whether the free index is in the flipped dword or the
9139 // unflipped dword based on where the pinned index is. We use this bit
9140 // in an xor to conditionally select the adjacent dword.
9141 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
9142 bool IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9143 FixFreeIdx) != Inputs.end();
          if (IsFixIdxInput == IsFixFreeIdxInput)
            FixFreeIdx += 1;
9146 IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9147 FixFreeIdx) != Inputs.end();
9148 assert(IsFixIdxInput != IsFixFreeIdxInput &&
9149 "We need to be changing the number of flipped inputs!");
9150 int PSHUFHalfMask[] = {0, 1, 2, 3};
9151 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
9152 V = DAG.getNode(FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
                          MVT::v8i16, V,
                          getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DAG));
          for (int &M : Mask)
            if (M != -1 && M == FixIdx)
              M = FixFreeIdx;
            else if (M != -1 && M == FixFreeIdx)
              M = FixIdx;
        };
9162 if (NumFlippedBToBInputs != 0) {
          int BPinnedIdx =
              BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
9165 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
9167 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
9169 AToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
9170 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
9175 int PSHUFDMask[] = {0, 1, 2, 3};
9176 PSHUFDMask[ADWord] = BDWord;
9177 PSHUFDMask[BDWord] = ADWord;
9178 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9179 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9180 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9181 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9183 // Adjust the mask to match the new locations of A and B.
    for (int &M : Mask)
      if (M != -1 && M/2 == ADWord)
9186 M = 2 * BDWord + M % 2;
9187 else if (M != -1 && M/2 == BDWord)
9188 M = 2 * ADWord + M % 2;
9190 // Recurse back into this routine to re-compute state now that this isn't
9191 // a 3 and 1 problem.
    return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
                                Mask);
  };
9195 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
9196 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
9197 else if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
9198 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
9200 // At this point there are at most two inputs to the low and high halves from
9201 // each half. That means the inputs can always be grouped into dwords and
9202 // those dwords can then be moved to the correct half with a dword shuffle.
9203 // We use at most one low and one high word shuffle to collect these paired
9204 // inputs into dwords, and finally a dword shuffle to place them.
9205 int PSHUFLMask[4] = {-1, -1, -1, -1};
9206 int PSHUFHMask[4] = {-1, -1, -1, -1};
9207 int PSHUFDMask[4] = {-1, -1, -1, -1};
9209 // First fix the masks for all the inputs that are staying in their
  // original halves. This will then dictate the targets of the cross-half
  // shuffles.
9212 auto fixInPlaceInputs =
9213 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
9214 MutableArrayRef<int> SourceHalfMask,
9215 MutableArrayRef<int> HalfMask, int HalfOffset) {
    if (InPlaceInputs.empty())
      return;
9218 if (InPlaceInputs.size() == 1) {
9219 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9220 InPlaceInputs[0] - HalfOffset;
      PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
      return;
    }
9224 if (IncomingInputs.empty()) {
9225 // Just fix all of the in place inputs.
9226 for (int Input : InPlaceInputs) {
9227 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
9228 PSHUFDMask[Input / 2] = Input / 2;
      }
      return;
    }

    assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
9234 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9235 InPlaceInputs[0] - HalfOffset;
9236 // Put the second input next to the first so that they are packed into
9237 // a dword. We find the adjacent index by toggling the low bit.
9238 int AdjIndex = InPlaceInputs[0] ^ 1;
9239 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
9240 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
9241 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
  };
  fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
9244 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
9246 // Now gather the cross-half inputs and place them into a free dword of
9247 // their target half.
9248 // FIXME: This operation could almost certainly be simplified dramatically to
9249 // look more like the 3-1 fixing operation.
9250 auto moveInputsToRightHalf = [&PSHUFDMask](
9251 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
9252 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
9253 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
      int DestOffset) {
    auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
9256 return SourceHalfMask[Word] != -1 && SourceHalfMask[Word] != Word;
    };
    auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
                                               int Word) {
      int LowWord = Word & ~1;
9261 int HighWord = Word | 1;
9262 return isWordClobbered(SourceHalfMask, LowWord) ||
             isWordClobbered(SourceHalfMask, HighWord);
    };

    if (IncomingInputs.empty())
      return;
9269 if (ExistingInputs.empty()) {
9270 // Map any dwords with inputs from them into the right half.
9271 for (int Input : IncomingInputs) {
9272 // If the source half mask maps over the inputs, turn those into
9273 // swaps and use the swapped lane.
9274 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
9275 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] == -1) {
9276 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
9277 Input - SourceOffset;
9278 // We have to swap the uses in our half mask in one sweep.
9279 for (int &M : HalfMask)
              if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
                M = Input;
9282 else if (M == Input)
9283 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
          } else {
            assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
9286 Input - SourceOffset &&
9287 "Previous placement doesn't match!");
9289 // Note that this correctly re-maps both when we do a swap and when
9290 // we observe the other side of the swap above. We rely on that to
9291 // avoid swapping the members of the input list directly.
9292 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
          }

          // Map the input's dword into the correct half.
9296 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] == -1)
9297 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
          else
            assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
                       Input / 2 &&
                   "Previous placement doesn't match!");
        }
9304 // And just directly shift any other-half mask elements to be same-half
9305 // as we will have mirrored the dword containing the element into the
9306 // same position within that half.
9307 for (int &M : HalfMask)
9308 if (M >= SourceOffset && M < SourceOffset + 4) {
9309 M = M - SourceOffset + DestOffset;
9310 assert(M >= 0 && "This should never wrap below zero!");
9315 // Ensure we have the input in a viable dword of its current half. This
9316 // is particularly tricky because the original position may be clobbered
9317 // by inputs being moved and *staying* in that half.
9318 if (IncomingInputs.size() == 1) {
9319 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9320 int InputFixed = std::find(std::begin(SourceHalfMask),
9321 std::end(SourceHalfMask), -1) -
9322 std::begin(SourceHalfMask) + SourceOffset;
9323 SourceHalfMask[InputFixed - SourceOffset] =
9324 IncomingInputs[0] - SourceOffset;
9325 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
                     InputFixed);
        IncomingInputs[0] = InputFixed;
      }
9329 } else if (IncomingInputs.size() == 2) {
9330 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
9331 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9332 // We have two non-adjacent or clobbered inputs we need to extract from
9333 // the source half. To do this, we need to map them into some adjacent
9334 // dword slot in the source mask.
9335 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
9336 IncomingInputs[1] - SourceOffset};
9338 // If there is a free slot in the source half mask adjacent to one of
9339 // the inputs, place the other input in it. We use (Index XOR 1) to
9340 // compute an adjacent index.
9341 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
9342 SourceHalfMask[InputsFixed[0] ^ 1] == -1) {
9343 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
9344 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9345 InputsFixed[1] = InputsFixed[0] ^ 1;
9346 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
9347 SourceHalfMask[InputsFixed[1] ^ 1] == -1) {
9348 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
9349 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
9350 InputsFixed[0] = InputsFixed[1] ^ 1;
9351 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] == -1 &&
9352 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] == -1) {
9353 // The two inputs are in the same DWord but it is clobbered and the
9354 // adjacent DWord isn't used at all. Move both inputs to the free
9356 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
9357 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
9358 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
9359 InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
        } else {
          // The only way we hit this point is if there is no clobbering
9362 // (because there are no off-half inputs to this half) and there is no
9363 // free slot adjacent to one of the inputs. In this case, we have to
9364 // swap an input with a non-input.
9365 for (int i = 0; i < 4; ++i)
9366 assert((SourceHalfMask[i] == -1 || SourceHalfMask[i] == i) &&
9367 "We can't handle any clobbers here!");
9368 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
9369 "Cannot have adjacent inputs here!");
9371 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9372 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
9374 // We also have to update the final source mask in this case because
9375 // it may need to undo the above swap.
9376 for (int &M : FinalSourceHalfMask)
9377 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
9378 M = InputsFixed[1] + SourceOffset;
9379 else if (M == InputsFixed[1] + SourceOffset)
9380 M = (InputsFixed[0] ^ 1) + SourceOffset;
9382 InputsFixed[1] = InputsFixed[0] ^ 1;
        }

        // Point everything at the fixed inputs.
9386 for (int &M : HalfMask)
9387 if (M == IncomingInputs[0])
9388 M = InputsFixed[0] + SourceOffset;
9389 else if (M == IncomingInputs[1])
9390 M = InputsFixed[1] + SourceOffset;
9392 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
9393 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
9396 llvm_unreachable("Unhandled input size!");
9399 // Now hoist the DWord down to the right half.
9400 int FreeDWord = (PSHUFDMask[DestOffset / 2] == -1 ? 0 : 1) + DestOffset / 2;
9401 assert(PSHUFDMask[FreeDWord] == -1 && "DWord not free");
9402 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
9403 for (int &M : HalfMask)
    for (int Input : IncomingInputs)
      if (M == Input)
        M = FreeDWord * 2 + Input % 2;
9408 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
9409 /*SourceOffset*/ 4, /*DestOffset*/ 0);
9410 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
9411 /*SourceOffset*/ 0, /*DestOffset*/ 4);
9413 // Now enact all the shuffles we've computed to move the inputs into their
9415 if (!isNoopShuffleMask(PSHUFLMask))
9416 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9417 getV4X86ShuffleImm8ForMask(PSHUFLMask, DAG));
9418 if (!isNoopShuffleMask(PSHUFHMask))
9419 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9420 getV4X86ShuffleImm8ForMask(PSHUFHMask, DAG));
9421 if (!isNoopShuffleMask(PSHUFDMask))
9422 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9423 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9424 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9425 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9427 // At this point, each half should contain all its inputs, and we can then
9428 // just shuffle them into their final position.
9429 assert(std::count_if(LoMask.begin(), LoMask.end(),
9430 [](int M) { return M >= 4; }) == 0 &&
9431 "Failed to lift all the high half inputs to the low mask!");
9432 assert(std::count_if(HiMask.begin(), HiMask.end(),
9433 [](int M) { return M >= 0 && M < 4; }) == 0 &&
9434 "Failed to lift all the low half inputs to the high mask!");
9436 // Do a half shuffle for the low mask.
9437 if (!isNoopShuffleMask(LoMask))
9438 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9439 getV4X86ShuffleImm8ForMask(LoMask, DAG));
9441 // Do a half shuffle with the high mask after shifting its values down.
  for (int &M : HiMask)
    if (M >= 0)
      M -= 4;
9445 if (!isNoopShuffleMask(HiMask))
9446 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9447 getV4X86ShuffleImm8ForMask(HiMask, DAG));
/// \brief Detect whether the mask pattern should be lowered through
/// interleaving.
9455 /// This essentially tests whether viewing the mask as an interleaving of two
9456 /// sub-sequences reduces the cross-input traffic of a blend operation. If so,
9457 /// lowering it through interleaving is a significantly better strategy.
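///
/// For example, the v8i16 mask <0, 8, 1, 9, 2, 10, 3, 11> has all of its V1
/// elements in even slots and all of its V2 elements in odd slots, so viewed
/// as an interleaving it needs no cross-input movement, while splitting it
/// into low/high halves would pull elements across both inputs.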
9458 static bool shouldLowerAsInterleaving(ArrayRef<int> Mask) {
9459 int NumEvenInputs[2] = {0, 0};
9460 int NumOddInputs[2] = {0, 0};
9461 int NumLoInputs[2] = {0, 0};
9462 int NumHiInputs[2] = {0, 0};
9463 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
    if (Mask[i] < 0)
      continue;

    int InputIdx = Mask[i] >= Size;

    if (i < Size / 2)
      ++NumLoInputs[InputIdx];
    else
      ++NumHiInputs[InputIdx];

    if ((i % 2) == 0)
      ++NumEvenInputs[InputIdx];
    else
      ++NumOddInputs[InputIdx];
  }
9480 // The minimum number of cross-input results for both the interleaved and
  // split cases. If interleaving results in fewer cross-input results, return
  // true.
9483 int InterleavedCrosses = std::min(NumEvenInputs[1] + NumOddInputs[0],
9484 NumEvenInputs[0] + NumOddInputs[1]);
9485 int SplitCrosses = std::min(NumLoInputs[1] + NumHiInputs[0],
9486 NumLoInputs[0] + NumHiInputs[1]);
9487 return InterleavedCrosses < SplitCrosses;
9490 /// \brief Blend two v8i16 vectors using a naive unpack strategy.
9492 /// This strategy only works when the inputs from each vector fit into a single
9493 /// half of that vector, and generally there are not so many inputs as to leave
9494 /// the in-place shuffles required highly constrained (and thus expensive). It
9495 /// shifts all the inputs into a single side of both input vectors and then
9496 /// uses an unpack to interleave these inputs in a single vector. At that
9497 /// point, we will fall back on the generic single input shuffle lowering.
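///
/// For example, given the v8i16 mask <0, 8, 1, 9, -1, -1, -1, -1>, all of the
/// used inputs already sit in the low halves of V1 and V2, so a single UNPCKL
/// interleaves them and the remaining work collapses to a single-input
/// shuffle of the unpacked result.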
9498 static SDValue lowerV8I16BasicBlendVectorShuffle(SDLoc DL, SDValue V1,
9500 MutableArrayRef<int> Mask,
9501 const X86Subtarget *Subtarget,
9502 SelectionDAG &DAG) {
9503 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9504 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9505 SmallVector<int, 3> LoV1Inputs, HiV1Inputs, LoV2Inputs, HiV2Inputs;
9506 for (int i = 0; i < 8; ++i)
9507 if (Mask[i] >= 0 && Mask[i] < 4)
9508 LoV1Inputs.push_back(i);
9509 else if (Mask[i] >= 4 && Mask[i] < 8)
9510 HiV1Inputs.push_back(i);
9511 else if (Mask[i] >= 8 && Mask[i] < 12)
9512 LoV2Inputs.push_back(i);
9513 else if (Mask[i] >= 12)
9514 HiV2Inputs.push_back(i);
9516 int NumV1Inputs = LoV1Inputs.size() + HiV1Inputs.size();
9517 int NumV2Inputs = LoV2Inputs.size() + HiV2Inputs.size();
9520 assert(NumV1Inputs > 0 && NumV1Inputs <= 3 && "At most 3 inputs supported");
9521 assert(NumV2Inputs > 0 && NumV2Inputs <= 3 && "At most 3 inputs supported");
9522 assert(NumV1Inputs + NumV2Inputs <= 4 && "At most 4 combined inputs");
9524 bool MergeFromLo = LoV1Inputs.size() + LoV2Inputs.size() >=
9525 HiV1Inputs.size() + HiV2Inputs.size();
9527 auto moveInputsToHalf = [&](SDValue V, ArrayRef<int> LoInputs,
9528 ArrayRef<int> HiInputs, bool MoveToLo,
9530 ArrayRef<int> GoodInputs = MoveToLo ? LoInputs : HiInputs;
9531 ArrayRef<int> BadInputs = MoveToLo ? HiInputs : LoInputs;
9532 if (BadInputs.empty())
9535 int MoveMask[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9536 int MoveOffset = MoveToLo ? 0 : 4;
9538 if (GoodInputs.empty()) {
9539 for (int BadInput : BadInputs) {
9540 MoveMask[Mask[BadInput] % 4 + MoveOffset] = Mask[BadInput] - MaskOffset;
9541 Mask[BadInput] = Mask[BadInput] % 4 + MoveOffset + MaskOffset;
9544 if (GoodInputs.size() == 2) {
9545 // If the low inputs are spread across two dwords, pack them into
9547 MoveMask[MoveOffset] = Mask[GoodInputs[0]] - MaskOffset;
9548 MoveMask[MoveOffset + 1] = Mask[GoodInputs[1]] - MaskOffset;
9549 Mask[GoodInputs[0]] = MoveOffset + MaskOffset;
9550 Mask[GoodInputs[1]] = MoveOffset + 1 + MaskOffset;
9552 // Otherwise pin the good inputs.
9553 for (int GoodInput : GoodInputs)
9554 MoveMask[Mask[GoodInput] - MaskOffset] = Mask[GoodInput] - MaskOffset;
9557 if (BadInputs.size() == 2) {
9558 // If we have two bad inputs then there may be either one or two good
9559 // inputs fixed in place. Find a fixed input, and then find the *other*
9560 // two adjacent indices by using modular arithmetic.
      int GoodMaskIdx =
          std::find_if(std::begin(MoveMask) + MoveOffset, std::end(MoveMask),
                       [](int M) { return M >= 0; }) -
          std::begin(MoveMask);
      int MoveMaskIdx =
          ((((GoodMaskIdx - MoveOffset) & ~1) + 2) % 4) + MoveOffset;
9567 assert(MoveMask[MoveMaskIdx] == -1 && "Expected empty slot");
9568 assert(MoveMask[MoveMaskIdx + 1] == -1 && "Expected empty slot");
9569 MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
9570 MoveMask[MoveMaskIdx + 1] = Mask[BadInputs[1]] - MaskOffset;
9571 Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
9572 Mask[BadInputs[1]] = MoveMaskIdx + 1 + MaskOffset;
9574 assert(BadInputs.size() == 1 && "All sizes handled");
9575 int MoveMaskIdx = std::find(std::begin(MoveMask) + MoveOffset,
9576 std::end(MoveMask), -1) -
9577 std::begin(MoveMask);
9578 MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
9579 Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
9583 return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
9586 V1 = moveInputsToHalf(V1, LoV1Inputs, HiV1Inputs, MergeFromLo,
9588 V2 = moveInputsToHalf(V2, LoV2Inputs, HiV2Inputs, MergeFromLo,
9591 // FIXME: Select an interleaving of the merge of V1 and V2 that minimizes
9592 // cross-half traffic in the final shuffle.
9594 // Munge the mask to be a single-input mask after the unpack merges the
9598 M = 2 * (M % 4) + (M / 8);
9600 return DAG.getVectorShuffle(
9601 MVT::v8i16, DL, DAG.getNode(MergeFromLo ? X86ISD::UNPCKL : X86ISD::UNPCKH,
9602 DL, MVT::v8i16, V1, V2),
9603 DAG.getUNDEF(MVT::v8i16), Mask);
9606 /// \brief Generic lowering of 8-lane i16 shuffles.
9608 /// This handles both single-input shuffles and combined shuffle/blends with
9609 /// two inputs. The single input shuffles are immediately delegated to
9610 /// a dedicated lowering routine.
9612 /// The blends are lowered in one of three fundamental ways. If there are few
9613 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
9614 /// of the input is significantly cheaper when lowered as an interleaving of
9615 /// the two inputs, try to interleave them. Otherwise, blend the low and high
9616 /// halves of the inputs separately (making them have relatively few inputs)
9617 /// and then concatenate them.
9618 static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9619 const X86Subtarget *Subtarget,
9620 SelectionDAG &DAG) {
9622 assert(Op.getSimpleValueType() == MVT::v8i16 && "Bad shuffle type!");
9623 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9624 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9625 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9626 ArrayRef<int> OrigMask = SVOp->getMask();
9627 int MaskStorage[8] = {OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9628 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7]};
9629 MutableArrayRef<int> Mask(MaskStorage);
9631 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
9633 // Whenever we can lower this as a zext, that instruction is strictly faster
9634 // than any alternative.
9635 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9636 DL, MVT::v8i16, V1, V2, OrigMask, Subtarget, DAG))
9639 auto isV1 = [](int M) { return M >= 0 && M < 8; };
9640 auto isV2 = [](int M) { return M >= 8; };
9642 int NumV1Inputs = std::count_if(Mask.begin(), Mask.end(), isV1);
9643 int NumV2Inputs = std::count_if(Mask.begin(), Mask.end(), isV2);
9645 if (NumV2Inputs == 0)
9646 return lowerV8I16SingleInputVectorShuffle(DL, V1, Mask, Subtarget, DAG);
9648 assert(NumV1Inputs > 0 && "All single-input shuffles should be canonicalized "
9649 "to be V1-input shuffles.");
9651 // Try to use bit shift instructions.
9652 if (SDValue Shift = lowerVectorShuffleAsBitShift(
9653 DL, MVT::v8i16, V1, V2, Mask, DAG))
9656 // Try to use byte shift instructions.
9657 if (SDValue Shift = lowerVectorShuffleAsByteShift(
9658 DL, MVT::v8i16, V1, V2, Mask, DAG))
9661 // There are special ways we can lower some single-element blends.
9662 if (NumV2Inputs == 1)
9663 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v8i16, DL, V1, V2,
9664 Mask, Subtarget, DAG))
9667 // We have different paths for blend lowering, but they all must use the
9668 // *exact* same predicate.
9669 bool IsBlendSupported = Subtarget->hasSSE41();
9670 if (IsBlendSupported)
9671 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
9675 if (SDValue Masked =
9676 lowerVectorShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask, DAG))
9679 // Use dedicated unpack instructions for masks that match their pattern.
9680 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 2, 10, 3, 11))
9681 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V1, V2);
9682 if (isShuffleEquivalent(V1, V2, Mask, 4, 12, 5, 13, 6, 14, 7, 15))
9683 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V1, V2);
9685 // Try to use byte rotation instructions.
9686 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9687 DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
9690 if (NumV1Inputs + NumV2Inputs <= 4)
9691 return lowerV8I16BasicBlendVectorShuffle(DL, V1, V2, Mask, Subtarget, DAG);
9693 // Check whether an interleaving lowering is likely to be more efficient.
9694 // This isn't perfect but it is a strong heuristic that tends to work well on
9695 // the kinds of shuffles that show up in practice.
9697 // FIXME: Handle 1x, 2x, and 4x interleaving.
9698 if (shouldLowerAsInterleaving(Mask)) {
    // FIXME: Figure out whether we should pack these into the low or high
    // halves.
9702 int EMask[8], OMask[8];
9703 for (int i = 0; i < 4; ++i) {
9704 EMask[i] = Mask[2*i];
9705 OMask[i] = Mask[2*i + 1];
9710 SDValue Evens = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, EMask);
9711 SDValue Odds = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, OMask);
9713 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, Evens, Odds);
9716 // If we have direct support for blends, we should lower by decomposing into
9718 if (IsBlendSupported)
9719 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i16, V1, V2,
9722 // Try to lower by permuting the inputs into an unpack instruction.
9723 if (SDValue Unpack =
9724 lowerVectorShuffleAsUnpack(MVT::v8i16, DL, V1, V2, Mask, DAG))
9727 int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9728 int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9730 for (int i = 0; i < 4; ++i) {
9731 LoBlendMask[i] = Mask[i];
9732 HiBlendMask[i] = Mask[i + 4];
9735 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
9736 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
9737 LoV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, LoV);
9738 HiV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, HiV);
9740 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9741 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, LoV, HiV));
9744 /// \brief Check whether a compaction lowering can be done by dropping even
9745 /// elements and compute how many times even elements must be dropped.
9747 /// This handles shuffles which take every Nth element where N is a power of
9748 /// two. Example shuffle masks:
9750 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
9751 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
9752 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
9753 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
9754 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
9755 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
9757 /// Any of these lanes can of course be undef.
9759 /// This routine only supports N <= 3.
9760 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
9763 /// \returns N above, or the number of times even elements must be dropped if
9764 /// there is such a number. Otherwise returns zero.
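///
/// For example, when this returns N = 1 for a v16i8 mask, the caller can
/// realize the shuffle with a single PACKUS once the dropped bytes have been
/// masked to zero; N = 2 or N = 3 simply repeat that PACKUS step.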
9765 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
9766 // Figure out whether we're looping over two inputs or just one.
9767 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9769 // The modulus for the shuffle vector entries is based on whether this is
9770 // a single input or not.
9771 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
9772 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
9773 "We should only be called with masks with a power-of-2 size!");
9775 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
9777 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
9778 // and 2^3 simultaneously. This is because we may have ambiguity with
9779 // partially undef inputs.
9780 bool ViableForN[3] = {true, true, true};
9782 for (int i = 0, e = Mask.size(); i < e; ++i) {
    // Ignore undef lanes, we'll optimistically collapse them to the pattern we
    // want.
    if (Mask[i] == -1)
      continue;
9788 bool IsAnyViable = false;
9789 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9790 if (ViableForN[j]) {
        uint64_t N = j + 1;

        // The shuffle mask must be equal to (i * 2^N) % M.
        if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
          IsAnyViable = true;
        else
          ViableForN[j] = false;
      }
    // Early exit if we exhaust the possible powers of two.
    if (!IsAnyViable)
      break;
  }
  for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
    if (ViableForN[j])
      return j + 1;

  // Return 0 as there is no viable power of two.
  return 0;
}
9812 /// \brief Generic lowering of v16i8 shuffles.
9814 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
9815 /// detect any complexity reducing interleaving. If that doesn't help, it uses
9816 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
/// the existing lowering for v8i16 blends on each half, finally PACK-ing them
/// back together.
9819 static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9820 const X86Subtarget *Subtarget,
9821 SelectionDAG &DAG) {
9823 assert(Op.getSimpleValueType() == MVT::v16i8 && "Bad shuffle type!");
9824 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9825 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9826 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9827 ArrayRef<int> OrigMask = SVOp->getMask();
9828 assert(OrigMask.size() == 16 && "Unexpected mask size for v16 shuffle!");
9830 // Try to use bit shift instructions.
9831 if (SDValue Shift = lowerVectorShuffleAsBitShift(
9832 DL, MVT::v16i8, V1, V2, OrigMask, DAG))
9835 // Try to use byte shift instructions.
9836 if (SDValue Shift = lowerVectorShuffleAsByteShift(
9837 DL, MVT::v16i8, V1, V2, OrigMask, DAG))
9840 // Try to use byte rotation instructions.
9841 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9842 DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
9845 // Try to use a zext lowering.
9846 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9847 DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
9850 int MaskStorage[16] = {
9851 OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9852 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7],
9853 OrigMask[8], OrigMask[9], OrigMask[10], OrigMask[11],
9854 OrigMask[12], OrigMask[13], OrigMask[14], OrigMask[15]};
9855 MutableArrayRef<int> Mask(MaskStorage);
9856 MutableArrayRef<int> LoMask = Mask.slice(0, 8);
9857 MutableArrayRef<int> HiMask = Mask.slice(8, 8);
  int NumV2Elements =
      std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 16; });
9862 // For single-input shuffles, there are some nicer lowering tricks we can use.
9863 if (NumV2Elements == 0) {
9864 // Check for being able to broadcast a single element.
9865 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i8, DL, V1,
9866 Mask, Subtarget, DAG))
9869 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
9870 // Notably, this handles splat and partial-splat shuffles more efficiently.
9871 // However, it only makes sense if the pre-duplication shuffle simplifies
9872 // things significantly. Currently, this means we need to be able to
9873 // express the pre-duplication shuffle as an i16 shuffle.
9875 // FIXME: We should check for other patterns which can be widened into an
9876 // i16 shuffle as well.
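    //
    // For example, a byte splat such as <5, 5, 5, ..., 5> qualifies because
    // every adjacent pair of mask entries is identical, so the shuffle can be
    // expressed on i16 elements once the source bytes have been duplicated.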
9877 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
9878 for (int i = 0; i < 16; i += 2)
9879 if (Mask[i] != -1 && Mask[i + 1] != -1 && Mask[i] != Mask[i + 1])
9884 auto tryToWidenViaDuplication = [&]() -> SDValue {
9885 if (!canWidenViaDuplication(Mask))
9887 SmallVector<int, 4> LoInputs;
9888 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(LoInputs),
9889 [](int M) { return M >= 0 && M < 8; });
9890 std::sort(LoInputs.begin(), LoInputs.end());
9891 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
9893 SmallVector<int, 4> HiInputs;
9894 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(HiInputs),
9895 [](int M) { return M >= 8; });
9896 std::sort(HiInputs.begin(), HiInputs.end());
9897 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
9900 bool TargetLo = LoInputs.size() >= HiInputs.size();
9901 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
9902 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
9904 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9905 SmallDenseMap<int, int, 8> LaneMap;
      for (int I : InPlaceInputs) {
        PreDupI16Shuffle[I / 2] = I / 2;
        LaneMap[I] = I;
      }
9910 int j = TargetLo ? 0 : 4, je = j + 4;
9911 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
9912 // Check if j is already a shuffle of this input. This happens when
9913 // there are two adjacent bytes after we move the low one.
9914 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
          // If we haven't yet mapped the input, search for a slot into which
          // we can map it.
          while (j < je && PreDupI16Shuffle[j] != -1)
            ++j;

          if (j == je)
            // We can't place the inputs into a single half with a simple
            // i16 shuffle, so bail.
            return SDValue();
9924 // Map this input with the i16 shuffle.
9925 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
9928 // Update the lane map based on the mapping we ended up with.
9929 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
      }

      V1 = DAG.getNode(
          ISD::BITCAST, DL, MVT::v16i8,
9933 DAG.getVectorShuffle(MVT::v8i16, DL,
9934 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9935 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
9937 // Unpack the bytes to form the i16s that will be shuffled into place.
9938 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9939 MVT::v16i8, V1, V1);
9941 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9942 for (int i = 0; i < 16; ++i)
9943 if (Mask[i] != -1) {
9944 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
9945 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
9946 if (PostDupI16Shuffle[i / 2] == -1)
9947 PostDupI16Shuffle[i / 2] = MappedMask;
          else
            assert(PostDupI16Shuffle[i / 2] == MappedMask &&
                   "Conflicting entries in the original shuffle!");
        }

      return DAG.getNode(
          ISD::BITCAST, DL, MVT::v16i8,
9954 DAG.getVectorShuffle(MVT::v8i16, DL,
9955 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9956 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
9958 if (SDValue V = tryToWidenViaDuplication())
9962 // Check whether an interleaving lowering is likely to be more efficient.
9963 // This isn't perfect but it is a strong heuristic that tends to work well on
9964 // the kinds of shuffles that show up in practice.
9966 // FIXME: We need to handle other interleaving widths (i16, i32, ...).
9967 if (shouldLowerAsInterleaving(Mask)) {
9968 int NumLoHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
9969 return (M >= 0 && M < 8) || (M >= 16 && M < 24);
9971 int NumHiHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
9972 return (M >= 8 && M < 16) || M >= 24;
9974 int EMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
9975 -1, -1, -1, -1, -1, -1, -1, -1};
9976 int OMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
9977 -1, -1, -1, -1, -1, -1, -1, -1};
9978 bool UnpackLo = NumLoHalf >= NumHiHalf;
9979 MutableArrayRef<int> TargetEMask(UnpackLo ? EMask : EMask + 8, 8);
9980 MutableArrayRef<int> TargetOMask(UnpackLo ? OMask : OMask + 8, 8);
9981 for (int i = 0; i < 8; ++i) {
9982 TargetEMask[i] = Mask[2 * i];
9983 TargetOMask[i] = Mask[2 * i + 1];
9986 SDValue Evens = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, EMask);
9987 SDValue Odds = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, OMask);
9989 return DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9990 MVT::v16i8, Evens, Odds);
9993 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
9994 // with PSHUFB. It is important to do this before we attempt to generate any
9995 // blends but after all of the single-input lowerings. If the single input
9996 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
9997 // want to preserve that and we can DAG combine any longer sequences into
9998 // a PSHUFB in the end. But once we start blending from multiple inputs,
9999 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
10000 // and there are *very* few patterns that would actually be faster than the
10001 // PSHUFB approach because of its ability to zero lanes.
10003 // FIXME: The only exceptions to the above are blends which are exact
10004 // interleavings with direct instructions supporting them. We currently don't
10005 // handle those well here.
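  //
  // As a concrete example of the mask construction below: a mask entry of 20
  // produces V1Idx = 0x80 (which zeroes that byte in V1's PSHUFB result) and
  // V2Idx = 4, so OR-ing the two PSHUFB results selects byte 4 of V2 for that
  // lane (assuming the lane isn't otherwise zeroable).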
10006 if (Subtarget->hasSSSE3()) {
10007 SDValue V1Mask[16];
10008 SDValue V2Mask[16];
10009 bool V1InUse = false;
10010 bool V2InUse = false;
10011 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
10013 for (int i = 0; i < 16; ++i) {
10014 if (Mask[i] == -1) {
10015 V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
      } else {
        const int ZeroMask = 0x80;
10018 int V1Idx = (Mask[i] < 16 ? Mask[i] : ZeroMask);
10019 int V2Idx = (Mask[i] < 16 ? ZeroMask : Mask[i] - 16);
        if (Zeroable[i])
          V1Idx = V2Idx = ZeroMask;
10022 V1Mask[i] = DAG.getConstant(V1Idx, MVT::i8);
10023 V2Mask[i] = DAG.getConstant(V2Idx, MVT::i8);
10024 V1InUse |= (ZeroMask != V1Idx);
10025 V2InUse |= (ZeroMask != V2Idx);
10029 // If both V1 and V2 are in use and we can use a direct blend, do so. This
10030 // avoids using blends to handle blends-with-zero which is important as
10031 // a single pshufb is significantly faster for that.
10032 if (V1InUse && V2InUse && Subtarget->hasSSE41())
      if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i8, V1, V2, Mask,
                                                    Subtarget, DAG))
        return Blend;

    if (V1InUse)
      V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V1,
                       DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
    if (V2InUse)
      V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V2,
10043 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));
10045 // If we need shuffled inputs from both, blend the two.
10046 if (V1InUse && V2InUse)
10047 return DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
    if (V1InUse)
      return V1; // Single inputs are easy.
    if (V2InUse)
      return V2; // Single inputs are easy.

    // Shuffling to a zeroable vector.
    return getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
  }
10056 // There are special ways we can lower some single-element blends.
10057 if (NumV2Elements == 1)
10058 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v16i8, DL, V1, V2,
10059 Mask, Subtarget, DAG))
10062 // Check whether a compaction lowering can be done. This handles shuffles
10063 // which take every Nth element for some even N. See the helper function for
10066 // We special case these as they can be particularly efficiently handled with
  // the PACKUSWB instruction on x86 and they show up in common patterns of
10068 // rearranging bytes to truncate wide elements.
10069 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask)) {
10070 // NumEvenDrops is the power of two stride of the elements. Another way of
10071 // thinking about it is that we need to drop the even elements this many
10072 // times to get the original input.
10073 bool IsSingleInput = isSingleInputShuffleMask(Mask);
10075 // First we need to zero all the dropped bytes.
10076 assert(NumEvenDrops <= 3 &&
10077 "No support for dropping even elements more than 3 times.");
10078 // We use the mask type to pick which bytes are preserved based on how many
10079 // elements are dropped.
10080 MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
10081 SDValue ByteClearMask =
10082 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8,
10083 DAG.getConstant(0xFF, MaskVTs[NumEvenDrops - 1]));
10084 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
10085 if (!IsSingleInput)
10086 V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
10088 // Now pack things back together.
10089 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
10090 V2 = IsSingleInput ? V1 : DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
10091 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
10092 for (int i = 1; i < NumEvenDrops; ++i) {
10093 Result = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, Result);
      Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
    }
    return Result;
  }
10100 int V1LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10101 int V1HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10102 int V2LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10103 int V2HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
10105 auto buildBlendMasks = [](MutableArrayRef<int> HalfMask,
10106 MutableArrayRef<int> V1HalfBlendMask,
10107 MutableArrayRef<int> V2HalfBlendMask) {
10108 for (int i = 0; i < 8; ++i)
10109 if (HalfMask[i] >= 0 && HalfMask[i] < 16) {
10110 V1HalfBlendMask[i] = HalfMask[i];
10112 } else if (HalfMask[i] >= 16) {
10113 V2HalfBlendMask[i] = HalfMask[i] - 16;
10114 HalfMask[i] = i + 8;
10117 buildBlendMasks(LoMask, V1LoBlendMask, V2LoBlendMask);
10118 buildBlendMasks(HiMask, V1HiBlendMask, V2HiBlendMask);
10120 SDValue Zero = getZeroVector(MVT::v8i16, Subtarget, DAG, DL);
10122 auto buildLoAndHiV8s = [&](SDValue V, MutableArrayRef<int> LoBlendMask,
10123 MutableArrayRef<int> HiBlendMask) {
10125 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
    // them out and avoid using UNPCK{L,H} to extract the elements of V as
    // i16s.
10128 if (std::none_of(LoBlendMask.begin(), LoBlendMask.end(),
10129 [](int M) { return M >= 0 && M % 2 == 1; }) &&
10130 std::none_of(HiBlendMask.begin(), HiBlendMask.end(),
10131 [](int M) { return M >= 0 && M % 2 == 1; })) {
10132 // Use a mask to drop the high bytes.
10133 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
10134 V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, V1,
10135 DAG.getConstant(0x00FF, MVT::v8i16));
10137 // This will be a single vector shuffle instead of a blend so nuke V2.
10138 V2 = DAG.getUNDEF(MVT::v8i16);
10140 // Squash the masks to point directly into V1.
        for (int &M : LoBlendMask)
          if (M >= 0)
            M /= 2;
        for (int &M : HiBlendMask)
          if (M >= 0)
            M /= 2;
      } else {
10148 // Otherwise just unpack the low half of V into V1 and the high half into
10149 // V2 so that we can blend them as i16s.
10150 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
10151 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
10152 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
10153 DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
10156 SDValue BlendedLo = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
10157 SDValue BlendedHi = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
10158 return std::make_pair(BlendedLo, BlendedHi);
10160 SDValue V1Lo, V1Hi, V2Lo, V2Hi;
10161 std::tie(V1Lo, V1Hi) = buildLoAndHiV8s(V1, V1LoBlendMask, V1HiBlendMask);
10162 std::tie(V2Lo, V2Hi) = buildLoAndHiV8s(V2, V2LoBlendMask, V2HiBlendMask);
10164 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Lo, V2Lo, LoMask);
10165 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Hi, V2Hi, HiMask);
10167 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
10170 /// \brief Dispatching routine to lower various 128-bit x86 vector shuffles.
10172 /// This routine breaks down the specific type of 128-bit shuffle and
10173 /// dispatches to the lowering routines accordingly.
10174 static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10175 MVT VT, const X86Subtarget *Subtarget,
10176 SelectionDAG &DAG) {
10177 switch (VT.SimpleTy) {
10179 return lowerV2I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10181 return lowerV2F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10183 return lowerV4I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10185 return lowerV4F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10187 return lowerV8I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
10189 return lowerV16I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
10192 llvm_unreachable("Unimplemented!");
10196 /// \brief Helper function to test whether a shuffle mask could be
10197 /// simplified by widening the elements being shuffled.
10199 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
10200 /// leaves it in an unspecified state.
10202 /// NOTE: This must handle normal vector shuffle masks and *target* vector
10203 /// shuffle masks. The latter have the special property of a '-2' representing
10204 /// a zero-ed lane of a vector.
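///
/// For example, the v4i32 mask <0, 1, 6, 7> widens to the v2i64 mask <0, 3>,
/// while <1, 2, 5, 4> cannot be widened because neither pair forms an
/// adjacent, evenly-aligned run of elements.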
10205 static bool canWidenShuffleElements(ArrayRef<int> Mask,
10206 SmallVectorImpl<int> &WidenedMask) {
10207 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
10208 // If both elements are undef, its trivial.
10209 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] == SM_SentinelUndef) {
10210 WidenedMask.push_back(SM_SentinelUndef);
10214 // Check for an undef mask and a mask value properly aligned to fit with
10215 // a pair of values. If we find such a case, use the non-undef mask's value.
    if (Mask[i] == SM_SentinelUndef && Mask[i + 1] >= 0 &&
        Mask[i + 1] % 2 == 1) {
10217 WidenedMask.push_back(Mask[i + 1] / 2);
10220 if (Mask[i + 1] == SM_SentinelUndef && Mask[i] >= 0 && Mask[i] % 2 == 0) {
10221 WidenedMask.push_back(Mask[i] / 2);
10225 // When zeroing, we need to spread the zeroing across both lanes to widen.
10226 if (Mask[i] == SM_SentinelZero || Mask[i + 1] == SM_SentinelZero) {
10227 if ((Mask[i] == SM_SentinelZero || Mask[i] == SM_SentinelUndef) &&
10228 (Mask[i + 1] == SM_SentinelZero || Mask[i + 1] == SM_SentinelUndef)) {
10229 WidenedMask.push_back(SM_SentinelZero);
    // Finally check if the two mask values are adjacent and aligned with
    // their pair.
    if (Mask[i] != SM_SentinelUndef && Mask[i] % 2 == 0 &&
        Mask[i] + 1 == Mask[i + 1]) {
10238 WidenedMask.push_back(Mask[i] / 2);
    // Otherwise we can't safely widen the elements used in this shuffle.
    return false;
  }
10245 assert(WidenedMask.size() == Mask.size() / 2 &&
10246 "Incorrect size of mask after widening the elements!");
10251 /// \brief Generic routine to split vector shuffle into half-sized shuffles.
10253 /// This routine just extracts two subvectors, shuffles them independently, and
10254 /// then concatenates them back together. This should work effectively with all
10255 /// AVX vector shuffle types.
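///
/// Roughly, for a v8f32 shuffle this produces independent v4f32 shuffles
/// (blends of the extracted halves of V1 and V2) for the low and high halves
/// of the result and re-joins them with a CONCAT_VECTORS node.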
10256 static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10257 SDValue V2, ArrayRef<int> Mask,
10258 SelectionDAG &DAG) {
10259 assert(VT.getSizeInBits() >= 256 &&
10260 "Only for 256-bit or wider vector shuffles!");
10261 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
10262 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
10264 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
10265 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
10267 int NumElements = VT.getVectorNumElements();
10268 int SplitNumElements = NumElements / 2;
10269 MVT ScalarVT = VT.getScalarType();
10270 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
10272 // Rather than splitting build-vectors, just build two narrower build
10273 // vectors. This helps shuffling with splats and zeros.
10274 auto SplitVector = [&](SDValue V) {
10275 while (V.getOpcode() == ISD::BITCAST)
10276 V = V->getOperand(0);
10278 MVT OrigVT = V.getSimpleValueType();
10279 int OrigNumElements = OrigVT.getVectorNumElements();
10280 int OrigSplitNumElements = OrigNumElements / 2;
10281 MVT OrigScalarVT = OrigVT.getScalarType();
10282 MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
10286 auto *BV = dyn_cast<BuildVectorSDNode>(V);
10288 LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
10289 DAG.getIntPtrConstant(0));
10290 HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
10291 DAG.getIntPtrConstant(OrigSplitNumElements));
10294 SmallVector<SDValue, 16> LoOps, HiOps;
10295 for (int i = 0; i < OrigSplitNumElements; ++i) {
10296 LoOps.push_back(BV->getOperand(i));
10297 HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
10299 LoV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, LoOps);
10300 HiV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, HiOps);
10302 return std::make_pair(DAG.getNode(ISD::BITCAST, DL, SplitVT, LoV),
10303 DAG.getNode(ISD::BITCAST, DL, SplitVT, HiV));
10306 SDValue LoV1, HiV1, LoV2, HiV2;
10307 std::tie(LoV1, HiV1) = SplitVector(V1);
10308 std::tie(LoV2, HiV2) = SplitVector(V2);
10310 // Now create two 4-way blends of these half-width vectors.
10311 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
10312 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
10313 SmallVector<int, 32> V1BlendMask, V2BlendMask, BlendMask;
10314 for (int i = 0; i < SplitNumElements; ++i) {
10315 int M = HalfMask[i];
10316 if (M >= NumElements) {
        if (M >= NumElements + SplitNumElements)
          UseHiV2 = true;
        else
          UseLoV2 = true;
10321 V2BlendMask.push_back(M - NumElements);
10322 V1BlendMask.push_back(-1);
10323 BlendMask.push_back(SplitNumElements + i);
10324 } else if (M >= 0) {
        if (M >= SplitNumElements)
          UseHiV1 = true;
        else
          UseLoV1 = true;
10329 V2BlendMask.push_back(-1);
10330 V1BlendMask.push_back(M);
10331 BlendMask.push_back(i);
10333 V2BlendMask.push_back(-1);
10334 V1BlendMask.push_back(-1);
10335 BlendMask.push_back(-1);
10339 // Because the lowering happens after all combining takes place, we need to
10340 // manually combine these blend masks as much as possible so that we create
10341 // a minimal number of high-level vector shuffle nodes.
10343 // First try just blending the halves of V1 or V2.
10344 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
10345 return DAG.getUNDEF(SplitVT);
10346 if (!UseLoV2 && !UseHiV2)
10347 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10348 if (!UseLoV1 && !UseHiV1)
10349 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10351 SDValue V1Blend, V2Blend;
10352 if (UseLoV1 && UseHiV1) {
      V1Blend =
          DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
    } else {
      // We only use half of V1 so map the usage down into the final blend mask.
10357 V1Blend = UseLoV1 ? LoV1 : HiV1;
10358 for (int i = 0; i < SplitNumElements; ++i)
10359 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
10360 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
10362 if (UseLoV2 && UseHiV2) {
      V2Blend =
          DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
    } else {
      // We only use half of V2 so map the usage down into the final blend mask.
10367 V2Blend = UseLoV2 ? LoV2 : HiV2;
10368 for (int i = 0; i < SplitNumElements; ++i)
10369 if (BlendMask[i] >= SplitNumElements)
10370 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
10372 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
  };
  SDValue Lo = HalfBlend(LoMask);
10375 SDValue Hi = HalfBlend(HiMask);
10376 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
/// \brief Either split a vector in halves or decompose the shuffles and the
/// blend.
10382 /// This is provided as a good fallback for many lowerings of non-single-input
10383 /// shuffles with more than one 128-bit lane. In those cases, we want to select
10384 /// between splitting the shuffle into 128-bit components and stitching those
/// back together vs. extracting the single-input shuffles and blending those
/// results.
10387 static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
10388 SDValue V2, ArrayRef<int> Mask,
10389 SelectionDAG &DAG) {
10390 assert(!isSingleInputShuffleMask(Mask) && "This routine must not be used to "
10391 "lower single-input shuffles as it "
10392 "could then recurse on itself.");
10393 int Size = Mask.size();
10395 // If this can be modeled as a broadcast of two elements followed by a blend,
10396 // prefer that lowering. This is especially important because broadcasts can
10397 // often fold with memory operands.
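  //
  // For example, the v4f64 mask <2, 6, 2, 6> is a broadcast of element 2 from
  // each input followed by a blend, which is exactly what this predicate
  // detects.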
10398 auto DoBothBroadcast = [&] {
10399 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
10402 if (V2BroadcastIdx == -1)
10403 V2BroadcastIdx = M - Size;
10404 else if (M - Size != V2BroadcastIdx)
10406 } else if (M >= 0) {
10407 if (V1BroadcastIdx == -1)
10408 V1BroadcastIdx = M;
10409 else if (M != V1BroadcastIdx)
10414 if (DoBothBroadcast())
10415 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
10418 // If the inputs all stem from a single 128-bit lane of each input, then we
10419 // split them rather than blending because the split will decompose to
10420 // unusually few instructions.
10421 int LaneCount = VT.getSizeInBits() / 128;
10422 int LaneSize = Size / LaneCount;
10423 SmallBitVector LaneInputs[2];
10424 LaneInputs[0].resize(LaneCount, false);
10425 LaneInputs[1].resize(LaneCount, false);
10426 for (int i = 0; i < Size; ++i)
10428 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
10429 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
10430 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10432 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
10433 // that the decomposed single-input shuffles don't end up here.
10434 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10437 /// \brief Lower a vector shuffle crossing multiple 128-bit lanes as
10438 /// a permutation and blend of those lanes.
10440 /// This essentially blends the out-of-lane inputs to each lane into the lane
10441 /// from a permuted copy of the vector. This lowering strategy results in four
10442 /// instructions in the worst case for a single-input cross lane shuffle which
10443 /// is lower than any other fully general cross-lane shuffle strategy I'm aware
10444 /// of. Special cases for each particular shuffle pattern should be handled
10445 /// prior to trying this lowering.
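///
/// Roughly, the single-input case below materializes this as one VPERM2X128
/// that swaps the two 128-bit halves of the input, followed by an in-lane
/// shuffle blending the original and flipped vectors.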
10446 static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT,
10447 SDValue V1, SDValue V2,
10448 ArrayRef<int> Mask,
10449 SelectionDAG &DAG) {
10450 // FIXME: This should probably be generalized for 512-bit vectors as well.
10451 assert(VT.getSizeInBits() == 256 && "Only for 256-bit vector shuffles!");
10452 int LaneSize = Mask.size() / 2;
10454 // If there are only inputs from one 128-bit lane, splitting will in fact be
  // less expensive. The flags track whether the given lane contains an element
10456 // that crosses to another lane.
10457 bool LaneCrossing[2] = {false, false};
10458 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10459 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10460 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
10461 if (!LaneCrossing[0] || !LaneCrossing[1])
10462 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10464 if (isSingleInputShuffleMask(Mask)) {
10465 SmallVector<int, 32> FlippedBlendMask;
10466 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10467 FlippedBlendMask.push_back(
10468 Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
10470 : Mask[i] % LaneSize +
10471 (i / LaneSize) * LaneSize + Size));
10473 // Flip the vector, and blend the results which should now be in-lane. The
10474 // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
10475 // 5 for the high source. The value 3 selects the high half of source 2 and
10476 // the value 2 selects the low half of source 2. We only use source 2 to
10477 // allow folding it into a memory operand.
10478 unsigned PERMMask = 3 | 2 << 4;
10479 SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
10480 V1, DAG.getConstant(PERMMask, MVT::i8));
10481 return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
10484 // This now reduces to two single-input shuffles of V1 and V2 which at worst
10485 // will be handled by the above logic and a blend of the results, much like
10486 // other patterns in AVX.
10487 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10490 /// \brief Handle lowering 2-lane 128-bit shuffles.
10491 static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10492 SDValue V2, ArrayRef<int> Mask,
10493 const X86Subtarget *Subtarget,
10494 SelectionDAG &DAG) {
10495 // Blends are faster and handle all the non-lane-crossing cases.
10496 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask,
10500 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
10501 VT.getVectorNumElements() / 2);
  // Check for patterns which can be matched with a single insert of a 128-bit
  // subvector.
10504 if (isShuffleEquivalent(V1, V2, Mask, 0, 1, 0, 1) ||
10505 isShuffleEquivalent(V1, V2, Mask, 0, 1, 4, 5)) {
10506 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10507 DAG.getIntPtrConstant(0));
10508 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
10509 Mask[2] < 4 ? V1 : V2, DAG.getIntPtrConstant(0));
10510 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10512 if (isShuffleEquivalent(V1, V2, Mask, 0, 1, 6, 7)) {
10513 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10514 DAG.getIntPtrConstant(0));
10515 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
10516 DAG.getIntPtrConstant(2));
10517 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10520 // Otherwise form a 128-bit permutation.
10521 // FIXME: Detect zero-vector inputs and use the VPERM2X128 to zero that half.
10522 unsigned PermMask = Mask[0] / 2 | (Mask[2] / 2) << 4;
10523 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
10524 DAG.getConstant(PermMask, MVT::i8));
10527 /// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
10528 /// shuffling each lane.
10530 /// This will only succeed when the result of fixing the 128-bit lanes results
10531 /// in a single-input non-lane-crossing shuffle with a repeating shuffle mask in
/// each 128-bit lane. This handles many cases where we can quickly blend away
10533 /// the lane crosses early and then use simpler shuffles within each lane.
10535 /// FIXME: It might be worthwhile at some point to support this without
10536 /// requiring the 128-bit lane-relative shuffles to be repeating, but currently
10537 /// in x86 only floating point has interesting non-repeating shuffles, and even
10538 /// those are still *marginally* more expensive.
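///
/// For example, the two-input v8i32 mask <9, 8, 11, 10, 1, 0, 3, 2> first
/// becomes a v4i64 lane shuffle that places V2's low lane in the low half and
/// V1's low lane in the high half, followed by the repeated in-lane pattern
/// <1, 0, 3, 2> applied within each 128-bit lane.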
10539 static SDValue lowerVectorShuffleByMerging128BitLanes(
10540 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
10541 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
10542 assert(!isSingleInputShuffleMask(Mask) &&
10543 "This is only useful with multiple inputs.");
10545 int Size = Mask.size();
10546 int LaneSize = 128 / VT.getScalarSizeInBits();
10547 int NumLanes = Size / LaneSize;
10548 assert(NumLanes > 1 && "Only handles 256-bit and wider shuffles.");
10550 // See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
10551 // check whether the in-128-bit lane shuffles share a repeating pattern.
10552 SmallVector<int, 4> Lanes;
10553 Lanes.resize(NumLanes, -1);
10554 SmallVector<int, 4> InLaneMask;
10555 InLaneMask.resize(LaneSize, -1);
  for (int i = 0; i < Size; ++i) {
    if (Mask[i] < 0)
      continue;

    int j = i / LaneSize;
10562 if (Lanes[j] < 0) {
10563 // First entry we've seen for this lane.
10564 Lanes[j] = Mask[i] / LaneSize;
10565 } else if (Lanes[j] != Mask[i] / LaneSize) {
10566 // This doesn't match the lane selected previously!
10570 // Check that within each lane we have a consistent shuffle mask.
10571 int k = i % LaneSize;
10572 if (InLaneMask[k] < 0) {
10573 InLaneMask[k] = Mask[i] % LaneSize;
10574 } else if (InLaneMask[k] != Mask[i] % LaneSize) {
10575 // This doesn't fit a repeating in-lane mask.
10580 // First shuffle the lanes into place.
10581 MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
10582 VT.getSizeInBits() / 64);
10583 SmallVector<int, 8> LaneMask;
10584 LaneMask.resize(NumLanes * 2, -1);
10585 for (int i = 0; i < NumLanes; ++i)
10586 if (Lanes[i] >= 0) {
10587 LaneMask[2 * i + 0] = 2*Lanes[i] + 0;
10588 LaneMask[2 * i + 1] = 2*Lanes[i] + 1;
10591 V1 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V1);
10592 V2 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V2);
10593 SDValue LaneShuffle = DAG.getVectorShuffle(LaneVT, DL, V1, V2, LaneMask);
10595 // Cast it back to the type we actually want.
10596 LaneShuffle = DAG.getNode(ISD::BITCAST, DL, VT, LaneShuffle);
10598 // Now do a simple shuffle that isn't lane crossing.
10599 SmallVector<int, 8> NewMask;
10600 NewMask.resize(Size, -1);
10601 for (int i = 0; i < Size; ++i)
10603 NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
10604 assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) &&
10605 "Must not introduce lane crosses at this point!");
10607 return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask);
10610 /// \brief Test whether the specified input (0 or 1) is in-place blended by the
10613 /// This returns true if the elements from a particular input are already in the
/// slots required by the given mask and require no permutation.
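///
/// For example, with the v4 mask <0, 5, 2, 7>, both input 0 (elements 0 and 2)
/// and input 1 (elements 5 and 7) are already in place.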
10615 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
10616 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
10617 int Size = Mask.size();
10618 for (int i = 0; i < Size; ++i)
10619 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
10625 /// \brief Handle lowering of 4-lane 64-bit floating point shuffles.
10627 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
10628 /// isn't available.
10629 static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10630 const X86Subtarget *Subtarget,
10631 SelectionDAG &DAG) {
10633 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10634 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10635 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10636 ArrayRef<int> Mask = SVOp->getMask();
10637 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10639 SmallVector<int, 4> WidenedMask;
10640 if (canWidenShuffleElements(Mask, WidenedMask))
10641 return lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask, Subtarget,
10644 if (isSingleInputShuffleMask(Mask)) {
10645 // Check for being able to broadcast a single element.
10646 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f64, DL, V1,
10647 Mask, Subtarget, DAG))
10650 // Use low duplicate instructions for masks that match their pattern.
10651 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2))
10652 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
10654 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
    // Non-half-crossing single input shuffles can be lowered with an
10656 // interleaved permutation.
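    //
    // For example, the in-lane swap mask <1, 0, 3, 2> produces an immediate of
    // 0b0101 below: bits 0 and 2 are set because elements 0 and 2 pick the
    // high double of their respective 128-bit lanes.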
10657 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
10658 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
10659 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
10660 DAG.getConstant(VPERMILPMask, MVT::i8));
10663 // With AVX2 we have direct support for this permutation.
10664 if (Subtarget->hasAVX2())
10665 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
10666 getV4X86ShuffleImm8ForMask(Mask, DAG));
10668 // Otherwise, fall back.
10669 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
10673 // X86 has dedicated unpack instructions that can handle specific blend
10674 // operations: UNPCKH and UNPCKL.
10675 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 2, 6))
10676 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V1, V2);
10677 if (isShuffleEquivalent(V1, V2, Mask, 1, 5, 3, 7))
10678 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V1, V2);
10680 // If we have a single input to the zero element, insert that into V1 if we
10681 // can do so cheaply.
10682 int NumV2Elements =
10683 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
10684 if (NumV2Elements == 1 && Mask[0] >= 4)
10685 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
10686 MVT::v4f64, DL, V1, V2, Mask, Subtarget, DAG))
10689 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
10693 // Check if the blend happens to exactly fit that of SHUFPD.
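  // SHUFPD alternates its sources: within each 128-bit lane the low result
  // element comes from V1 and the high one from V2, which is exactly the
  // V1/V2/V1/V2 element pattern tested here (and V2/V1/V2/V1 for the
  // swapped-operand case below).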
10694 if ((Mask[0] == -1 || Mask[0] < 2) &&
10695 (Mask[1] == -1 || (Mask[1] >= 4 && Mask[1] < 6)) &&
10696 (Mask[2] == -1 || (Mask[2] >= 2 && Mask[2] < 4)) &&
10697 (Mask[3] == -1 || Mask[3] >= 6)) {
10698 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 5) << 1) |
10699 ((Mask[2] == 3) << 2) | ((Mask[3] == 7) << 3);
10700 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V1, V2,
10701 DAG.getConstant(SHUFPDMask, MVT::i8));
10703 if ((Mask[0] == -1 || (Mask[0] >= 4 && Mask[0] < 6)) &&
10704 (Mask[1] == -1 || Mask[1] < 2) &&
10705 (Mask[2] == -1 || Mask[2] >= 6) &&
10706 (Mask[3] == -1 || (Mask[3] >= 2 && Mask[3] < 4))) {
10707 unsigned SHUFPDMask = (Mask[0] == 5) | ((Mask[1] == 1) << 1) |
10708 ((Mask[2] == 7) << 2) | ((Mask[3] == 3) << 3);
10709 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V2, V1,
10710 DAG.getConstant(SHUFPDMask, MVT::i8));
10713 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10714 // shuffle. However, if we have AVX2 and either inputs are already in place,
10715 // we will be able to shuffle even across lanes the other input in a single
10716 // instruction so skip this pattern.
10717 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10718 isShuffleMaskInputInPlace(1, Mask))))
10719 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10720 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
  // If we have AVX2 then we always want to lower with a blend because at v4 we
10724 // can fully permute the elements.
10725 if (Subtarget->hasAVX2())
10726 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
10729 // Otherwise fall back on generic lowering.
10730 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, DAG);
10733 /// \brief Handle lowering of 4-lane 64-bit integer shuffles.
10735 /// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v4i64 shuffling.
10737 static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10738 const X86Subtarget *Subtarget,
10739 SelectionDAG &DAG) {
10741 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10742 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10743 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10744 ArrayRef<int> Mask = SVOp->getMask();
10745 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10746 assert(Subtarget->hasAVX2() && "We can only lower v4i64 with AVX2!");
10748 SmallVector<int, 4> WidenedMask;
10749 if (canWidenShuffleElements(Mask, WidenedMask))
10750 return lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask, Subtarget,
10753 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
10757 // Check for being able to broadcast a single element.
10758 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i64, DL, V1,
10759 Mask, Subtarget, DAG))
10762 // When the shuffle is mirrored between the 128-bit lanes of the unit, we can
10763 // use lower latency instructions that will operate on both 128-bit lanes.
10764 SmallVector<int, 2> RepeatedMask;
10765 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
10766 if (isSingleInputShuffleMask(Mask)) {
10767 int PSHUFDMask[] = {-1, -1, -1, -1};
10768 for (int i = 0; i < 2; ++i)
10769 if (RepeatedMask[i] >= 0) {
10770 PSHUFDMask[2 * i] = 2 * RepeatedMask[i];
10771 PSHUFDMask[2 * i + 1] = 2 * RepeatedMask[i] + 1;
10773 return DAG.getNode(
10774 ISD::BITCAST, DL, MVT::v4i64,
10775 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
10776 DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, V1),
10777 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
10781 // AVX2 provides a direct instruction for permuting a single input across
10783 if (isSingleInputShuffleMask(Mask))
10784 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
10785 getV4X86ShuffleImm8ForMask(Mask, DAG));
10787 // Try to use byte shift instructions.
10788 if (SDValue Shift = lowerVectorShuffleAsByteShift(
10789 DL, MVT::v4i64, V1, V2, Mask, DAG))
10792 // Use dedicated unpack instructions for masks that match their pattern.
10793 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 2, 6))
10794 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V1, V2);
10795 if (isShuffleEquivalent(V1, V2, Mask, 1, 5, 3, 7))
10796 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V1, V2);
10798 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10799 // shuffle. However, if we have AVX2 and either inputs are already in place,
10800 // we will be able to shuffle even across lanes the other input in a single
10801 // instruction so skip this pattern.
10802 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10803 isShuffleMaskInputInPlace(1, Mask))))
10804 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10805 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
10808 // Otherwise fall back on generic blend lowering.
10809 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
10813 /// \brief Handle lowering of 8-lane 32-bit floating point shuffles.
10815 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
10816 /// isn't available.
10817 static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10818 const X86Subtarget *Subtarget,
10819 SelectionDAG &DAG) {
10821 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10822 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10823 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10824 ArrayRef<int> Mask = SVOp->getMask();
10825 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10827 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
10831 // Check for being able to broadcast a single element.
10832 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8f32, DL, V1,
10833 Mask, Subtarget, DAG))
10836 // If the shuffle mask is repeated in each 128-bit lane, we have many more
10837 // options to efficiently lower the shuffle.
10838 SmallVector<int, 4> RepeatedMask;
10839 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
10840 assert(RepeatedMask.size() == 4 &&
10841 "Repeated masks must be half the mask width!");
10843 // Use even/odd duplicate instructions for masks that match their pattern.
10844 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2, 4, 4, 6, 6))
10845 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
10846 if (isShuffleEquivalent(V1, V2, Mask, 1, 1, 3, 3, 5, 5, 7, 7))
10847 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
10849 if (isSingleInputShuffleMask(Mask))
10850 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
10851 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10853 // Use dedicated unpack instructions for masks that match their pattern.
10854 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10855 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V1, V2);
10856 if (isShuffleEquivalent(V1, V2, Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10857 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V1, V2);
10859 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
10860 // have already handled any direct blends. We also need to squash the
10861 // repeated mask into a simulated v4f32 mask.
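    // The repeated mask refers to V2's lane as indices 8..11; subtracting 4
    // remaps them onto the 4..7 slots that the v4f32 SHUFPS helper expects for
    // its second operand. For example, the repeated mask <1, 9, 0, 8> becomes
    // <1, 5, 0, 4>.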
10862 for (int i = 0; i < 4; ++i)
10863 if (RepeatedMask[i] >= 8)
10864 RepeatedMask[i] -= 4;
10865 return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
10868 // If we have a single input shuffle with different shuffle patterns in the
10869 // two 128-bit lanes use the variable mask to VPERMILPS.
10870 if (isSingleInputShuffleMask(Mask)) {
10871 SDValue VPermMask[8];
10872 for (int i = 0; i < 8; ++i)
10873 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10874 : DAG.getConstant(Mask[i], MVT::i32);
10875 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
10876 return DAG.getNode(
10877 X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
10878 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask));
10880 if (Subtarget->hasAVX2())
10881 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32,
10882 DAG.getNode(ISD::BITCAST, DL, MVT::v8f32,
10883 DAG.getNode(ISD::BUILD_VECTOR, DL,
10884 MVT::v8i32, VPermMask)),
10887 // Otherwise, fall back.
10888 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
10892 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10894 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10895 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
10898 // If we have AVX2 then we always want to lower with a blend because at v8 we
10899 // can fully permute the elements.
10900 if (Subtarget->hasAVX2())
10901 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
10904 // Otherwise fall back on generic lowering.
10905 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, DAG);
10908 /// \brief Handle lowering of 8-lane 32-bit integer shuffles.
10910 /// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v8i32 shuffling.
10912 static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10913 const X86Subtarget *Subtarget,
10914 SelectionDAG &DAG) {
10916 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10917 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10918 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10919 ArrayRef<int> Mask = SVOp->getMask();
10920 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10921 assert(Subtarget->hasAVX2() && "We can only lower v8i32 with AVX2!");
10923 // Whenever we can lower this as a zext, that instruction is strictly faster
10924 // than any alternative. It also allows us to fold memory operands into the
10925 // shuffle in many cases.
10926 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2,
10927 Mask, Subtarget, DAG))
10930 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
10934 // Check for being able to broadcast a single element.
10935 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i32, DL, V1,
10936 Mask, Subtarget, DAG))
10939 // If the shuffle mask is repeated in each 128-bit lane we can use more
  // efficient instructions that mirror the shuffles across the two 128-bit
  // lanes.
10942 SmallVector<int, 4> RepeatedMask;
10943 if (is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask)) {
10944 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
10945 if (isSingleInputShuffleMask(Mask))
10946 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
10947 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10949 // Use dedicated unpack instructions for masks that match their pattern.
10950 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10951 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V1, V2);
10952 if (isShuffleEquivalent(V1, V2, Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10953 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V1, V2);
10956 // Try to use bit shift instructions.
10957 if (SDValue Shift = lowerVectorShuffleAsBitShift(
10958 DL, MVT::v8i32, V1, V2, Mask, DAG))
10961 // Try to use byte shift instructions.
10962 if (SDValue Shift = lowerVectorShuffleAsByteShift(
10963 DL, MVT::v8i32, V1, V2, Mask, DAG))
10966 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
10967 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
10970 // If the shuffle patterns aren't repeated but it is a single input, directly
10971 // generate a cross-lane VPERMD instruction.
10972 if (isSingleInputShuffleMask(Mask)) {
10973 SDValue VPermMask[8];
10974 for (int i = 0; i < 8; ++i)
10975 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10976 : DAG.getConstant(Mask[i], MVT::i32);
10977 return DAG.getNode(
10978 X86ISD::VPERMV, DL, MVT::v8i32,
10979 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
10982 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10984 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10985 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
10988 // Otherwise fall back on generic blend lowering.
10989 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
10993 /// \brief Handle lowering of 16-lane 16-bit integer shuffles.
10995 /// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v16i16 shuffling.
10997 static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10998 const X86Subtarget *Subtarget,
10999 SelectionDAG &DAG) {
11001 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
11002 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
11003 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11004 ArrayRef<int> Mask = SVOp->getMask();
11005 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11006 assert(Subtarget->hasAVX2() && "We can only lower v16i16 with AVX2!");
11008 // Whenever we can lower this as a zext, that instruction is strictly faster
11009 // than any alternative. It also allows us to fold memory operands into the
11010 // shuffle in many cases.
11011 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v16i16, V1, V2,
11012 Mask, Subtarget, DAG))
11015 // Check for being able to broadcast a single element.
11016 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i16, DL, V1,
11017 Mask, Subtarget, DAG))
11020 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
11024 // Use dedicated unpack instructions for masks that match their pattern.
11025 if (isShuffleEquivalent(V1, V2, Mask,
11026 // First 128-bit lane:
11027 0, 16, 1, 17, 2, 18, 3, 19,
11028 // Second 128-bit lane:
11029 8, 24, 9, 25, 10, 26, 11, 27))
11030 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i16, V1, V2);
11031 if (isShuffleEquivalent(V1, V2, Mask,
11032 // First 128-bit lane:
11033 4, 20, 5, 21, 6, 22, 7, 23,
11034 // Second 128-bit lane:
11035 12, 28, 13, 29, 14, 30, 15, 31))
11036 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i16, V1, V2);
11038 // Try to use bit shift instructions.
11039 if (SDValue Shift = lowerVectorShuffleAsBitShift(
11040 DL, MVT::v16i16, V1, V2, Mask, DAG))
11043 // Try to use byte shift instructions.
11044 if (SDValue Shift = lowerVectorShuffleAsByteShift(
11045 DL, MVT::v16i16, V1, V2, Mask, DAG))
11048 // Try to use byte rotation instructions.
11049 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
11050 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
11053 if (isSingleInputShuffleMask(Mask)) {
    // There are no generalized cross-lane shuffle operations available on i16
    // element types.
11056 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
11057 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
11060 SDValue PSHUFBMask[32];
11061 for (int i = 0; i < 16; ++i) {
11062 if (Mask[i] == -1) {
        PSHUFBMask[2 * i] = PSHUFBMask[2 * i + 1] = DAG.getUNDEF(MVT::i8);
        continue;
      }
11067 int M = i < 8 ? Mask[i] : Mask[i] - 8;
11068 assert(M >= 0 && M < 8 && "Invalid single-input mask!");
11069 PSHUFBMask[2 * i] = DAG.getConstant(2 * M, MVT::i8);
11070 PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, MVT::i8);
11072 return DAG.getNode(
11073 ISD::BITCAST, DL, MVT::v16i16,
11075 X86ISD::PSHUFB, DL, MVT::v32i8,
11076 DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1),
11077 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask)));
11080 // Try to simplify this by merging 128-bit lanes to enable a lane-based
11082 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11083 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
11086 // Otherwise fall back on generic lowering.
11087 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG);
11090 /// \brief Handle lowering of 32-lane 8-bit integer shuffles.
11092 /// This routine is only called when we have AVX2 and thus a reasonable
/// instruction set for v32i8 shuffling.
11094 static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11095 const X86Subtarget *Subtarget,
11096 SelectionDAG &DAG) {
11098 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
11099 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
11100 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11101 ArrayRef<int> Mask = SVOp->getMask();
11102 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
11103 assert(Subtarget->hasAVX2() && "We can only lower v32i8 with AVX2!");
11105 // Whenever we can lower this as a zext, that instruction is strictly faster
11106 // than any alternative. It also allows us to fold memory operands into the
11107 // shuffle in many cases.
11108 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2,
11109 Mask, Subtarget, DAG))
11112 // Check for being able to broadcast a single element.
11113 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v32i8, DL, V1,
11114 Mask, Subtarget, DAG))
11117 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
11121 // Use dedicated unpack instructions for masks that match their pattern.
11122 // Note that these are repeated 128-bit lane unpacks, not unpacks across all
11124 if (isShuffleEquivalent(
11126 // First 128-bit lane:
11127 0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39,
11128 // Second 128-bit lane:
11129 16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55))
11130 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v32i8, V1, V2);
11131 if (isShuffleEquivalent(
11133 // First 128-bit lane:
11134 8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
11135 // Second 128-bit lane:
11136 24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63))
11137 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v32i8, V1, V2);
11139 // Try to use bit shift instructions.
11140 if (SDValue Shift = lowerVectorShuffleAsBitShift(
11141 DL, MVT::v32i8, V1, V2, Mask, DAG))
11144 // Try to use byte shift instructions.
11145 if (SDValue Shift = lowerVectorShuffleAsByteShift(
11146 DL, MVT::v32i8, V1, V2, Mask, DAG))
11149 // Try to use byte rotation instructions.
11150 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
11151 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
11154 if (isSingleInputShuffleMask(Mask)) {
    // There are no generalized cross-lane shuffle operations available on i8
    // element types.
11157 if (is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
11158 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2,
11161 SDValue PSHUFBMask[32];
    for (int i = 0; i < 32; ++i)
      PSHUFBMask[i] =
          Mask[i] < 0
              ? DAG.getUNDEF(MVT::i8)
11166 : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16, MVT::i8);
11168 return DAG.getNode(
11169 X86ISD::PSHUFB, DL, MVT::v32i8, V1,
11170 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask));
11173 // Try to simplify this by merging 128-bit lanes to enable a lane-based
11175 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11176 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
11179 // Otherwise fall back on generic lowering.
11180 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG);
11183 /// \brief High-level routine to lower various 256-bit x86 vector shuffles.
11185 /// This routine either breaks down the specific type of a 256-bit x86 vector
11186 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
11187 /// together based on the available instructions.
11188 static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11189 MVT VT, const X86Subtarget *Subtarget,
11190 SelectionDAG &DAG) {
11192 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11193 ArrayRef<int> Mask = SVOp->getMask();
11195 // There is a really nice hard cut-over between AVX1 and AVX2 that means we can
11196 // check for those subtargets here and avoid much of the subtarget querying in
11197 // the per-vector-type lowering routines. With AVX1 we have essentially *zero*
11198 // ability to manipulate a 256-bit vector with integer types. Since we'll use
11199 // floating point types there eventually, just immediately cast everything to
11200 // a float and operate entirely in that domain.
11201 if (VT.isInteger() && !Subtarget->hasAVX2()) {
11202 int ElementBits = VT.getScalarSizeInBits();
11203 if (ElementBits < 32)
11204 // No floating point type available, decompose into 128-bit vectors.
11205 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11207 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
11208 VT.getVectorNumElements());
11209 V1 = DAG.getNode(ISD::BITCAST, DL, FpVT, V1);
11210 V2 = DAG.getNode(ISD::BITCAST, DL, FpVT, V2);
11211 return DAG.getNode(ISD::BITCAST, DL, VT,
11212 DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
  }

  switch (VT.SimpleTy) {
  case MVT::v4f64:
    return lowerV4F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v4i64:
    return lowerV4I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v8f32:
    return lowerV8F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v8i32:
    return lowerV8I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v16i16:
    return lowerV16I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v32i8:
    return lowerV32I8VectorShuffle(Op, V1, V2, Subtarget, DAG);

  default:
    llvm_unreachable("Not a valid 256-bit x86 vector type!");
  }
}
11234 /// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
11235 static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11236 const X86Subtarget *Subtarget,
11237 SelectionDAG &DAG) {
11239 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11240 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11241 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11242 ArrayRef<int> Mask = SVOp->getMask();
11243 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11245 // X86 has dedicated unpack instructions that can handle specific blend
11246 // operations: UNPCKH and UNPCKL.
11247 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 2, 10, 4, 12, 6, 14))
11248 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f64, V1, V2);
11249 if (isShuffleEquivalent(V1, V2, Mask, 1, 9, 3, 11, 5, 13, 7, 15))
11250 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f64, V1, V2);
11252 // FIXME: Implement direct support for this type!
11253 return splitAndLowerVectorShuffle(DL, MVT::v8f64, V1, V2, Mask, DAG);
11256 /// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
11257 static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11258 const X86Subtarget *Subtarget,
11259 SelectionDAG &DAG) {
11261 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11262 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11263 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11264 ArrayRef<int> Mask = SVOp->getMask();
11265 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11267 // Use dedicated unpack instructions for masks that match their pattern.
11268 if (isShuffleEquivalent(V1, V2, Mask,
11269 0, 16, 1, 17, 4, 20, 5, 21,
11270 8, 24, 9, 25, 12, 28, 13, 29))
11271 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16f32, V1, V2);
11272 if (isShuffleEquivalent(V1, V2, Mask,
11273 2, 18, 3, 19, 6, 22, 7, 23,
11274 10, 26, 11, 27, 14, 30, 15, 31))
11275 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16f32, V1, V2);
11277 // FIXME: Implement direct support for this type!
11278 return splitAndLowerVectorShuffle(DL, MVT::v16f32, V1, V2, Mask, DAG);
11281 /// \brief Handle lowering of 8-lane 64-bit integer shuffles.
11282 static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11283 const X86Subtarget *Subtarget,
11284 SelectionDAG &DAG) {
11286 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11287 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11288 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11289 ArrayRef<int> Mask = SVOp->getMask();
11290 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11292 // X86 has dedicated unpack instructions that can handle specific blend
11293 // operations: UNPCKH and UNPCKL.
11294 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 2, 10, 4, 12, 6, 14))
11295 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i64, V1, V2);
11296 if (isShuffleEquivalent(V1, V2, Mask, 1, 9, 3, 11, 5, 13, 7, 15))
11297 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i64, V1, V2);
11299 // FIXME: Implement direct support for this type!
11300 return splitAndLowerVectorShuffle(DL, MVT::v8i64, V1, V2, Mask, DAG);
11303 /// \brief Handle lowering of 16-lane 32-bit integer shuffles.
11304 static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11305 const X86Subtarget *Subtarget,
11306 SelectionDAG &DAG) {
11308 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11309 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11310 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11311 ArrayRef<int> Mask = SVOp->getMask();
11312 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11314 // Use dedicated unpack instructions for masks that match their pattern.
11315 if (isShuffleEquivalent(V1, V2, Mask,
11316 0, 16, 1, 17, 4, 20, 5, 21,
11317 8, 24, 9, 25, 12, 28, 13, 29))
11318 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i32, V1, V2);
11319 if (isShuffleEquivalent(V1, V2, Mask,
11320 2, 18, 3, 19, 6, 22, 7, 23,
11321 10, 26, 11, 27, 14, 30, 15, 31))
11322 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i32, V1, V2);
11324 // FIXME: Implement direct support for this type!
11325 return splitAndLowerVectorShuffle(DL, MVT::v16i32, V1, V2, Mask, DAG);
11328 /// \brief Handle lowering of 32-lane 16-bit integer shuffles.
11329 static SDValue lowerV32I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11330 const X86Subtarget *Subtarget,
11331 SelectionDAG &DAG) {
11333 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11334 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11335 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11336 ArrayRef<int> Mask = SVOp->getMask();
11337 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
11338 assert(Subtarget->hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
11340 // FIXME: Implement direct support for this type!
11341 return splitAndLowerVectorShuffle(DL, MVT::v32i16, V1, V2, Mask, DAG);
11344 /// \brief Handle lowering of 64-lane 8-bit integer shuffles.
11345 static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11346 const X86Subtarget *Subtarget,
11347 SelectionDAG &DAG) {
11349 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11350 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11351 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11352 ArrayRef<int> Mask = SVOp->getMask();
11353 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
11354 assert(Subtarget->hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
11356 // FIXME: Implement direct support for this type!
11357 return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
11360 /// \brief High-level routine to lower various 512-bit x86 vector shuffles.
11362 /// This routine either breaks down the specific type of a 512-bit x86 vector
11363 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
11364 /// together based on the available instructions.
11365 static SDValue lower512BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11366 MVT VT, const X86Subtarget *Subtarget,
11367 SelectionDAG &DAG) {
11369 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11370 ArrayRef<int> Mask = SVOp->getMask();
11371 assert(Subtarget->hasAVX512() &&
11372 "Cannot lower 512-bit vectors w/ basic ISA!");
11374 // Check for being able to broadcast a single element.
11375 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(VT.SimpleTy, DL, V1,
11376 Mask, Subtarget, DAG))
  // Dispatch to each element type for lowering. If we don't have support for
11380 // specific element type shuffles at 512 bits, immediately split them and
11381 // lower them. Each lowering routine of a given type is allowed to assume that
11382 // the requisite ISA extensions for that element type are available.
  switch (VT.SimpleTy) {
  case MVT::v8f64:
    return lowerV8F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v16f32:
    return lowerV16F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v8i64:
    return lowerV8I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v16i32:
    return lowerV16I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
  case MVT::v32i16:
    if (Subtarget->hasBWI())
      return lowerV32I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
    break;
  case MVT::v64i8:
    if (Subtarget->hasBWI())
      return lowerV64I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
    break;

  default:
    llvm_unreachable("Not a valid 512-bit x86 vector type!");
  }

  // Otherwise fall back on splitting.
11406 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11409 /// \brief Top-level lowering for x86 vector shuffles.
11411 /// This handles decomposition, canonicalization, and lowering of all x86
11412 /// vector shuffles. Most of the specific lowering strategies are encapsulated
11413 /// above in helper routines. The canonicalization attempts to widen shuffles
11414 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
11415 /// s.t. only one of the two inputs needs to be tested, etc.
11416 static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
11417 SelectionDAG &DAG) {
11418 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11419 ArrayRef<int> Mask = SVOp->getMask();
11420 SDValue V1 = Op.getOperand(0);
11421 SDValue V2 = Op.getOperand(1);
11422 MVT VT = Op.getSimpleValueType();
11423 int NumElements = VT.getVectorNumElements();
  SDLoc dl(Op);

  assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
11428 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
11429 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
11430 if (V1IsUndef && V2IsUndef)
11431 return DAG.getUNDEF(VT);
  // When we create a shuffle node we put the UNDEF node as the second operand,
  // but in some cases the first operand may be transformed to UNDEF.
  // In this case we should just commute the node.
  if (V1IsUndef)
    return DAG.getCommutedVectorShuffle(*SVOp);
11439 // Check for non-undef masks pointing at an undef vector and make the masks
  // undef as well. This makes it easier to match the shuffle based solely on
  // the mask.
  if (V2IsUndef)
    for (int M : Mask)
      if (M >= NumElements) {
        SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
        for (int &M : NewMask)
          if (M >= NumElements)
            M = -1;
        return DAG.getVectorShuffle(VT, dl, V1, V2, NewMask);
      }
11452 // We actually see shuffles that are entirely re-arrangements of a set of
11453 // zero inputs. This mostly happens while decomposing complex shuffles into
11454 // simple ones. Directly lower these as a buildvector of zeros.
11455 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
11456 if (Zeroable.all())
11457 return getZeroVector(VT, Subtarget, DAG, dl);
11459 // Try to collapse shuffles into using a vector type with fewer elements but
11460 // wider element types. We cap this to not form integers or floating point
11461 // elements wider than 64 bits, but it might be interesting to form i128
11462 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
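  // For example, the v8i16 mask <0, 1, 4, 5, 8, 9, 12, 13> pairs up cleanly and
  // widens to the v4i32 mask <0, 2, 4, 6>.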
11463 SmallVector<int, 16> WidenedMask;
11464 if (VT.getScalarSizeInBits() < 64 &&
11465 canWidenShuffleElements(Mask, WidenedMask)) {
11466 MVT NewEltVT = VT.isFloatingPoint()
11467 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
11468 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
11469 MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
11470 // Make sure that the new vector type is legal. For example, v2f64 isn't
11472 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
11473 V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
11474 V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
11475 return DAG.getNode(ISD::BITCAST, dl, VT,
11476 DAG.getVectorShuffle(NewVT, dl, V1, V2, WidenedMask));
11480 int NumV1Elements = 0, NumUndefElements = 0, NumV2Elements = 0;
  for (int M : SVOp->getMask())
    if (M < 0)
      ++NumUndefElements;
    else if (M < NumElements)
      ++NumV1Elements;
    else
      ++NumV2Elements;
11489 // Commute the shuffle as needed such that more elements come from V1 than
11490 // V2. This allows us to match the shuffle pattern strictly on how many
11491 // elements come from V1 without handling the symmetric cases.
11492 if (NumV2Elements > NumV1Elements)
11493 return DAG.getCommutedVectorShuffle(*SVOp);
  // When the number of V1 and V2 elements is the same, try to minimize the
  // number of uses of V2 in the low half of the vector. When that is tied,
  // ensure that the sum of indices for V1 is equal to or lower than the sum of
  // indices for V2. When those are equal, try to ensure that the number of odd
11499 // indices for V1 is lower than the number of odd indices for V2.
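  // These tie-breakers make the canonicalization deterministic, so equivalent
  // shuffles reach the per-width lowering routines below in a single canonical
  // form.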
11500 if (NumV1Elements == NumV2Elements) {
11501 int LowV1Elements = 0, LowV2Elements = 0;
11502 for (int M : SVOp->getMask().slice(0, NumElements / 2))
      if (M >= NumElements)
        ++LowV2Elements;
      else if (M >= 0)
        ++LowV1Elements;
11507 if (LowV2Elements > LowV1Elements) {
11508 return DAG.getCommutedVectorShuffle(*SVOp);
11509 } else if (LowV2Elements == LowV1Elements) {
11510 int SumV1Indices = 0, SumV2Indices = 0;
11511 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
        if (SVOp->getMask()[i] >= NumElements)
          SumV2Indices += i;
        else if (SVOp->getMask()[i] >= 0)
          SumV1Indices += i;
11516 if (SumV2Indices < SumV1Indices) {
11517 return DAG.getCommutedVectorShuffle(*SVOp);
11518 } else if (SumV2Indices == SumV1Indices) {
11519 int NumV1OddIndices = 0, NumV2OddIndices = 0;
11520 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11521 if (SVOp->getMask()[i] >= NumElements)
11522 NumV2OddIndices += i % 2;
11523 else if (SVOp->getMask()[i] >= 0)
11524 NumV1OddIndices += i % 2;
11525 if (NumV2OddIndices < NumV1OddIndices)
11526 return DAG.getCommutedVectorShuffle(*SVOp);
11531 // For each vector width, delegate to a specialized lowering routine.
11532 if (VT.getSizeInBits() == 128)
11533 return lower128BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11535 if (VT.getSizeInBits() == 256)
11536 return lower256BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
  // 512-bit vectors are still mostly split into 256-bit halves by the routines
  // above; only a few element types have direct lowerings.
  // FIXME: Implement full AVX-512 support!
11540 if (VT.getSizeInBits() == 512)
11541 return lower512BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11543 llvm_unreachable("Unimplemented!");
11547 //===----------------------------------------------------------------------===//
11548 // Legacy vector shuffle lowering
11550 // This code is the legacy code handling vector shuffles until the above
11551 // replaces its functionality and performance.
11552 //===----------------------------------------------------------------------===//
11554 static bool isBlendMask(ArrayRef<int> MaskVals, MVT VT, bool hasSSE41,
11555 bool hasInt256, unsigned *MaskOut = nullptr) {
11556 MVT EltVT = VT.getVectorElementType();
11558 // There is no blend with immediate in AVX-512.
  if (VT.is512BitVector())
    return false;
  if (!hasSSE41 || EltVT == MVT::i8)
    return false;
  if (!hasInt256 && VT == MVT::v16i16)
    return false;
11567 unsigned MaskValue = 0;
11568 unsigned NumElems = VT.getVectorNumElements();
11569 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
11570 unsigned NumLanes = (NumElems - 1) / 8 + 1;
11571 unsigned NumElemsInLane = NumElems / NumLanes;
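  // Bit i of MaskValue is set when result element i (and, when there are two
  // lanes, its mirror element i + NumElemsInLane) comes from V2 rather than V1.
  // For example, the v8i32 mask <0, 9, 2, 11, 4, 13, 6, 15> encodes as the
  // blend immediate 0b10101010 (0xAA).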
11573 // Blend for v16i16 should be symmetric for both lanes.
11574 for (unsigned i = 0; i < NumElemsInLane; ++i) {
11576 int SndLaneEltIdx = (NumLanes == 2) ? MaskVals[i + NumElemsInLane] : -1;
11577 int EltIdx = MaskVals[i];
11579 if ((EltIdx < 0 || EltIdx == (int)i) &&
        (SndLaneEltIdx < 0 || SndLaneEltIdx == (int)(i + NumElemsInLane)))
      continue;
11583 if (((unsigned)EltIdx == (i + NumElems)) &&
11584 (SndLaneEltIdx < 0 ||
11585 (unsigned)SndLaneEltIdx == i + NumElems + NumElemsInLane))
      MaskValue |= (1 << i);
    else
      return false;
  }

  if (MaskOut)
    *MaskOut = MaskValue;
  return true;
}
11596 // Try to lower a shuffle node into a simple blend instruction.
11597 // This function assumes isBlendMask returns true for this
// ShuffleVectorSDNode.
11599 static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
11600 unsigned MaskValue,
11601 const X86Subtarget *Subtarget,
11602 SelectionDAG &DAG) {
11603 MVT VT = SVOp->getSimpleValueType(0);
11604 MVT EltVT = VT.getVectorElementType();
  assert(isBlendMask(SVOp->getMask(), VT, Subtarget->hasSSE41(),
                     Subtarget->hasInt256()) &&
         "Trying to lower a VECTOR_SHUFFLE to a Blend but with the wrong mask");
11609 SDValue V1 = SVOp->getOperand(0);
11610 SDValue V2 = SVOp->getOperand(1);
  SDLoc dl(SVOp);
  unsigned NumElems = VT.getVectorNumElements();
11614 // Convert i32 vectors to floating point if it is not AVX2.
11615 // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
  MVT BlendVT = VT;
  if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
    BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
                               NumElems);
    // Bitcast to the blend type so the BLENDI node sees matching operand types.
    V1 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V1);
    V2 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V2);
  }
11624 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, V1, V2,
11625 DAG.getConstant(MaskValue, MVT::i32));
11626 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
11629 /// In vector type \p VT, return true if the element at index \p InputIdx
11630 /// falls on a different 128-bit lane than \p OutputIdx.
11631 static bool ShuffleCrosses128bitLane(MVT VT, unsigned InputIdx,
11632 unsigned OutputIdx) {
11633 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
11634 return InputIdx * EltSize / 128 != OutputIdx * EltSize / 128;
11637 /// Generate a PSHUFB if possible. Selects elements from \p V1 according to
11638 /// \p MaskVals. MaskVals[OutputIdx] = InputIdx specifies that we want to
11639 /// shuffle the element at InputIdx in V1 to OutputIdx in the result. If \p
11640 /// MaskVals refers to elements outside of \p V1 or is undef (-1), insert a
11642 static SDValue getPSHUFB(ArrayRef<int> MaskVals, SDValue V1, SDLoc &dl,
11643 SelectionDAG &DAG) {
11644 MVT VT = V1.getSimpleValueType();
11645 assert(VT.is128BitVector() || VT.is256BitVector());
11647 MVT EltVT = VT.getVectorElementType();
11648 unsigned EltSizeInBytes = EltVT.getSizeInBits() / 8;
11649 unsigned NumElts = VT.getVectorNumElements();
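  // Each vector element expands to EltSizeInBytes consecutive byte selectors in
  // the PSHUFB mask; a selector of 0x80 zeroes the corresponding result byte.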
11651 SmallVector<SDValue, 32> PshufbMask;
11652 for (unsigned OutputIdx = 0; OutputIdx < NumElts; ++OutputIdx) {
11653 int InputIdx = MaskVals[OutputIdx];
11654 unsigned InputByteIdx;
11656 if (InputIdx < 0 || NumElts <= (unsigned)InputIdx)
11657 InputByteIdx = 0x80;
11659 // Cross lane is not allowed.
11660 if (ShuffleCrosses128bitLane(VT, InputIdx, OutputIdx))
11662 InputByteIdx = InputIdx * EltSizeInBytes;
      // The index is a byte offset within the 128-bit lane.
11664 InputByteIdx &= 0xf;
11667 for (unsigned j = 0; j < EltSizeInBytes; ++j) {
11668 PshufbMask.push_back(DAG.getConstant(InputByteIdx, MVT::i8));
11669 if (InputByteIdx != 0x80)
11674 MVT ShufVT = MVT::getVectorVT(MVT::i8, PshufbMask.size());
11676 V1 = DAG.getNode(ISD::BITCAST, dl, ShufVT, V1);
11677 return DAG.getNode(X86ISD::PSHUFB, dl, ShufVT, V1,
11678 DAG.getNode(ISD::BUILD_VECTOR, dl, ShufVT, PshufbMask));
11681 // v8i16 shuffles - Prefer shuffles in the following order:
11682 // 1. [all] pshuflw, pshufhw, optional move
11683 // 2. [ssse3] 1 x pshufb
11684 // 3. [ssse3] 2 x pshufb + 1 x por
11685 // 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
11687 LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
11688 SelectionDAG &DAG) {
11689 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11690 SDValue V1 = SVOp->getOperand(0);
11691 SDValue V2 = SVOp->getOperand(1);
11693 SmallVector<int, 8> MaskVals;
11695 // Determine if more than 1 of the words in each of the low and high quadwords
11696 // of the result come from the same quadword of one of the two inputs. Undef
11697 // mask values count as coming from any quadword, for better codegen.
11699 // Lo/HiQuad[i] = j indicates how many words from the ith quad of the input
11700 // feeds this quad. For i, 0 and 1 refer to V1, 2 and 3 refer to V2.
11701 unsigned LoQuad[] = { 0, 0, 0, 0 };
11702 unsigned HiQuad[] = { 0, 0, 0, 0 };
11703 // Indices of quads used.
11704 std::bitset<4> InputQuads;
11705 for (unsigned i = 0; i < 8; ++i) {
11706 unsigned *Quad = i < 4 ? LoQuad : HiQuad;
11707 int EltIdx = SVOp->getMaskElt(i);
11708 MaskVals.push_back(EltIdx);
    if (EltIdx < 0) {
      ++Quad[0];
      ++Quad[1];
      ++Quad[2];
      ++Quad[3];
      continue;
    }
    ++Quad[EltIdx / 4];
11717 InputQuads.set(EltIdx / 4);
11720 int BestLoQuad = -1;
11721 unsigned MaxQuad = 1;
11722 for (unsigned i = 0; i < 4; ++i) {
    if (LoQuad[i] > MaxQuad) {
      BestLoQuad = i;
      MaxQuad = LoQuad[i];
    }
  }
11729 int BestHiQuad = -1;
  MaxQuad = 1;
  for (unsigned i = 0; i < 4; ++i) {
    if (HiQuad[i] > MaxQuad) {
      BestHiQuad = i;
      MaxQuad = HiQuad[i];
    }
  }
11738 // For SSSE3, If all 8 words of the result come from only 1 quadword of each
11739 // of the two input vectors, shuffle them into one input vector so only a
11740 // single pshufb instruction is necessary. If there are more than 2 input
11741 // quads, disable the next transformation since it does not help SSSE3.
11742 bool V1Used = InputQuads[0] || InputQuads[1];
11743 bool V2Used = InputQuads[2] || InputQuads[3];
11744 if (Subtarget->hasSSSE3()) {
11745 if (InputQuads.count() == 2 && V1Used && V2Used) {
11746 BestLoQuad = InputQuads[0] ? 0 : 1;
      BestHiQuad = InputQuads[2] ? 2 : 3;
    }
    if (InputQuads.count() > 2) {
      BestLoQuad = -1;
      BestHiQuad = -1;
    }
  }
11755 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update
11756 // the shuffle mask. If a quad is scored as -1, that means that it contains
11757 // words from all 4 input quadwords.
11759 if (BestLoQuad >= 0 || BestHiQuad >= 0) {
11761 BestLoQuad < 0 ? 0 : BestLoQuad,
11762 BestHiQuad < 0 ? 1 : BestHiQuad
11764 NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
11765 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
11766 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
11767 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);
11769 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
11770 // source words for the shuffle, to aid later transformations.
11771 bool AllWordsInNewV = true;
11772 bool InOrder[2] = { true, true };
11773 for (unsigned i = 0; i != 8; ++i) {
11774 int idx = MaskVals[i];
11776 InOrder[i/4] = false;
11777 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
11779 AllWordsInNewV = false;
11783 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV;
11784 if (AllWordsInNewV) {
11785 for (int i = 0; i != 8; ++i) {
11786 int idx = MaskVals[i];
11789 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4;
11790 if ((idx != i) && idx < 4)
11792 if ((idx != i) && idx > 3)
11801 // If we've eliminated the use of V2, and the new mask is a pshuflw or
11802 // pshufhw, that's as cheap as it gets. Return the new shuffle.
11803 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
11804 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW;
11805 unsigned TargetMask = 0;
11806 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
11807 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
11808 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11809 TargetMask = pshufhw ? getShufflePSHUFHWImmediate(SVOp):
11810 getShufflePSHUFLWImmediate(SVOp);
11811 V1 = NewV.getOperand(0);
11812 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
11816 // Promote splats to a larger type which usually leads to more efficient code.
11817 // FIXME: Is this true if pshufb is available?
11818 if (SVOp->isSplat())
11819 return PromoteSplat(SVOp, DAG);
11821 // If we have SSSE3, and all words of the result are from 1 input vector,
11822 // case 2 is generated, otherwise case 3 is generated. If no SSSE3
11823 // is present, fall back to case 4.
11824 if (Subtarget->hasSSSE3()) {
11825 SmallVector<SDValue,16> pshufbMask;
11827 // If we have elements from both input vectors, set the high bit of the
11828 // shuffle mask element to zero out elements that come from V2 in the V1
11829 // mask, and elements that come from V1 in the V2 mask, so that the two
11830 // results can be OR'd together.
11831 bool TwoInputs = V1Used && V2Used;
    V1 = getPSHUFB(MaskVals, V1, dl, DAG);
    if (!TwoInputs)
      return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11836 // Calculate the shuffle mask for the second input, shuffle it, and
11837 // OR it with the first shuffled input.
11838 CommuteVectorShuffleMask(MaskVals, 8);
11839 V2 = getPSHUFB(MaskVals, V2, dl, DAG);
11840 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
11841 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11844 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
11845 // and update MaskVals with new element order.
11846 std::bitset<8> InOrder;
11847 if (BestLoQuad >= 0) {
11848 int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 };
11849 for (int i = 0; i != 4; ++i) {
11850 int idx = MaskVals[i];
11853 } else if ((idx / 4) == BestLoQuad) {
11854 MaskV[i] = idx & 3;
11858 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11861 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11862 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11863 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
11864 NewV.getOperand(0),
11865 getShufflePSHUFLWImmediate(SVOp), DAG);
11869 // If BestHi >= 0, generate a pshufhw to put the high elements in order,
11870 // and update MaskVals with the new element order.
11871 if (BestHiQuad >= 0) {
11872 int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 };
11873 for (unsigned i = 4; i != 8; ++i) {
11874 int idx = MaskVals[i];
11877 } else if ((idx / 4) == BestHiQuad) {
11878 MaskV[i] = (idx & 3) + 4;
11882 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11885 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11886 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11887 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
11888 NewV.getOperand(0),
11889 getShufflePSHUFHWImmediate(SVOp), DAG);
11893 // In case BestHi & BestLo were both -1, which means each quadword has a word
11894 // from each of the four input quadwords, calculate the InOrder bitvector now
11895 // before falling through to the insert/extract cleanup.
11896 if (BestLoQuad == -1 && BestHiQuad == -1) {
11898 for (int i = 0; i != 8; ++i)
11899 if (MaskVals[i] < 0 || MaskVals[i] == i)
11903 // The other elements are put in the right place using pextrw and pinsrw.
11904 for (unsigned i = 0; i != 8; ++i) {
11907 int EltIdx = MaskVals[i];
11910 SDValue ExtOp = (EltIdx < 8) ?
11911 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
11912 DAG.getIntPtrConstant(EltIdx)) :
11913 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
11914 DAG.getIntPtrConstant(EltIdx - 8));
11915 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
11916 DAG.getIntPtrConstant(i));
11921 /// \brief v16i16 shuffles
11923 /// FIXME: We only support generation of a single pshufb currently. We can
11924 /// generalize the other applicable cases from LowerVECTOR_SHUFFLEv8i16 as
11925 /// well (e.g 2 x pshufb + 1 x por).
11927 LowerVECTOR_SHUFFLEv16i16(SDValue Op, SelectionDAG &DAG) {
11928 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11929 SDValue V1 = SVOp->getOperand(0);
11930 SDValue V2 = SVOp->getOperand(1);
11933 if (V2.getOpcode() != ISD::UNDEF)
11936 SmallVector<int, 16> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
11937 return getPSHUFB(MaskVals, V1, dl, DAG);
11940 // v16i8 shuffles - Prefer shuffles in the following order:
11941 // 1. [ssse3] 1 x pshufb
11942 // 2. [ssse3] 2 x pshufb + 1 x por
11943 // 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw
11944 static SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
11945 const X86Subtarget* Subtarget,
11946 SelectionDAG &DAG) {
11947 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11948 SDValue V1 = SVOp->getOperand(0);
11949 SDValue V2 = SVOp->getOperand(1);
11951 ArrayRef<int> MaskVals = SVOp->getMask();
11953 // Promote splats to a larger type which usually leads to more efficient code.
11954 // FIXME: Is this true if pshufb is available?
11955 if (SVOp->isSplat())
11956 return PromoteSplat(SVOp, DAG);
11958 // If we have SSSE3, case 1 is generated when all result bytes come from
11959 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
11960 // present, fall back to case 3.
11962 // If SSSE3, use 1 pshufb instruction per vector with elements in the result.
11963 if (Subtarget->hasSSSE3()) {
11964 SmallVector<SDValue,16> pshufbMask;
11966 // If all result elements are from one input vector, then only translate
11967 // undef mask values to 0x80 (zero out result) in the pshufb mask.
11969 // Otherwise, we have elements from both input vectors, and must zero out
11970 // elements that come from V2 in the first mask, and V1 in the second mask
11971 // so that we can OR them together.
11972 for (unsigned i = 0; i != 16; ++i) {
11973 int EltIdx = MaskVals[i];
      if (EltIdx < 0 || EltIdx >= 16)
        EltIdx = 0x80;
11976 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
11978 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
11979 DAG.getNode(ISD::BUILD_VECTOR, dl,
11980 MVT::v16i8, pshufbMask));
11982 // As PSHUFB will zero elements with negative indices, it's safe to ignore
11983 // the 2nd operand if it's undefined or zero.
11984 if (V2.getOpcode() == ISD::UNDEF ||
        ISD::isBuildVectorAllZeros(V2.getNode()))
      return V1;
11988 // Calculate the shuffle mask for the second input, shuffle it, and
11989 // OR it with the first shuffled input.
11990 pshufbMask.clear();
11991 for (unsigned i = 0; i != 16; ++i) {
11992 int EltIdx = MaskVals[i];
11993 EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16;
11994 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
11996 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
11997 DAG.getNode(ISD::BUILD_VECTOR, dl,
11998 MVT::v16i8, pshufbMask));
11999 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
12002 // No SSSE3 - Calculate in place words and then fix all out of place words
12003 // With 0-16 extracts & inserts. Worst case is 16 bytes out of order from
12004 // the 16 different words that comprise the two doublequadword input vectors.
12005 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
12006 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
12008 for (int i = 0; i != 8; ++i) {
12009 int Elt0 = MaskVals[i*2];
12010 int Elt1 = MaskVals[i*2+1];
12012 // This word of the result is all undef, skip it.
12013 if (Elt0 < 0 && Elt1 < 0)
12016 // This word of the result is already in the correct place, skip it.
12017 if ((Elt0 == i*2) && (Elt1 == i*2+1))
12020 SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
12021 SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
12024 // If Elt0 and Elt1 are defined, are consecutive, and can be load
12025 // using a single extract together, load it and store it.
12026 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
12027 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
12028 DAG.getIntPtrConstant(Elt1 / 2));
12029 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
12030 DAG.getIntPtrConstant(i));
12034 // If Elt1 is defined, extract it from the appropriate source. If the
12035 // source byte is not also odd, shift the extracted word left 8 bits
12036 // otherwise clear the bottom 8 bits if we need to do an or.
12038 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
12039 DAG.getIntPtrConstant(Elt1 / 2));
12040 if ((Elt1 & 1) == 0)
12041 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
12043 TLI.getShiftAmountTy(InsElt.getValueType())));
12044 else if (Elt0 >= 0)
12045 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
12046 DAG.getConstant(0xFF00, MVT::i16));
12048 // If Elt0 is defined, extract it from the appropriate source. If the
12049 // source byte is not also even, shift the extracted word right 8 bits. If
12050 // Elt1 was also defined, OR the extracted values together before
12051 // inserting them in the result.
12053 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
12054 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
12055 if ((Elt0 & 1) != 0)
12056 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
12058 TLI.getShiftAmountTy(InsElt0.getValueType())));
12059 else if (Elt1 >= 0)
12060 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
12061 DAG.getConstant(0x00FF, MVT::i16));
12062 InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
12065 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
12066 DAG.getIntPtrConstant(i));
12068 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
12071 // v32i8 shuffles - Translate to VPSHUFB if possible.
12073 SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp,
12074 const X86Subtarget *Subtarget,
12075 SelectionDAG &DAG) {
12076 MVT VT = SVOp->getSimpleValueType(0);
12077 SDValue V1 = SVOp->getOperand(0);
12078 SDValue V2 = SVOp->getOperand(1);
12080 SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
12082 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
12083 bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode());
12084 bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode());
12086 // VPSHUFB may be generated if
12087 // (1) one of input vector is undefined or zeroinitializer.
12088 // The mask value 0x80 puts 0 in the corresponding slot of the vector.
12089 // And (2) the mask indexes don't cross the 128-bit lane.
12090 if (VT != MVT::v32i8 || !Subtarget->hasInt256() ||
12091 (!V2IsUndef && !V2IsAllZero && !V1IsAllZero))
12094 if (V1IsAllZero && !V2IsAllZero) {
12095 CommuteVectorShuffleMask(MaskVals, 32);
12098 return getPSHUFB(MaskVals, V1, dl, DAG);
12101 /// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
12102 /// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
12103 /// done when every pair / quad of shuffle mask elements point to elements in
12104 /// the right sequence. e.g.
12105 /// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
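/// That v8i16 mask collapses pairwise into the v4i32 mask <1, 5, 0, 7>.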
12107 SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
12108 SelectionDAG &DAG) {
12109 MVT VT = SVOp->getSimpleValueType(0);
12111 unsigned NumElems = VT.getVectorNumElements();
12114 switch (VT.SimpleTy) {
12115 default: llvm_unreachable("Unexpected!");
  case MVT::v2i64:
  case MVT::v2f64:
    return SDValue(SVOp, 0);
12119 case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break;
12120 case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break;
12121 case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break;
12122 case MVT::v16i8: NewVT = MVT::v4i32; Scale = 4; break;
12123 case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break;
12124 case MVT::v32i8: NewVT = MVT::v8i32; Scale = 4; break;
12127 SmallVector<int, 8> MaskVec;
12128 for (unsigned i = 0; i != NumElems; i += Scale) {
12130 for (unsigned j = 0; j != Scale; ++j) {
12131 int EltIdx = SVOp->getMaskElt(i+j);
12135 StartIdx = (EltIdx / Scale);
12136 if (EltIdx != (int)(StartIdx*Scale + j))
12139 MaskVec.push_back(StartIdx);
12142 SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0));
12143 SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1));
12144 return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
12147 /// getVZextMovL - Return a zero-extending vector move low node.
12149 static SDValue getVZextMovL(MVT VT, MVT OpVT,
12150 SDValue SrcOp, SelectionDAG &DAG,
12151 const X86Subtarget *Subtarget, SDLoc dl) {
12152 if (VT == MVT::v2f64 || VT == MVT::v4f32) {
12153 LoadSDNode *LD = nullptr;
12154 if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
12155 LD = dyn_cast<LoadSDNode>(SrcOp);
12157 // movssrr and movsdrr do not clear top bits. Try to use movd, movq
12159 MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
12160 if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
12161 SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
12162 SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
12163 SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
12165 OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
12166 return DAG.getNode(ISD::BITCAST, dl, VT,
12167 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
12168 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
12170 SrcOp.getOperand(0)
12176 return DAG.getNode(ISD::BITCAST, dl, VT,
12177 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
12178 DAG.getNode(ISD::BITCAST, dl,
/// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vector shuffles
/// which could not be matched by any known target-specific shuffle
static SDValue
12185 LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
12187 SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG);
12188 if (NewOp.getNode())
12191 MVT VT = SVOp->getSimpleValueType(0);
12193 unsigned NumElems = VT.getVectorNumElements();
12194 unsigned NumLaneElems = NumElems / 2;
12197 MVT EltVT = VT.getVectorElementType();
12198 MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
12201 SmallVector<int, 16> Mask;
12202 for (unsigned l = 0; l < 2; ++l) {
12203 // Build a shuffle mask for the output, discovering on the fly which
12204 // input vectors to use as shuffle operands (recorded in InputUsed).
12205 // If building a suitable shuffle vector proves too hard, then bail
12206 // out with UseBuildVector set.
12207 bool UseBuildVector = false;
12208 int InputUsed[2] = { -1, -1 }; // Not yet discovered.
12209 unsigned LaneStart = l * NumLaneElems;
12210 for (unsigned i = 0; i != NumLaneElems; ++i) {
12211 // The mask element. This indexes into the input.
12212 int Idx = SVOp->getMaskElt(i+LaneStart);
12214 // the mask element does not index into any input vector.
12215 Mask.push_back(-1);
12219 // The input vector this mask element indexes into.
12220 int Input = Idx / NumLaneElems;
12222 // Turn the index into an offset from the start of the input vector.
12223 Idx -= Input * NumLaneElems;
12225 // Find or create a shuffle vector operand to hold this input.
12227 for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
12228 if (InputUsed[OpNo] == Input)
12229 // This input vector is already an operand.
12231 if (InputUsed[OpNo] < 0) {
12232 // Create a new operand for this input vector.
12233 InputUsed[OpNo] = Input;
12238 if (OpNo >= array_lengthof(InputUsed)) {
12239 // More than two input vectors used! Give up on trying to create a
12240 // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
12241 UseBuildVector = true;
12245 // Add the mask index for the new shuffle vector.
12246 Mask.push_back(Idx + OpNo * NumLaneElems);
12249 if (UseBuildVector) {
12250 SmallVector<SDValue, 16> SVOps;
12251 for (unsigned i = 0; i != NumLaneElems; ++i) {
12252 // The mask element. This indexes into the input.
12253 int Idx = SVOp->getMaskElt(i+LaneStart);
12255 SVOps.push_back(DAG.getUNDEF(EltVT));
12259 // The input vector this mask element indexes into.
12260 int Input = Idx / NumElems;
12262 // Turn the index into an offset from the start of the input vector.
12263 Idx -= Input * NumElems;
12265 // Extract the vector element by hand.
12266 SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
12267 SVOp->getOperand(Input),
12268 DAG.getIntPtrConstant(Idx)));
12271 // Construct the output using a BUILD_VECTOR.
12272 Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, SVOps);
12273 } else if (InputUsed[0] < 0) {
12274 // No input vectors were used! The result is undefined.
12275 Output[l] = DAG.getUNDEF(NVT);
12277 SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2),
12278 (InputUsed[0] % 2) * NumLaneElems,
12280 // If only one input was used, use an undefined vector for the other.
12281 SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) :
12282 Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2),
12283 (InputUsed[1] % 2) * NumLaneElems, DAG, dl);
12284 // At least one input vector was used. Create a new shuffle vector.
12285 Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
12291 // Concatenate the result back
12292 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]);
12295 /// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
12296 /// 4 elements, and match them with several different shuffle types.
12298 LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
12299 SDValue V1 = SVOp->getOperand(0);
12300 SDValue V2 = SVOp->getOperand(1);
12302 MVT VT = SVOp->getSimpleValueType(0);
12304 assert(VT.is128BitVector() && "Unsupported vector size");
12306 std::pair<int, int> Locs[4];
12307 int Mask1[] = { -1, -1, -1, -1 };
12308 SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end());
12310 unsigned NumHi = 0;
12311 unsigned NumLo = 0;
12312 for (unsigned i = 0; i != 4; ++i) {
12313 int Idx = PermMask[i];
12315 Locs[i] = std::make_pair(-1, -1);
12317 assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
12319 Locs[i] = std::make_pair(0, NumLo);
12320 Mask1[NumLo] = Idx;
12323 Locs[i] = std::make_pair(1, NumHi);
12325 Mask1[2+NumHi] = Idx;
12331 if (NumLo <= 2 && NumHi <= 2) {
12332 // If no more than two elements come from either vector. This can be
12333 // implemented with two shuffles. First shuffle gather the elements.
12334 // The second shuffle, which takes the first shuffle as both of its
12335 // vector operands, put the elements into the right order.
12336 V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12338 int Mask2[] = { -1, -1, -1, -1 };
12340 for (unsigned i = 0; i != 4; ++i)
12341 if (Locs[i].first != -1) {
12342 unsigned Idx = (i < 2) ? 0 : 4;
12343 Idx += Locs[i].first * 2 + Locs[i].second;
12347 return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
12350 if (NumLo == 3 || NumHi == 3) {
12351 // Otherwise, we must have three elements from one vector, call it X, and
12352 // one element from the other, call it Y. First, use a shufps to build an
12353 // intermediate vector with the one element from Y and the element from X
12354 // that will be in the same half in the final destination (the indexes don't
12355 // matter). Then, use a shufps to build the final vector, taking the half
12356 // containing the element from Y from the intermediate, and the other half
12359 // Normalize it so the 3 elements come from V1.
12360 CommuteVectorShuffleMask(PermMask, 4);
12364 // Find the element from V2.
12366 for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
12367 int Val = PermMask[HiIndex];
12374 Mask1[0] = PermMask[HiIndex];
12376 Mask1[2] = PermMask[HiIndex^1];
12378 V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12380 if (HiIndex >= 2) {
12381 Mask1[0] = PermMask[0];
12382 Mask1[1] = PermMask[1];
12383 Mask1[2] = HiIndex & 1 ? 6 : 4;
12384 Mask1[3] = HiIndex & 1 ? 4 : 6;
12385 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12388 Mask1[0] = HiIndex & 1 ? 2 : 0;
12389 Mask1[1] = HiIndex & 1 ? 0 : 2;
12390 Mask1[2] = PermMask[2];
12391 Mask1[3] = PermMask[3];
12396 return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
12399 // Break it into (shuffle shuffle_hi, shuffle_lo).
12400 int LoMask[] = { -1, -1, -1, -1 };
12401 int HiMask[] = { -1, -1, -1, -1 };
12403 int *MaskPtr = LoMask;
12404 unsigned MaskIdx = 0;
12405 unsigned LoIdx = 0;
12406 unsigned HiIdx = 2;
12407 for (unsigned i = 0; i != 4; ++i) {
12414 int Idx = PermMask[i];
12416 Locs[i] = std::make_pair(-1, -1);
12417 } else if (Idx < 4) {
12418 Locs[i] = std::make_pair(MaskIdx, LoIdx);
12419 MaskPtr[LoIdx] = Idx;
12422 Locs[i] = std::make_pair(MaskIdx, HiIdx);
12423 MaskPtr[HiIdx] = Idx;
12428 SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
12429 SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
12430 int MaskOps[] = { -1, -1, -1, -1 };
12431 for (unsigned i = 0; i != 4; ++i)
12432 if (Locs[i].first != -1)
12433 MaskOps[i] = Locs[i].first * 4 + Locs[i].second;
12434 return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
12437 static bool MayFoldVectorLoad(SDValue V) {
12438 while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
12439 V = V.getOperand(0);
12441 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
12442 V = V.getOperand(0);
12443 if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
12444 V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
12445 // BUILD_VECTOR (load), undef
12446 V = V.getOperand(0);
12448 return MayFoldLoad(V);
12452 SDValue getMOVDDup(SDValue &Op, SDLoc &dl, SDValue V1, SelectionDAG &DAG) {
12453 MVT VT = Op.getSimpleValueType();
12455 // Canonicalize to v2f64.
12456 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
12457 return DAG.getNode(ISD::BITCAST, dl, VT,
12458 getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
12463 SDValue getMOVLowToHigh(SDValue &Op, SDLoc &dl, SelectionDAG &DAG,
12465 SDValue V1 = Op.getOperand(0);
12466 SDValue V2 = Op.getOperand(1);
12467 MVT VT = Op.getSimpleValueType();
12469 assert(VT != MVT::v2i64 && "unsupported shuffle type");
12471 if (HasSSE2 && VT == MVT::v2f64)
12472 return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);
12474 // v4f32 or v4i32: canonicalize to v4f32 (which is legal for SSE1)
12475 return DAG.getNode(ISD::BITCAST, dl, VT,
12476 getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32,
12477 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1),
12478 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG));
12482 SDValue getMOVHighToLow(SDValue &Op, SDLoc &dl, SelectionDAG &DAG) {
12483 SDValue V1 = Op.getOperand(0);
12484 SDValue V2 = Op.getOperand(1);
12485 MVT VT = Op.getSimpleValueType();
12487 assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
12488 "unsupported shuffle type");
12490 if (V2.getOpcode() == ISD::UNDEF)
12494 return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
12498 SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
12499 SDValue V1 = Op.getOperand(0);
12500 SDValue V2 = Op.getOperand(1);
12501 MVT VT = Op.getSimpleValueType();
12502 unsigned NumElems = VT.getVectorNumElements();
// Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
// operand of these instructions is only memory, so check if there's a
// potential load folding here, otherwise use SHUFPS or MOVSD to match the
// mask.
12508 bool CanFoldLoad = false;
12510 // Trivial case, when V2 comes from a load.
12511 if (MayFoldVectorLoad(V2))
12512 CanFoldLoad = true;
12514 // When V1 is a load, it can be folded later into a store in isel, example:
12515 // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
12517 // (MOVLPSmr addr:$src1, VR128:$src2)
12518 // So, recognize this potential and also use MOVLPS or MOVLPD
12519 else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
12520 CanFoldLoad = true;
12522 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12524 if (HasSSE2 && NumElems == 2)
12525 return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);
12528 // If we don't care about the second element, proceed to use movss.
12529 if (SVOp->getMaskElt(1) != -1)
12530 return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
// movl and movlp will both match v2i64, but v2i64 is never matched by
// movl earlier because we make it strict to avoid messing with the movlp load
// folding logic (see the code above the getMOVLP call). Match it here
// instead; this is ugly, but it will stay this way until we move all shuffle
// matching to x86-specific nodes. Note that for the 1st condition all
// types are matched with movsd.
//
// FIXME: isMOVLMask should be checked and matched before getMOVLP,
// so that this logic can be removed from here as much as possible.
12542 if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT))
12543 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12544 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12547 assert(VT != MVT::v4i32 && "unsupported shuffle type");
12549 // Invert the operand order and use SHUFPS to match it.
12550 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1,
12551 getShuffleSHUFImmediate(SVOp), DAG);
12554 static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
12555 SelectionDAG &DAG) {
12557 MVT VT = Load->getSimpleValueType(0);
12558 MVT EVT = VT.getVectorElementType();
12559 SDValue Addr = Load->getOperand(1);
12560 SDValue NewAddr = DAG.getNode(
12561 ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
12562 DAG.getConstant(Index * EVT.getStoreSize(), Addr.getSimpleValueType()));
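// Illustrative note (added): e.g. narrowing a v4f32 load to element 2 yields
// a scalar f32 load from the original address plus 2 * 4 = 8 bytes.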
12565 DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
12566 DAG.getMachineFunction().getMachineMemOperand(
12567 Load->getMemOperand(), 0, EVT.getStoreSize()));
12571 // It is only safe to call this function if isINSERTPSMask is true for
12572 // this shufflevector mask.
12573 static SDValue getINSERTPS(ShuffleVectorSDNode *SVOp, SDLoc &dl,
12574 SelectionDAG &DAG) {
12575 // Generate an insertps instruction when inserting an f32 from memory onto a
12576 // v4f32 or when copying a member from one v4f32 to another.
12577 // We also use it for transferring i32 from one register to another,
12578 // since it simply copies the same bits.
// If we're transferring an i32 from memory to a specific element in a
// register, we output a generic DAG that will match the PINSRD instruction.
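// Note (added, summarizing the encoding used below): the INSERTPS immediate
// holds the destination element in bits [5:4] and the source element in bits
// [7:6]; bits [3:0] are the zero mask, which this lowering leaves clear.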
12582 MVT VT = SVOp->getSimpleValueType(0);
12583 MVT EVT = VT.getVectorElementType();
12584 SDValue V1 = SVOp->getOperand(0);
12585 SDValue V2 = SVOp->getOperand(1);
12586 auto Mask = SVOp->getMask();
12587 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
12588 "unsupported vector type for insertps/pinsrd");
12590 auto FromV1Predicate = [](const int &i) { return i < 4 && i > -1; };
12591 auto FromV2Predicate = [](const int &i) { return i >= 4; };
12592 int FromV1 = std::count_if(Mask.begin(), Mask.end(), FromV1Predicate);
12596 unsigned DestIndex;
12600 DestIndex = std::find_if(Mask.begin(), Mask.end(), FromV1Predicate) -
// If we have 1 element from each vector, we have to check if we're
// changing V1's element's place. If so, we're done. Otherwise, we
// should assume we're changing V2's element's place and behave
// accordingly.
12607 int FromV2 = std::count_if(Mask.begin(), Mask.end(), FromV2Predicate);
12608 assert(DestIndex <= INT32_MAX && "truncated destination index");
12609 if (FromV1 == FromV2 &&
12610 static_cast<int>(DestIndex) == Mask[DestIndex] % 4) {
12614 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12617 assert(std::count_if(Mask.begin(), Mask.end(), FromV2Predicate) == 1 &&
12618 "More than one element from V1 and from V2, or no elements from one "
12619 "of the vectors. This case should not have returned true from "
12624 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12627 // Get an index into the source vector in the range [0,4) (the mask is
12628 // in the range [0,8) because it can address V1 and V2)
12629 unsigned SrcIndex = Mask[DestIndex] % 4;
12630 if (MayFoldLoad(From)) {
// Trivial case, when From comes from a load and is only used by the
// shuffle. Make it use insertps from the vector that we need from that
// load.
12635 NarrowVectorLoadToElement(cast<LoadSDNode>(From), SrcIndex, DAG);
12636 if (!NewLoad.getNode())
12639 if (EVT == MVT::f32) {
12640 // Create this as a scalar to vector to match the instruction pattern.
12641 SDValue LoadScalarToVector =
12642 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, NewLoad);
12643 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4);
12644 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, LoadScalarToVector,
12646 } else { // EVT == MVT::i32
12647 // If we're getting an i32 from memory, use an INSERT_VECTOR_ELT
12648 // instruction, to match the PINSRD instruction, which loads an i32 to a
12649 // certain vector element.
12650 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, To, NewLoad,
12651 DAG.getConstant(DestIndex, MVT::i32));
12655 // Vector-element-to-vector
12656 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4 | SrcIndex << 6);
12657 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, From, InsertpsMask);
12660 // Reduce a vector shuffle to zext.
12661 static SDValue LowerVectorIntExtend(SDValue Op, const X86Subtarget *Subtarget,
12662 SelectionDAG &DAG) {
12663 // PMOVZX is only available from SSE41.
12664 if (!Subtarget->hasSSE41())
12667 MVT VT = Op.getSimpleValueType();
// Only AVX2 supports 256-bit vector integer extension.
12670 if (!Subtarget->hasInt256() && VT.is256BitVector())
12673 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12675 SDValue V1 = Op.getOperand(0);
12676 SDValue V2 = Op.getOperand(1);
12677 unsigned NumElems = VT.getVectorNumElements();
// Extending is a unary operation, and the element type of the source vector
// must be smaller than i64.
12681 if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() ||
12682 VT.getVectorElementType() == MVT::i64)
12685 // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4.
12686 unsigned Shift = 1; // Start from 2, i.e. 1 << 1.
12687 while ((1U << Shift) < NumElems) {
12688 if (SVOp->getMaskElt(1U << Shift) == 1)
12691 // The maximal ratio is 8, i.e. from i8 to i64.
12696 // Check the shuffle mask.
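// Illustrative example (added): with an expansion ratio of 2 (Shift == 1),
// the only accepted masks look like <0, -1, 1, -1, 2, -1, ...>, i.e. element
// i must be undef when i is odd and equal to i/2 when i is even.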
12697 unsigned Mask = (1U << Shift) - 1;
12698 for (unsigned i = 0; i != NumElems; ++i) {
12699 int EltIdx = SVOp->getMaskElt(i);
12700 if ((i & Mask) != 0 && EltIdx != -1)
12702 if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift))
12706 unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift;
12707 MVT NeVT = MVT::getIntegerVT(NBits);
12708 MVT NVT = MVT::getVectorVT(NeVT, NumElems >> Shift);
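// Worked example (added): a v16i8 shuffle accepted with ratio 4 (Shift == 2)
// gives NBits = 8 << 2 = 32, so NVT is v4i32 and the node built below is a
// VZEXT of the low four bytes of V1 to v4i32.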
12710 if (!DAG.getTargetLoweringInfo().isTypeLegal(NVT))
12713 return DAG.getNode(ISD::BITCAST, DL, VT,
12714 DAG.getNode(X86ISD::VZEXT, DL, NVT, V1));
12717 static SDValue NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
12718 SelectionDAG &DAG) {
12719 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12720 MVT VT = Op.getSimpleValueType();
12722 SDValue V1 = Op.getOperand(0);
12723 SDValue V2 = Op.getOperand(1);
12725 if (isZeroShuffle(SVOp))
12726 return getZeroVector(VT, Subtarget, DAG, dl);
12728 // Handle splat operations
12729 if (SVOp->isSplat()) {
12730 // Use vbroadcast whenever the splat comes from a foldable load
12731 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
12732 if (Broadcast.getNode())
12736 // Check integer expanding shuffles.
12737 SDValue NewOp = LowerVectorIntExtend(Op, Subtarget, DAG);
12738 if (NewOp.getNode())
// If the shuffle can be profitably rewritten as a narrower shuffle, then
// do so.
12743 if (VT == MVT::v8i16 || VT == MVT::v16i8 || VT == MVT::v16i16 ||
12744 VT == MVT::v32i8) {
12745 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12746 if (NewOp.getNode())
12747 return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
12748 } else if (VT.is128BitVector() && Subtarget->hasSSE2()) {
12749 // FIXME: Figure out a cleaner way to do this.
12750 if (ISD::isBuildVectorAllZeros(V2.getNode())) {
12751 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12752 if (NewOp.getNode()) {
12753 MVT NewVT = NewOp.getSimpleValueType();
12754 if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
12755 NewVT, true, false))
12756 return getVZextMovL(VT, NewVT, NewOp.getOperand(0), DAG, Subtarget,
12759 } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
12760 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12761 if (NewOp.getNode()) {
12762 MVT NewVT = NewOp.getSimpleValueType();
12763 if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
12764 return getVZextMovL(VT, NewVT, NewOp.getOperand(1), DAG, Subtarget,
12773 X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
12774 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12775 SDValue V1 = Op.getOperand(0);
12776 SDValue V2 = Op.getOperand(1);
12777 MVT VT = Op.getSimpleValueType();
12779 unsigned NumElems = VT.getVectorNumElements();
12780 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
12781 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
12782 bool V1IsSplat = false;
12783 bool V2IsSplat = false;
12784 bool HasSSE2 = Subtarget->hasSSE2();
12785 bool HasFp256 = Subtarget->hasFp256();
12786 bool HasInt256 = Subtarget->hasInt256();
12787 MachineFunction &MF = DAG.getMachineFunction();
12789 MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
12791 // Check if we should use the experimental vector shuffle lowering. If so,
12792 // delegate completely to that code path.
12793 if (ExperimentalVectorShuffleLowering)
12794 return lowerVectorShuffle(Op, Subtarget, DAG);
12796 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
12798 if (V1IsUndef && V2IsUndef)
12799 return DAG.getUNDEF(VT);
// When we create a shuffle node we put the UNDEF node as the second operand,
// but in some cases the first operand may be transformed to UNDEF.
// In this case we should just commute the node.
12805 return DAG.getCommutedVectorShuffle(*SVOp);
12807 // Vector shuffle lowering takes 3 steps:
12809 // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
12810 // narrowing and commutation of operands should be handled.
12811 // 2) Matching of shuffles with known shuffle masks to x86 target specific
12813 // 3) Rewriting of unmatched masks into new generic shuffle operations,
12814 // so the shuffle can be broken into other shuffles and the legalizer can
12815 // try the lowering again.
// The general idea is that no vector_shuffle operation should be left to
// be matched during isel; all of them must be converted to a target-specific
// node.
12821 // Normalize the input vectors. Here splats, zeroed vectors, profitable
12822 // narrowing and commutation of operands should be handled. The actual code
12823 // doesn't include all of those, work in progress...
12824 SDValue NewOp = NormalizeVectorShuffle(Op, Subtarget, DAG);
12825 if (NewOp.getNode())
12828 SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end());
12830 // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
12831 // unpckh_undef). Only use pshufd if speed is more important than size.
12832 if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12833 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12834 if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12835 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12837 if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() &&
12838 V2IsUndef && MayFoldVectorLoad(V1))
12839 return getMOVDDup(Op, dl, V1, DAG);
12841 if (isMOVHLPS_v_undef_Mask(M, VT))
12842 return getMOVHighToLow(Op, dl, DAG);
// Used to match splats.
12845 if (HasSSE2 && isUNPCKHMask(M, VT, HasInt256) && V2IsUndef &&
12846 (VT == MVT::v2f64 || VT == MVT::v2i64))
12847 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12849 if (isPSHUFDMask(M, VT)) {
// The actual implementation will match the mask in the if above and then
// during isel it can match several different instructions, not only pshufd
// as its name says. Sad but true; emulate the behavior for now...
12853 if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
12854 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);
12856 unsigned TargetMask = getShuffleSHUFImmediate(SVOp);
12858 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
12859 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);
12861 if (HasFp256 && (VT == MVT::v4f32 || VT == MVT::v2f64))
12862 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1, TargetMask,
12865 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1,
12869 if (isPALIGNRMask(M, VT, Subtarget))
12870 return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2,
12871 getShufflePALIGNRImmediate(SVOp),
12874 if (isVALIGNMask(M, VT, Subtarget))
12875 return getTargetShuffleNode(X86ISD::VALIGN, dl, VT, V1, V2,
12876 getShuffleVALIGNImmediate(SVOp),
12879 // Check if this can be converted into a logical shift.
12880 bool isLeft = false;
12881 unsigned ShAmt = 0;
12883 bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
12884 if (isShift && ShVal.hasOneUse()) {
12885 // If the shifted value has multiple uses, it may be cheaper to use
12886 // v_set0 + movlhps or movhlps, etc.
12887 MVT EltVT = VT.getVectorElementType();
12888 ShAmt *= EltVT.getSizeInBits();
12889 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12892 if (isMOVLMask(M, VT)) {
12893 if (ISD::isBuildVectorAllZeros(V1.getNode()))
12894 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
12895 if (!isMOVLPMask(M, VT)) {
12896 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
12897 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12899 if (VT == MVT::v4i32 || VT == MVT::v4f32)
12900 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12904 // FIXME: fold these into legal mask.
12905 if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasInt256))
12906 return getMOVLowToHigh(Op, dl, DAG, HasSSE2);
12908 if (isMOVHLPSMask(M, VT))
12909 return getMOVHighToLow(Op, dl, DAG);
12911 if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget))
12912 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);
12914 if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget))
12915 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);
12917 if (isMOVLPMask(M, VT))
12918 return getMOVLP(Op, dl, DAG, HasSSE2);
12920 if (ShouldXformToMOVHLPS(M, VT) ||
12921 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT))
12922 return DAG.getCommutedVectorShuffle(*SVOp);
12925 // No better options. Use a vshldq / vsrldq.
12926 MVT EltVT = VT.getVectorElementType();
12927 ShAmt *= EltVT.getSizeInBits();
12928 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12931 bool Commuted = false;
12932 // FIXME: This should also accept a bitcast of a splat? Be careful, not
12933 // 1,1,1,1 -> v8i16 though.
12934 BitVector UndefElements;
12935 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V1.getNode()))
12936 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12938 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V2.getNode()))
12939 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12942 // Canonicalize the splat or undef, if present, to be on the RHS.
12943 if (!V2IsUndef && V1IsSplat && !V2IsSplat) {
12944 CommuteVectorShuffleMask(M, NumElems);
12946 std::swap(V1IsSplat, V2IsSplat);
12950 if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) {
12951 // Shuffling low element of v1 into undef, just return v1.
12954 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
12955 // the instruction selector will not match, so get a canonical MOVL with
12956 // swapped operands to undo the commute.
12957 return getMOVL(DAG, dl, VT, V2, V1);
12960 if (isUNPCKLMask(M, VT, HasInt256))
12961 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12963 if (isUNPCKHMask(M, VT, HasInt256))
12964 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
// Normalize the mask so all entries that point to V2 point to its first
// element, then try to match unpck{h|l} again. If it matches, return a
// new vector_shuffle with the corrected mask.
12970 SmallVector<int, 8> NewMask(M.begin(), M.end());
12971 NormalizeMask(NewMask, NumElems);
12972 if (isUNPCKLMask(NewMask, VT, HasInt256, true))
12973 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12974 if (isUNPCKHMask(NewMask, VT, HasInt256, true))
12975 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
// Commute it back and try unpck* again.
// FIXME: this seems wrong.
12981 CommuteVectorShuffleMask(M, NumElems);
12983 std::swap(V1IsSplat, V2IsSplat);
12985 if (isUNPCKLMask(M, VT, HasInt256))
12986 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12988 if (isUNPCKHMask(M, VT, HasInt256))
12989 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12992 // Normalize the node to match x86 shuffle ops if needed
12993 if (!V2IsUndef && (isSHUFPMask(M, VT, /* Commuted */ true)))
12994 return DAG.getCommutedVectorShuffle(*SVOp);
12996 // The checks below are all present in isShuffleMaskLegal, but they are
12997 // inlined here right now to enable us to directly emit target specific
12998 // nodes, and remove one by one until they don't return Op anymore.
13000 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
13001 SVOp->getSplatIndex() == 0 && V2IsUndef) {
13002 if (VT == MVT::v2f64 || VT == MVT::v2i64)
13003 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
13006 if (isPSHUFHWMask(M, VT, HasInt256))
13007 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
13008 getShufflePSHUFHWImmediate(SVOp),
13011 if (isPSHUFLWMask(M, VT, HasInt256))
13012 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
13013 getShufflePSHUFLWImmediate(SVOp),
13016 unsigned MaskValue;
13017 if (isBlendMask(M, VT, Subtarget->hasSSE41(), HasInt256, &MaskValue))
13018 return LowerVECTOR_SHUFFLEtoBlend(SVOp, MaskValue, Subtarget, DAG);
13020 if (isSHUFPMask(M, VT))
13021 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
13022 getShuffleSHUFImmediate(SVOp), DAG);
13024 if (isUNPCKL_v_undef_Mask(M, VT, HasInt256))
13025 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
13026 if (isUNPCKH_v_undef_Mask(M, VT, HasInt256))
13027 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
13029 //===--------------------------------------------------------------------===//
13030 // Generate target specific nodes for 128 or 256-bit shuffles only
13031 // supported in the AVX instruction set.
13034 // Handle VMOVDDUPY permutations
13035 if (V2IsUndef && isMOVDDUPYMask(M, VT, HasFp256))
13036 return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);
13038 // Handle VPERMILPS/D* permutations
13039 if (isVPERMILPMask(M, VT)) {
13040 if ((HasInt256 && VT == MVT::v8i32) || VT == MVT::v16i32)
13041 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1,
13042 getShuffleSHUFImmediate(SVOp), DAG);
13043 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1,
13044 getShuffleSHUFImmediate(SVOp), DAG);
13048 if (VT.is512BitVector() && isINSERT64x4Mask(M, VT, &Idx))
13049 return Insert256BitVector(V1, Extract256BitVector(V2, 0, DAG, dl),
13050 Idx*(NumElems/2), DAG, dl);
13052 // Handle VPERM2F128/VPERM2I128 permutations
13053 if (isVPERM2X128Mask(M, VT, HasFp256))
13054 return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
13055 V2, getShuffleVPERM2X128Immediate(SVOp), DAG);
13057 if (Subtarget->hasSSE41() && isINSERTPSMask(M, VT))
13058 return getINSERTPS(SVOp, dl, DAG);
13061 if (V2IsUndef && HasInt256 && isPermImmMask(M, VT, Imm8))
13062 return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, Imm8, DAG);
13064 if ((V2IsUndef && HasInt256 && VT.is256BitVector() && NumElems == 8) ||
13065 VT.is512BitVector()) {
13066 MVT MaskEltVT = MVT::getIntegerVT(VT.getVectorElementType().getSizeInBits());
13067 MVT MaskVectorVT = MVT::getVectorVT(MaskEltVT, NumElems);
13068 SmallVector<SDValue, 16> permclMask;
13069 for (unsigned i = 0; i != NumElems; ++i) {
13070 permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MaskEltVT));
13073 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVectorVT, permclMask);
13075 // Bitcast is for VPERMPS since mask is v8i32 but node takes v8f32
13076 return DAG.getNode(X86ISD::VPERMV, dl, VT,
13077 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1);
13078 return DAG.getNode(X86ISD::VPERMV3, dl, VT, V1,
13079 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V2);
13082 //===--------------------------------------------------------------------===//
13083 // Since no target specific shuffle was selected for this generic one,
13084 // lower it into other known shuffles. FIXME: this isn't true yet, but
13085 // this is the plan.
13088 // Handle v8i16 specifically since SSE can do byte extraction and insertion.
13089 if (VT == MVT::v8i16) {
13090 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG);
13091 if (NewOp.getNode())
13095 if (VT == MVT::v16i16 && HasInt256) {
13096 SDValue NewOp = LowerVECTOR_SHUFFLEv16i16(Op, DAG);
13097 if (NewOp.getNode())
13101 if (VT == MVT::v16i8) {
13102 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, Subtarget, DAG);
13103 if (NewOp.getNode())
13107 if (VT == MVT::v32i8) {
13108 SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG);
13109 if (NewOp.getNode())
13113 // Handle all 128-bit wide vectors with 4 elements, and match them with
13114 // several different shuffle types.
13115 if (NumElems == 4 && VT.is128BitVector())
13116 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG);
13118 // Handle general 256-bit shuffles
13119 if (VT.is256BitVector())
13120 return LowerVECTOR_SHUFFLE_256(SVOp, DAG);
// This function assumes its argument is a BUILD_VECTOR of constants or
// undef SDNodes, i.e. ISD::isBuildVectorOfConstantSDNodes(BuildVector) is
// true.
13128 static bool BUILD_VECTORtoBlendMask(BuildVectorSDNode *BuildVector,
13129 unsigned &MaskValue) {
13131 unsigned NumElems = BuildVector->getNumOperands();
13132 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
13133 unsigned NumLanes = (NumElems - 1) / 8 + 1;
13134 unsigned NumElemsInLane = NumElems / NumLanes;
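// Illustrative example (added): for a 4-element vselect with constant
// condition <1,0,1,1> there is a single lane, and the loop below produces
// MaskValue == 0b0010, i.e. only element 1 selects the second operand.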
// Blend for v16i16 should be symmetric for both lanes.
13137 for (unsigned i = 0; i < NumElemsInLane; ++i) {
13138 SDValue EltCond = BuildVector->getOperand(i);
13139 SDValue SndLaneEltCond =
13140 (NumLanes == 2) ? BuildVector->getOperand(i + NumElemsInLane) : EltCond;
13142 int Lane1Cond = -1, Lane2Cond = -1;
13143 if (isa<ConstantSDNode>(EltCond))
13144 Lane1Cond = !isZero(EltCond);
13145 if (isa<ConstantSDNode>(SndLaneEltCond))
13146 Lane2Cond = !isZero(SndLaneEltCond);
13148 if (Lane1Cond == Lane2Cond || Lane2Cond < 0)
13149 // Lane1Cond != 0, means we want the first argument.
13150 // Lane1Cond == 0, means we want the second argument.
13151 // The encoding of this argument is 0 for the first argument, 1
13152 // for the second. Therefore, invert the condition.
13153 MaskValue |= !Lane1Cond << i;
13154 else if (Lane1Cond < 0)
13155 MaskValue |= !Lane2Cond << i;
/// \brief Try to lower a VSELECT instruction to an immediate-controlled blend
/// node.
13164 static SDValue lowerVSELECTtoBLENDI(SDValue Op, const X86Subtarget *Subtarget,
13165 SelectionDAG &DAG) {
13166 SDValue Cond = Op.getOperand(0);
13167 SDValue LHS = Op.getOperand(1);
13168 SDValue RHS = Op.getOperand(2);
13170 MVT VT = Op.getSimpleValueType();
13171 MVT EltVT = VT.getVectorElementType();
13172 unsigned NumElems = VT.getVectorNumElements();
13174 // There is no blend with immediate in AVX-512.
13175 if (VT.is512BitVector())
13178 if (!Subtarget->hasSSE41() || EltVT == MVT::i8)
13180 if (!Subtarget->hasInt256() && VT == MVT::v16i16)
13183 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
13186 // Check the mask for BLEND and build the value.
13187 unsigned MaskValue = 0;
13188 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
13191 // Convert i32 vectors to floating point if it is not AVX2.
13192 // AVX2 introduced VPBLENDD instruction for 128 and 256-bit vectors.
13194 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
13195 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
13197 LHS = DAG.getNode(ISD::BITCAST, dl, VT, LHS);
13198 RHS = DAG.getNode(ISD::BITCAST, dl, VT, RHS);
13201 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, LHS, RHS,
13202 DAG.getConstant(MaskValue, MVT::i32));
13203 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
13206 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
13207 // A vselect where all conditions and data are constants can be optimized into
13208 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
13209 if (ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(0).getNode()) &&
13210 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(1).getNode()) &&
13211 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(2).getNode()))
13214 SDValue BlendOp = lowerVSELECTtoBLENDI(Op, Subtarget, DAG);
13215 if (BlendOp.getNode())
13218 // Some types for vselect were previously set to Expand, not Legal or
13219 // Custom. Return an empty SDValue so we fall-through to Expand, after
13220 // the Custom lowering phase.
13221 MVT VT = Op.getSimpleValueType();
13222 switch (VT.SimpleTy) {
13227 if (Subtarget->hasBWI() && Subtarget->hasVLX())
// We couldn't create a "Blend with immediate" node.
// This node should still be legal, but we'll have to emit a blendv*
// instruction instead.
13238 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
13239 MVT VT = Op.getSimpleValueType();
13242 if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
13245 if (VT.getSizeInBits() == 8) {
13246 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
13247 Op.getOperand(0), Op.getOperand(1));
13248 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
13249 DAG.getValueType(VT));
13250 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13253 if (VT.getSizeInBits() == 16) {
13254 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13255 // If Idx is 0, it's cheaper to do a move instead of a pextrw.
13257 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
13258 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13259 DAG.getNode(ISD::BITCAST, dl,
13262 Op.getOperand(1)));
13263 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
13264 Op.getOperand(0), Op.getOperand(1));
13265 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
13266 DAG.getValueType(VT));
13267 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13270 if (VT == MVT::f32) {
13271 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
13272 // the result back to FR32 register. It's only worth matching if the
13273 // result has a single use which is a store or a bitcast to i32. And in
13274 // the case of a store, it's not worth it if the index is a constant 0,
13275 // because a MOVSSmr can be used instead, which is smaller and faster.
13276 if (!Op.hasOneUse())
13278 SDNode *User = *Op.getNode()->use_begin();
13279 if ((User->getOpcode() != ISD::STORE ||
13280 (isa<ConstantSDNode>(Op.getOperand(1)) &&
13281 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
13282 (User->getOpcode() != ISD::BITCAST ||
13283 User->getValueType(0) != MVT::i32))
13285 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13286 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
13289 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
13292 if (VT == MVT::i32 || VT == MVT::i64) {
13293 // ExtractPS/pextrq works with constant index.
13294 if (isa<ConstantSDNode>(Op.getOperand(1)))
13300 /// Extract one bit from mask vector, like v16i1 or v8i1.
13301 /// AVX-512 feature.
13303 X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const {
13304 SDValue Vec = Op.getOperand(0);
13306 MVT VecVT = Vec.getSimpleValueType();
13307 SDValue Idx = Op.getOperand(1);
13308 MVT EltVT = Op.getSimpleValueType();
13310 assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
13311 assert((VecVT.getVectorNumElements() <= 16 || Subtarget->hasBWI()) &&
13312 "Unexpected vector type in ExtractBitFromMaskVector");
// A variable index can't be handled in mask registers;
// extend the vector to VR512.
13316 if (!isa<ConstantSDNode>(Idx)) {
13317 MVT ExtVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13318 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Vec);
13319 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
13320 ExtVT.getVectorElementType(), Ext, Idx);
13321 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
13324 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13325 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13326 if (!Subtarget->hasDQI() && (VecVT.getVectorNumElements() <= 8))
13327 rc = getRegClassFor(MVT::v16i1);
13328 unsigned MaxSift = rc->getSize()*8 - 1;
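// Descriptive note (added): the shift pair below moves bit IdxVal of the mask
// register up to the MSB and then back down to bit 0, so the requested bit
// ends up in lane 0 where it can be extracted.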
13329 Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
13330 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
13331 Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
13332 DAG.getConstant(MaxSift, MVT::i8));
13333 return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec,
13334 DAG.getIntPtrConstant(0));
13338 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
13339 SelectionDAG &DAG) const {
13341 SDValue Vec = Op.getOperand(0);
13342 MVT VecVT = Vec.getSimpleValueType();
13343 SDValue Idx = Op.getOperand(1);
13345 if (Op.getSimpleValueType() == MVT::i1)
13346 return ExtractBitFromMaskVector(Op, DAG);
13348 if (!isa<ConstantSDNode>(Idx)) {
13349 if (VecVT.is512BitVector() ||
13350 (VecVT.is256BitVector() && Subtarget->hasInt256() &&
13351 VecVT.getVectorElementType().getSizeInBits() == 32)) {
13354 MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
13355 MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
13356 MaskEltVT.getSizeInBits());
13358 Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT);
13359 SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT,
13360 getZeroVector(MaskVT, Subtarget, DAG, dl),
13361 Idx, DAG.getConstant(0, getPointerTy()));
13362 SDValue Perm = DAG.getNode(X86ISD::VPERMV, dl, VecVT, Mask, Vec);
13363 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(),
13364 Perm, DAG.getConstant(0, getPointerTy()));
13369 // If this is a 256-bit vector result, first extract the 128-bit vector and
13370 // then extract the element from the 128-bit vector.
13371 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
13373 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13374 // Get the 128-bit vector.
13375 Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);
13376 MVT EltVT = VecVT.getVectorElementType();
13378 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
13380 //if (IdxVal >= NumElems/2)
13381 // IdxVal -= NumElems/2;
13382 IdxVal -= (IdxVal/ElemsPerChunk)*ElemsPerChunk;
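// Worked example (added): extracting element 5 of a v8i32 first pulls out the
// upper 128-bit half and then extracts element 5 - 4 = 1 from that v4i32.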
13383 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
13384 DAG.getConstant(IdxVal, MVT::i32));
13387 assert(VecVT.is128BitVector() && "Unexpected vector length");
13389 if (Subtarget->hasSSE41()) {
13390 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
13395 MVT VT = Op.getSimpleValueType();
13396 // TODO: handle v16i8.
13397 if (VT.getSizeInBits() == 16) {
13398 SDValue Vec = Op.getOperand(0);
13399 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13401 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
13402 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13403 DAG.getNode(ISD::BITCAST, dl,
13405 Op.getOperand(1)));
// Transform it so it matches pextrw, which produces a 32-bit result.
13407 MVT EltVT = MVT::i32;
13408 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
13409 Op.getOperand(0), Op.getOperand(1));
13410 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
13411 DAG.getValueType(VT));
13412 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13415 if (VT.getSizeInBits() == 32) {
13416 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13420 // SHUFPS the element to the lowest double word, then movss.
13421 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
13422 MVT VVT = Op.getOperand(0).getSimpleValueType();
13423 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13424 DAG.getUNDEF(VVT), Mask);
13425 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13426 DAG.getIntPtrConstant(0));
13429 if (VT.getSizeInBits() == 64) {
13430 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
13431 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
13432 // to match extract_elt for f64.
13433 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13437 // UNPCKHPD the element to the lowest double word, then movsd.
13438 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
13439 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
13440 int Mask[2] = { 1, -1 };
13441 MVT VVT = Op.getOperand(0).getSimpleValueType();
13442 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13443 DAG.getUNDEF(VVT), Mask);
13444 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13445 DAG.getIntPtrConstant(0));
13451 /// Insert one bit to mask vector, like v16i1 or v8i1.
13452 /// AVX-512 feature.
13454 X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
13456 SDValue Vec = Op.getOperand(0);
13457 SDValue Elt = Op.getOperand(1);
13458 SDValue Idx = Op.getOperand(2);
13459 MVT VecVT = Vec.getSimpleValueType();
13461 if (!isa<ConstantSDNode>(Idx)) {
13462 // Non constant index. Extend source and destination,
13463 // insert element and then truncate the result.
13464 MVT ExtVecVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13465 MVT ExtEltVT = (VecVT == MVT::v8i1 ? MVT::i64 : MVT::i32);
13466 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
13467 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVecVT, Vec),
13468 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
13469 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
13472 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13473 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
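// Descriptive note (added): the scalar bit now sits in lane 0 of EltInVec.
// If Vec is undef it is simply shifted into position IdxVal; otherwise the
// shift-up/shift-down pair below isolates that single bit before it is ORed
// into Vec.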
13474 if (Vec.getOpcode() == ISD::UNDEF)
13475 return DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13476 DAG.getConstant(IdxVal, MVT::i8));
13477 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13478 unsigned MaxSift = rc->getSize()*8 - 1;
13479 EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13480 DAG.getConstant(MaxSift, MVT::i8));
13481 EltInVec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, EltInVec,
13482 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
13483 return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
13486 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
13487 SelectionDAG &DAG) const {
13488 MVT VT = Op.getSimpleValueType();
13489 MVT EltVT = VT.getVectorElementType();
13491 if (EltVT == MVT::i1)
13492 return InsertBitToMaskVector(Op, DAG);
13495 SDValue N0 = Op.getOperand(0);
13496 SDValue N1 = Op.getOperand(1);
13497 SDValue N2 = Op.getOperand(2);
13498 if (!isa<ConstantSDNode>(N2))
13500 auto *N2C = cast<ConstantSDNode>(N2);
13501 unsigned IdxVal = N2C->getZExtValue();
13503 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
13504 // into that, and then insert the subvector back into the result.
13505 if (VT.is256BitVector() || VT.is512BitVector()) {
13506 // Get the desired 128-bit vector half.
13507 SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
13509 // Insert the element into the desired half.
13510 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
13511 unsigned IdxIn128 = IdxVal - (IdxVal / NumEltsIn128) * NumEltsIn128;
13513 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
13514 DAG.getConstant(IdxIn128, MVT::i32));
13516 // Insert the changed part back to the 256-bit vector
13517 return Insert128BitVector(N0, V, IdxVal, DAG, dl);
13519 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
13521 if (Subtarget->hasSSE41()) {
13522 if (EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) {
13524 if (VT == MVT::v8i16) {
13525 Opc = X86ISD::PINSRW;
13527 assert(VT == MVT::v16i8);
13528 Opc = X86ISD::PINSRB;
// Transform it so it matches pinsr{b,w}, which expects a GR32 as its second
// argument.
13533 if (N1.getValueType() != MVT::i32)
13534 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13535 if (N2.getValueType() != MVT::i32)
13536 N2 = DAG.getIntPtrConstant(IdxVal);
13537 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
13540 if (EltVT == MVT::f32) {
// Bits [7:6] of the constant are the source select. This will always be
// zero here. The DAG Combiner may combine an extract_elt index into these
// bits. For example (insert (extract, 3), 2) could be matched by putting
// the '3' into bits [7:6] of X86ISD::INSERTPS.
13547 // Bits [5:4] of the constant are the destination select. This is the
13548 // value of the incoming immediate.
13549 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
13550 // combine either bitwise AND or insert of float 0.0 to set these bits.
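// Illustrative example (added): inserting into element 2 with no zeroing
// yields an immediate of 2 << 4 = 0x20.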
13551 N2 = DAG.getIntPtrConstant(IdxVal << 4);
// Create this as a scalar to vector.
13553 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
13554 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
13557 if (EltVT == MVT::i32 || EltVT == MVT::i64) {
13558 // PINSR* works with constant index.
13563 if (EltVT == MVT::i8)
13566 if (EltVT.getSizeInBits() == 16) {
// Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
// as its second argument.
13569 if (N1.getValueType() != MVT::i32)
13570 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13571 if (N2.getValueType() != MVT::i32)
13572 N2 = DAG.getIntPtrConstant(IdxVal);
13573 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
13578 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
13580 MVT OpVT = Op.getSimpleValueType();
13582 // If this is a 256-bit vector result, first insert into a 128-bit
13583 // vector and then insert into the 256-bit vector.
13584 if (!OpVT.is128BitVector()) {
13585 // Insert into a 128-bit vector.
13586 unsigned SizeFactor = OpVT.getSizeInBits()/128;
13587 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
13588 OpVT.getVectorNumElements() / SizeFactor);
13590 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
13592 // Insert the 128-bit vector.
13593 return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
13596 if (OpVT == MVT::v1i64 &&
13597 Op.getOperand(0).getValueType() == MVT::i64)
13598 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
13600 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
13601 assert(OpVT.is128BitVector() && "Expected an SSE type!");
13602 return DAG.getNode(ISD::BITCAST, dl, OpVT,
13603 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
// Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
// a simple subregister reference or explicit instructions to grab
// the upper bits of a vector.
13609 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13610 SelectionDAG &DAG) {
13612 SDValue In = Op.getOperand(0);
13613 SDValue Idx = Op.getOperand(1);
13614 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13615 MVT ResVT = Op.getSimpleValueType();
13616 MVT InVT = In.getSimpleValueType();
13618 if (Subtarget->hasFp256()) {
13619 if (ResVT.is128BitVector() &&
13620 (InVT.is256BitVector() || InVT.is512BitVector()) &&
13621 isa<ConstantSDNode>(Idx)) {
13622 return Extract128BitVector(In, IdxVal, DAG, dl);
13624 if (ResVT.is256BitVector() && InVT.is512BitVector() &&
13625 isa<ConstantSDNode>(Idx)) {
13626 return Extract256BitVector(In, IdxVal, DAG, dl);
13632 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
13633 // simple superregister reference or explicit instructions to insert
13634 // the upper bits of a vector.
13635 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13636 SelectionDAG &DAG) {
13637 if (!Subtarget->hasAVX())
13641 SDValue Vec = Op.getOperand(0);
13642 SDValue SubVec = Op.getOperand(1);
13643 SDValue Idx = Op.getOperand(2);
13645 if (!isa<ConstantSDNode>(Idx))
13648 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13649 MVT OpVT = Op.getSimpleValueType();
13650 MVT SubVecVT = SubVec.getSimpleValueType();
13652 // Fold two 16-byte subvector loads into one 32-byte load:
13653 // (insert_subvector (insert_subvector undef, (load addr), 0),
13654 // (load addr + 16), Elts/2)
13656 if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
13657 Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
13658 OpVT.is256BitVector() && SubVecVT.is128BitVector() &&
13659 !Subtarget->isUnalignedMem32Slow()) {
13660 SDValue SubVec2 = Vec.getOperand(1);
13661 if (auto *Idx2 = dyn_cast<ConstantSDNode>(Vec.getOperand(2))) {
13662 if (Idx2->getZExtValue() == 0) {
13663 SDValue Ops[] = { SubVec2, SubVec };
13664 SDValue LD = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, false);
13671 if ((OpVT.is256BitVector() || OpVT.is512BitVector()) &&
13672 SubVecVT.is128BitVector())
13673 return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
13675 if (OpVT.is512BitVector() && SubVecVT.is256BitVector())
13676 return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
// ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
// their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
// one of the above mentioned nodes. It has to be wrapped because otherwise
// Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
// be used to form addressing modes. These wrapped nodes will be selected
// into real instructions later.
13688 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
13689 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
13691 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13692 // global base reg.
13693 unsigned char OpFlag = 0;
13694 unsigned WrapperKind = X86ISD::Wrapper;
13695 CodeModel::Model M = DAG.getTarget().getCodeModel();
13697 if (Subtarget->isPICStyleRIPRel() &&
13698 (M == CodeModel::Small || M == CodeModel::Kernel))
13699 WrapperKind = X86ISD::WrapperRIP;
13700 else if (Subtarget->isPICStyleGOT())
13701 OpFlag = X86II::MO_GOTOFF;
13702 else if (Subtarget->isPICStyleStubPIC())
13703 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13705 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
13706 CP->getAlignment(),
13707 CP->getOffset(), OpFlag);
13709 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13710 // With PIC, the address is actually $g + Offset.
13712 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13713 DAG.getNode(X86ISD::GlobalBaseReg,
13714 SDLoc(), getPointerTy()),
13721 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
13722 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
13724 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13725 // global base reg.
13726 unsigned char OpFlag = 0;
13727 unsigned WrapperKind = X86ISD::Wrapper;
13728 CodeModel::Model M = DAG.getTarget().getCodeModel();
13730 if (Subtarget->isPICStyleRIPRel() &&
13731 (M == CodeModel::Small || M == CodeModel::Kernel))
13732 WrapperKind = X86ISD::WrapperRIP;
13733 else if (Subtarget->isPICStyleGOT())
13734 OpFlag = X86II::MO_GOTOFF;
13735 else if (Subtarget->isPICStyleStubPIC())
13736 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13738 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
13741 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13743 // With PIC, the address is actually $g + Offset.
13745 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13746 DAG.getNode(X86ISD::GlobalBaseReg,
13747 SDLoc(), getPointerTy()),
13754 X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
13755 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
13757 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13758 // global base reg.
13759 unsigned char OpFlag = 0;
13760 unsigned WrapperKind = X86ISD::Wrapper;
13761 CodeModel::Model M = DAG.getTarget().getCodeModel();
13763 if (Subtarget->isPICStyleRIPRel() &&
13764 (M == CodeModel::Small || M == CodeModel::Kernel)) {
13765 if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
13766 OpFlag = X86II::MO_GOTPCREL;
13767 WrapperKind = X86ISD::WrapperRIP;
13768 } else if (Subtarget->isPICStyleGOT()) {
13769 OpFlag = X86II::MO_GOT;
13770 } else if (Subtarget->isPICStyleStubPIC()) {
13771 OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
13772 } else if (Subtarget->isPICStyleStubNoDynamic()) {
13773 OpFlag = X86II::MO_DARWIN_NONLAZY;
13776 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);
13779 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13781 // With PIC, the address is actually $g + Offset.
13782 if (DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
13783 !Subtarget->is64Bit()) {
13784 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13785 DAG.getNode(X86ISD::GlobalBaseReg,
13786 SDLoc(), getPointerTy()),
// For symbols that require a load from a stub to get the address, emit the
// load.
13792 if (isGlobalStubReference(OpFlag))
13793 Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result,
13794 MachinePointerInfo::getGOT(), false, false, false, 0);
13800 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
13801 // Create the TargetBlockAddressAddress node.
13802 unsigned char OpFlags =
13803 Subtarget->ClassifyBlockAddressReference();
13804 CodeModel::Model M = DAG.getTarget().getCodeModel();
13805 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
13806 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
13808 SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset,
13811 if (Subtarget->isPICStyleRIPRel() &&
13812 (M == CodeModel::Small || M == CodeModel::Kernel))
13813 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13815 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13817 // With PIC, the address is actually $g + Offset.
13818 if (isGlobalRelativeToPICBase(OpFlags)) {
13819 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13820 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13828 X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
13829 int64_t Offset, SelectionDAG &DAG) const {
13830 // Create the TargetGlobalAddress node, folding in the constant
13831 // offset if it is legal.
13832 unsigned char OpFlags =
13833 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget());
13834 CodeModel::Model M = DAG.getTarget().getCodeModel();
13836 if (OpFlags == X86II::MO_NO_FLAG &&
13837 X86::isOffsetSuitableForCodeModel(Offset, M)) {
13838 // A direct static reference to a global.
13839 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
13842 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
13845 if (Subtarget->isPICStyleRIPRel() &&
13846 (M == CodeModel::Small || M == CodeModel::Kernel))
13847 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13849 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13851 // With PIC, the address is actually $g + Offset.
13852 if (isGlobalRelativeToPICBase(OpFlags)) {
13853 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13854 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
// For globals that require a load from a stub to get the address, emit the
// load.
13860 if (isGlobalStubReference(OpFlags))
13861 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
13862 MachinePointerInfo::getGOT(), false, false, false, 0);
13864 // If there was a non-zero offset that we didn't fold, create an explicit
13865 // addition for it.
13867 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
13868 DAG.getConstant(Offset, getPointerTy()));
13874 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
13875 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
13876 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
13877 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
13881 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
13882 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
13883 unsigned char OperandFlags, bool LocalDynamic = false) {
13884 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13885 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13887 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13888 GA->getValueType(0),
13892 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
13896 SDValue Ops[] = { Chain, TGA, *InFlag };
13897 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13899 SDValue Ops[] = { Chain, TGA };
13900 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13903 // TLSADDR will be codegen'ed as call. Inform MFI that function has calls.
13904 MFI->setAdjustsStack(true);
13905 MFI->setHasCalls(true);
13907 SDValue Flag = Chain.getValue(1);
13908 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
13911 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
13913 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13916 SDLoc dl(GA); // ? function entry point might be better
13917 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13918 DAG.getNode(X86ISD::GlobalBaseReg,
13919 SDLoc(), PtrVT), InFlag);
13920 InFlag = Chain.getValue(1);
13922 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
13925 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
13927 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13929 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
13930 X86::RAX, X86II::MO_TLSGD);
13933 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
13939 // Get the start address of the TLS block for this module.
13940 X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
13941 .getInfo<X86MachineFunctionInfo>();
13942 MFI->incNumLocalDynamicTLSAccesses();
13946 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
13947 X86II::MO_TLSLD, /*LocalDynamic=*/true);
13950 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13951 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
13952 InFlag = Chain.getValue(1);
13953 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
13954 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
13957 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations
13961 unsigned char OperandFlags = X86II::MO_DTPOFF;
13962 unsigned WrapperKind = X86ISD::Wrapper;
13963 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13964 GA->getValueType(0),
13965 GA->getOffset(), OperandFlags);
13966 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13968 // Add x@dtpoff with the base.
13969 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
13972 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
13973 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13974 const EVT PtrVT, TLSModel::Model model,
13975 bool is64Bit, bool isPIC) {
13978 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
13979 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
13980 is64Bit ? 257 : 256));
13982 SDValue ThreadPointer =
13983 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0),
13984 MachinePointerInfo(Ptr), false, false, false, 0);
13986 unsigned char OperandFlags = 0;
// Most TLS accesses are not RIP relative, even on x86-64. One exception is
// the initial exec model on x86-64, where the GOT entry is addressed
// RIP-relatively.
13989 unsigned WrapperKind = X86ISD::Wrapper;
13990 if (model == TLSModel::LocalExec) {
13991 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
  } else if (model == TLSModel::InitialExec) {
    if (is64Bit) {
      OperandFlags = X86II::MO_GOTTPOFF;
      WrapperKind = X86ISD::WrapperRIP;
    } else {
      OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
    }
  } else {
    llvm_unreachable("Unexpected model");
  }
14003 // emit "addl x@ntpoff,%eax" (local exec)
14004 // or "addl x@indntpoff,%eax" (initial exec)
14005 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
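  // The 64-bit forms are analogous with %fs, e.g. (roughly):
  //    movq %fs:0, %rax;  leaq x@tpoff(%rax), %rax      (local exec)
  //    movq %fs:0, %rax;  addq x@gottpoff(%rip), %rax   (initial exec)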
  SDValue TGA =
      DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
14008 GA->getOffset(), OperandFlags);
14009 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
14011 if (model == TLSModel::InitialExec) {
14012 if (isPIC && !is64Bit) {
14013 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
                           DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
                           Offset);
    }
    Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
                         MachinePointerInfo::getGOT(), false, false, false, 0);
  }
14022 // The address of the thread local variable is the add of the thread
14023 // pointer with the offset of the variable.
  return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
}

SDValue
X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
14030 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
14031 const GlobalValue *GV = GA->getGlobal();
14033 if (Subtarget->isTargetELF()) {
    TLSModel::Model model = DAG.getTarget().getTLSModel(GV);

    switch (model) {
      case TLSModel::GeneralDynamic:
14038 if (Subtarget->is64Bit())
14039 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
14040 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
14041 case TLSModel::LocalDynamic:
14042 return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(),
14043 Subtarget->is64Bit());
14044 case TLSModel::InitialExec:
14045 case TLSModel::LocalExec:
14046 return LowerToTLSExecModel(
14047 GA, DAG, getPointerTy(), model, Subtarget->is64Bit(),
14048 DAG.getTarget().getRelocationModel() == Reloc::PIC_);
    }
    llvm_unreachable("Unknown TLS model.");
  }

  if (Subtarget->isTargetDarwin()) {
14054 // Darwin only has one model of TLS. Lower to that.
14055 unsigned char OpFlag = 0;
14056 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
                           X86ISD::WrapperRIP : X86ISD::Wrapper;
    SDLoc DL(Op);
14059 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
14060 // global base reg.
14061 bool PIC32 = (DAG.getTarget().getRelocationModel() == Reloc::PIC_) &&
                 !Subtarget->is64Bit();
    if (PIC32)
      OpFlag = X86II::MO_TLVP_PIC_BASE;
    else
      OpFlag = X86II::MO_TLVP;
14068 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
14069 GA->getValueType(0),
14070 GA->getOffset(), OpFlag);
14071 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
    // With PIC32, the address is actually $g + Offset.
    if (PIC32)
      Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
                           DAG.getNode(X86ISD::GlobalBaseReg,
                                       SDLoc(), getPointerTy()),
                           Offset);

14080 // Lowering the machine isd will make sure everything is in the right
14082 SDValue Chain = DAG.getEntryNode();
14083 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
14084 SDValue Args[] = { Chain, Offset };
14085 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
14087 // TLSCALL will be codegen'ed as call. Inform MFI that function has calls.
14088 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
14089 MFI->setAdjustsStack(true);
14091 // And our return value (tls address) is in the standard call return value
14093 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
14094 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
                              Chain.getValue(1));
  }

  if (Subtarget->isTargetKnownWindowsMSVC() ||
14099 Subtarget->isTargetWindowsGNU()) {
14100 // Just use the implicit TLS architecture
    // Need to generate something similar to:
14102 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
14104 // mov ecx, dword [rel _tls_index]: Load index (from C runtime)
14105 // mov rcx, qword [rdx+rcx*8]
14106 // mov eax, .tls$:tlsvar
14107 // [rax+rcx] contains the address
14108 // Windows 64bit: gs:0x58
14109 // Windows 32bit: fs:__tls_array
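    // The 32-bit flavour is analogous (illustrative only):
    //   mov eax, dword [fs:__tls_array]  ; ThreadLocalStoragePointer
    //   mov ecx, dword [_tls_index]
    //   mov eax, dword [eax+ecx*4]
    //   add eax, tlsvar@SECREL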
    SDLoc dl(GA);
    SDValue Chain = DAG.getEntryNode();
14114 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
14115 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
14116 // use its literal value of 0x2C.
14117 Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
                                            ? Type::getInt8PtrTy(*DAG.getContext(),
                                                                 256)
                                            : Type::getInt32PtrTy(*DAG.getContext(),
                                                                  257));
    SDValue TlsArray =
        Subtarget->is64Bit()
14125 ? DAG.getIntPtrConstant(0x58)
14126 : (Subtarget->isTargetWindowsGNU()
14127 ? DAG.getIntPtrConstant(0x2C)
14128 : DAG.getExternalSymbol("_tls_array", getPointerTy()));
14130 SDValue ThreadPointer =
14131 DAG.getLoad(getPointerTy(), dl, Chain, TlsArray,
14132 MachinePointerInfo(Ptr), false, false, false, 0);
14134 // Load the _tls_index variable
14135 SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
14136 if (Subtarget->is64Bit())
14137 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain,
14138 IDX, MachinePointerInfo(), MVT::i32,
                           false, false, false, 0);
    else
      IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
14142 false, false, false, 0);
    SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
                                    getPointerTy());
14146 IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);
14148 SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
14149 res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(),
14150 false, false, false, 0);
14152 // Get the offset of start of .tls section
14153 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
14154 GA->getValueType(0),
14155 GA->getOffset(), X86II::MO_SECREL);
14156 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA);
14158 // The address of the thread local variable is the add of the thread
    // pointer with the offset of the variable.
    return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset);
  }

  llvm_unreachable("TLS not implemented for this target.");
}
14166 /// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
14167 /// and take a 2 x i32 value to shift plus a shift amount.
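/// For example (sketch): an i64 SRL by n < 32 on a 32-bit target becomes
/// roughly Lo = SHRD(Lo, Hi, n) and Hi = Hi >> n; the n >= 32 case is handled
/// below by testing the amount and selecting the shifted values with CMOV.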
14168 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
14169 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
14170 MVT VT = Op.getSimpleValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
14173 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
14174 SDValue ShOpLo = Op.getOperand(0);
14175 SDValue ShOpHi = Op.getOperand(1);
14176 SDValue ShAmt = Op.getOperand(2);
14177 // X86ISD::SHLD and X86ISD::SHRD have defined overflow behavior but the
  // generic ISD nodes don't. Insert an AND to be safe; it's optimized away
14180 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
14181 DAG.getConstant(VTBits - 1, MVT::i8));
14182 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
14183 DAG.getConstant(VTBits - 1, MVT::i8))
14184 : DAG.getConstant(0, VT);
14186 SDValue Tmp2, Tmp3;
14187 if (Op.getOpcode() == ISD::SHL_PARTS) {
14188 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
14189 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
14191 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
14192 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
  // If the shift amount is larger than or equal to the width of a part, we can't
14196 // rely on the results of shld/shrd. Insert a test and select the appropriate
14197 // values for large shift amounts.
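  // For example (assuming i64 parts on a 32-bit target), a left shift by 40
  // has bit 5 of the amount set, so the CMOVs below pick Hi = Lo << (40 & 31)
  // and Lo = 0 instead of the SHLD-based results.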
14198 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
14199 DAG.getConstant(VTBits, MVT::i8));
14200 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
14201 AndNode, DAG.getConstant(0, MVT::i8));
  SDValue Hi, Lo;
  SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
14205 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
14206 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
14208 if (Op.getOpcode() == ISD::SHL_PARTS) {
14209 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
    Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
  } else {
    Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
    Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
  }
14216 SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}
14220 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
14221 SelectionDAG &DAG) const {
  MVT SrcVT = Op.getOperand(0).getSimpleValueType();
  SDLoc dl(Op);

  if (SrcVT.isVector()) {
    if (SrcVT.getVectorElementType() == MVT::i1) {
      MVT IntegerVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
      return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
                         DAG.getNode(ISD::SIGN_EXTEND, dl, IntegerVT,
                                     Op.getOperand(0)));
    }
    return Op;
  }

14235 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
14236 "Unknown SINT_TO_FP to lower!");
  // These are really Legal; return the operand so the caller accepts it as
  // Legal.
  if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
    return Op;
  if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
      Subtarget->is64Bit()) {
    return Op;
  }

  unsigned Size = SrcVT.getSizeInBits()/8;
14248 MachineFunction &MF = DAG.getMachineFunction();
14249 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
  SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
  SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
                               StackSlot,
                               MachinePointerInfo::getFixedStack(SSFI),
                               false, false, 0);
  return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
}

SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
                                     SDValue StackSlot,
                                     SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDVTList Tys;
  bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
  if (useSSE)
    Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
  else
    Tys = DAG.getVTList(Op.getValueType(), MVT::Other);

  unsigned ByteSize = SrcVT.getSizeInBits()/8;
14272 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
  MachineMemOperand *MMO;
  if (FI) {
    int SSFI = FI->getIndex();
    MMO = DAG.getMachineFunction()
          .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
                                MachineMemOperand::MOLoad, ByteSize, ByteSize);
  } else {
    MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
    StackSlot = StackSlot.getOperand(1);
  }
14284 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
  SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
                                                    X86ISD::FILD, DL,
                                           Tys, Ops, SrcVT, MMO);

  if (useSSE) {
    Chain = Result.getValue(1);
14291 SDValue InFlag = Result.getValue(2);
14293 // FIXME: Currently the FST is flagged to the FILD_FLAG. This
14294 // shouldn't be necessary except that RFP cannot be live across
14295 // multiple blocks. When stackifier is fixed, they can be uncoupled.
14296 MachineFunction &MF = DAG.getMachineFunction();
14297 unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
14298 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
14299 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
    Tys = DAG.getVTList(MVT::Other);
    SDValue Ops[] = {
      Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
    };
    MachineMemOperand *MMO =
14305 DAG.getMachineFunction()
14306 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14307 MachineMemOperand::MOStore, SSFISize, SSFISize);
14309 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
14310 Ops, Op.getValueType(), MMO);
    Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
                         MachinePointerInfo::getFixedStack(SSFI),
                         false, false, false, 0);
  }
  return Result;
}
14319 // LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
14320 SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
14321 SelectionDAG &DAG) const {
  // This algorithm is not obvious. Here is what we're trying to output
  // (c0 = (uint4){ 0x43300000U, 0x45300000U, 0U, 0U },
  //  c1 = (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }):
  //    movq       %rax,  %xmm0
  //    punpckldq  (c0),  %xmm0
  //    subpd      (c1),  %xmm0
  //    haddpd     %xmm0, %xmm0                            (with SSE3)
  //    pshufd $0x4e, %xmm0, %xmm1; addpd %xmm1, %xmm0     (without SSE3)

  SDLoc dl(Op);
14336 LLVMContext *Context = DAG.getContext();
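  // Why this works (sketch): after the punpckldq the two doubles have the bit
  // patterns 2^52 + lo32 and 2^84 + hi32 * 2^32 (0x433/0x453 are the exponent
  // words). Subtracting c1 = { 2^52, 2^84 } leaves lo32 and hi32 * 2^32 exactly,
  // and their sum is the original unsigned 64-bit value.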
14338 // Build some magic constants.
14339 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
14340 Constant *C0 = ConstantDataVector::get(*Context, CV0);
14341 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);
14343 SmallVector<Constant*,2> CV1;
14345 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
14346 APInt(64, 0x4330000000000000ULL))));
14348 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
14349 APInt(64, 0x4530000000000000ULL))));
14350 Constant *C1 = ConstantVector::get(CV1);
14351 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);
14353 // Load the 64-bit value into an XMM register.
14354 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
14356 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
14357 MachinePointerInfo::getConstantPool(),
14358 false, false, false, 16);
14359 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32,
14360 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1),
14363 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
14364 MachinePointerInfo::getConstantPool(),
14365 false, false, false, 16);
14366 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1);
14367 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
14370 if (Subtarget->hasSSE3()) {
14371 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
14372 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
14374 SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub);
14375 SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
14377 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
14378 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle),
14382 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
14383 DAG.getIntPtrConstant(0));
14386 // LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
14387 SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
14388 SelectionDAG &DAG) const {
14390 // FP constant to bias correct the final result.
14391 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
14394 // Load the 32-bit value into an XMM register.
14395 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
14398 // Zero out the upper parts of the register.
14399 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
14401 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14402 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
14403 DAG.getIntPtrConstant(0));
14405 // Or the load with the bias.
14406 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
14407 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14408 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14409 MVT::v2f64, Load)),
14410 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14411 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14412 MVT::v2f64, Bias)));
14413 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14414 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
14415 DAG.getIntPtrConstant(0));
14417 // Subtract the bias.
14418 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
14420 // Handle final rounding.
14421 EVT DestVT = Op.getValueType();
14423 if (DestVT.bitsLT(MVT::f64))
14424 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
14425 DAG.getIntPtrConstant(0));
  if (DestVT.bitsGT(MVT::f64))
    return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);

  // Handle final rounding.
  return Sub;
}
14433 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
14434 const X86Subtarget &Subtarget) {
14435 // The algorithm is the following:
14436 // #ifdef __SSE4_1__
14437 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14438 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
  //     (uint4) 0x53000000, 0xaa);
  // #else
  //     uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
  //     uint4 hi = (v >> 16) | (uint4) 0x53000000;
  // #endif
  //     float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14445 // return (float4) lo + fhi;
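  // Why this works (sketch): as IEEE-754 bit patterns, lo is 2^23 + (v & 0xffff)
  // and hi is 2^39 + (v >> 16) * 2^16, both exact. Subtracting (2^39 + 2^23)
  // from hi and adding lo reconstructs v, with a single rounding in the last add.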
  SDLoc DL(Op);
  SDValue V = Op->getOperand(0);
14449 EVT VecIntVT = V.getValueType();
14450 bool Is128 = VecIntVT == MVT::v4i32;
14451 EVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
  // If we convert to something other than the supported type, e.g., to v4f64,
  // abort early.
  if (VecFloatVT != Op->getValueType(0))
    return SDValue();

  unsigned NumElts = VecIntVT.getVectorNumElements();
14458 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
14459 "Unsupported custom type");
14460 assert(NumElts <= 8 && "The size of the constant array must be fixed");
  // In the #ifdef/#else code, we have in common:
14463 // - The vector of constants:
14469 // Create the splat vector for 0x4b000000.
14470 SDValue CstLow = DAG.getConstant(0x4b000000, MVT::i32);
14471 SDValue CstLowArray[] = {CstLow, CstLow, CstLow, CstLow,
14472 CstLow, CstLow, CstLow, CstLow};
14473 SDValue VecCstLow = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14474 makeArrayRef(&CstLowArray[0], NumElts));
14475 // Create the splat vector for 0x53000000.
14476 SDValue CstHigh = DAG.getConstant(0x53000000, MVT::i32);
14477 SDValue CstHighArray[] = {CstHigh, CstHigh, CstHigh, CstHigh,
14478 CstHigh, CstHigh, CstHigh, CstHigh};
14479 SDValue VecCstHigh = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14480 makeArrayRef(&CstHighArray[0], NumElts));
14482 // Create the right shift.
14483 SDValue CstShift = DAG.getConstant(16, MVT::i32);
14484 SDValue CstShiftArray[] = {CstShift, CstShift, CstShift, CstShift,
14485 CstShift, CstShift, CstShift, CstShift};
14486 SDValue VecCstShift = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14487 makeArrayRef(&CstShiftArray[0], NumElts));
14488 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
14491 if (Subtarget.hasSSE41()) {
14492 EVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
14493 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14494 SDValue VecCstLowBitcast =
14495 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstLow);
14496 SDValue VecBitcast = DAG.getNode(ISD::BITCAST, DL, VecI16VT, V);
    // Low will be bitcasted right away, so do not bother bitcasting back to its
    // original type.
14499 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
14500 VecCstLowBitcast, DAG.getConstant(0xaa, MVT::i32));
14501 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14502 // (uint4) 0x53000000, 0xaa);
14503 SDValue VecCstHighBitcast =
14504 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstHigh);
14505 SDValue VecShiftBitcast =
14506 DAG.getNode(ISD::BITCAST, DL, VecI16VT, HighShift);
14507 // High will be bitcasted right away, so do not bother bitcasting back to
14508 // its original type.
14509 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
14510 VecCstHighBitcast, DAG.getConstant(0xaa, MVT::i32));
14512 SDValue CstMask = DAG.getConstant(0xffff, MVT::i32);
14513 SDValue VecCstMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, CstMask,
14514 CstMask, CstMask, CstMask);
14515 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14516 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
14517 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
14519 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14520 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
14523 // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
14524 SDValue CstFAdd = DAG.getConstantFP(
14525 APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), MVT::f32);
14526 SDValue CstFAddArray[] = {CstFAdd, CstFAdd, CstFAdd, CstFAdd,
14527 CstFAdd, CstFAdd, CstFAdd, CstFAdd};
14528 SDValue VecCstFAdd = DAG.getNode(ISD::BUILD_VECTOR, DL, VecFloatVT,
14529 makeArrayRef(&CstFAddArray[0], NumElts));
14531 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14532 SDValue HighBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, High);
14534 DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
14535 // return (float4) lo + fhi;
14536 SDValue LowBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, Low);
14537 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
14540 SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
14541 SelectionDAG &DAG) const {
14542 SDValue N0 = Op.getOperand(0);
  MVT SVT = N0.getSimpleValueType();
  SDLoc dl(Op);

  switch (SVT.SimpleTy) {
  default:
    llvm_unreachable("Custom UINT_TO_FP is not supported!");
  case MVT::v4i8: case MVT::v4i16: case MVT::v8i8: case MVT::v8i16: {
    MVT NVT = MVT::getVectorVT(MVT::i32, SVT.getVectorNumElements());
    return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
                       DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
  }
  case MVT::v4i32: case MVT::v8i32:
    return lowerUINT_TO_FP_vXi32(Op, DAG, *Subtarget);
  }
  llvm_unreachable(nullptr);
}
14564 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
14565 SelectionDAG &DAG) const {
  SDValue N0 = Op.getOperand(0);
  SDLoc dl(Op);
14569 if (Op.getValueType().isVector())
14570 return lowerUINT_TO_FP_vec(Op, DAG);
14572 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
14573 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
14574 // the optimization here.
14575 if (DAG.SignBitIsZero(N0))
14576 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
14578 MVT SrcVT = N0.getSimpleValueType();
14579 MVT DstVT = Op.getSimpleValueType();
14580 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
14581 return LowerUINT_TO_FP_i64(Op, DAG);
14582 if (SrcVT == MVT::i32 && X86ScalarSSEf64)
14583 return LowerUINT_TO_FP_i32(Op, DAG);
  if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
    return SDValue();
14587 // Make a 64-bit buffer, and use it to build an FILD.
14588 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
14589 if (SrcVT == MVT::i32) {
14590 SDValue WordOff = DAG.getConstant(4, getPointerTy());
14591 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
14592 getPointerTy(), StackSlot, WordOff);
14593 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14594 StackSlot, MachinePointerInfo(),
14596 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
14597 OffsetSlot, MachinePointerInfo(),
    SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
    return Fild;
  }

  assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
  SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14605 StackSlot, MachinePointerInfo(),
14607 // For i64 source, we need to add the appropriate power of 2 if the input
14608 // was negative. This is the same as the optimization in
  // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
14610 // we must be careful to do the computation in x87 extended precision, not
14611 // in SSE. (The generic code can't know it's OK to do this, or how to.)
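  // The fudge constant 0x5F800000 below is 2^64 as a float: if the i64 input
  // was negative, FILD treated it as (value - 2^64), so we conditionally add
  // 2^64 back (selected by the sign test) before rounding to the destination.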
14612 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
14613 MachineMemOperand *MMO =
14614 DAG.getMachineFunction()
14615 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14616 MachineMemOperand::MOLoad, 8, 8);
14618 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
14619 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
14620 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
14623 APInt FF(32, 0x5F800000ULL);
14625 // Check whether the sign bit is set.
14626 SDValue SignSet = DAG.getSetCC(dl,
14627 getSetCCResultType(*DAG.getContext(), MVT::i64),
14628 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
14631 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
14632 SDValue FudgePtr = DAG.getConstantPool(
14633 ConstantInt::get(*DAG.getContext(), FF.zext(64)),
14636 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
14637 SDValue Zero = DAG.getIntPtrConstant(0);
14638 SDValue Four = DAG.getIntPtrConstant(4);
14639 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
14641 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);
14643 // Load the value out, extending it from f32 to f80.
14644 // FIXME: Avoid the extend by constructing the right constant pool?
14645 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
14646 FudgePtr, MachinePointerInfo::getConstantPool(),
14647 MVT::f32, false, false, false, 4);
14648 // Extend everything to 80 bits to force it to be done on x87.
14649 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
14650 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
14653 std::pair<SDValue,SDValue>
14654 X86TargetLowering:: FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
14655 bool IsSigned, bool IsReplace) const {
14658 EVT DstTy = Op.getValueType();
14660 if (!IsSigned && !isIntegerTypeFTOL(DstTy)) {
14661 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
14665 assert(DstTy.getSimpleVT() <= MVT::i64 &&
14666 DstTy.getSimpleVT() >= MVT::i16 &&
14667 "Unknown FP_TO_INT to lower!");
14669 // These are really Legal.
14670 if (DstTy == MVT::i32 &&
14671 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14672 return std::make_pair(SDValue(), SDValue());
14673 if (Subtarget->is64Bit() &&
14674 DstTy == MVT::i64 &&
14675 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14676 return std::make_pair(SDValue(), SDValue());
14678 // We lower FP->int64 either into FISTP64 followed by a load from a temporary
14679 // stack slot, or into the FTOL runtime function.
14680 MachineFunction &MF = DAG.getMachineFunction();
14681 unsigned MemSize = DstTy.getSizeInBits()/8;
14682 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14683 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14686 if (!IsSigned && isIntegerTypeFTOL(DstTy))
14687 Opc = X86ISD::WIN_FTOL;
14689 switch (DstTy.getSimpleVT().SimpleTy) {
14690 default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
14691 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
14692 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
14693 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
14696 SDValue Chain = DAG.getEntryNode();
14697 SDValue Value = Op.getOperand(0);
14698 EVT TheVT = Op.getOperand(0).getValueType();
14699 // FIXME This causes a redundant load/store if the SSE-class value is already
14700 // in memory, such as if it is on the callstack.
14701 if (isScalarFPTypeInSSEReg(TheVT)) {
14702 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
14703 Chain = DAG.getStore(Chain, DL, Value, StackSlot,
14704 MachinePointerInfo::getFixedStack(SSFI),
14706 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
14708 Chain, StackSlot, DAG.getValueType(TheVT)
14711 MachineMemOperand *MMO =
14712 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14713 MachineMemOperand::MOLoad, MemSize, MemSize);
14714 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
14715 Chain = Value.getValue(1);
14716 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14717 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14720 MachineMemOperand *MMO =
14721 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14722 MachineMemOperand::MOStore, MemSize, MemSize);
14724 if (Opc != X86ISD::WIN_FTOL) {
14725 // Build the FP_TO_INT*_IN_MEM
14726 SDValue Ops[] = { Chain, Value, StackSlot };
14727 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
14729 return std::make_pair(FIST, StackSlot);
14731 SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL,
14732 DAG.getVTList(MVT::Other, MVT::Glue),
14734 SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX,
14735 MVT::i32, ftol.getValue(1));
14736 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX,
14737 MVT::i32, eax.getValue(2));
14738 SDValue Ops[] = { eax, edx };
14739 SDValue pair = IsReplace
14740 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops)
14741 : DAG.getMergeValues(Ops, DL);
14742 return std::make_pair(pair, SDValue());
14746 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
14747 const X86Subtarget *Subtarget) {
14748 MVT VT = Op->getSimpleValueType(0);
14749 SDValue In = Op->getOperand(0);
14750 MVT InVT = In.getSimpleValueType();
14753 // Optimize vectors in AVX mode:
14756 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32.
14757 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
14758 // Concat upper and lower parts.
14761 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64.
14762 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
14763 // Concat upper and lower parts.
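  // In other words, when AVX2's native VZEXT is unavailable we widen by
  // unpacking with zero (zext) or undef (any_extend) and concatenating the two
  // half-width results, as implemented below.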
14766 if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) &&
14767 ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) &&
14768 ((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
14771 if (Subtarget->hasInt256())
14772 return DAG.getNode(X86ISD::VZEXT, dl, VT, In);
14774 SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
14775 SDValue Undef = DAG.getUNDEF(InVT);
14776 bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
14777 SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14778 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14780 MVT HVT = MVT::getVectorVT(VT.getVectorElementType(),
14781 VT.getVectorNumElements()/2);
14783 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo);
14784 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi);
14786 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
14789 static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
14790 SelectionDAG &DAG) {
14791 MVT VT = Op->getSimpleValueType(0);
14792 SDValue In = Op->getOperand(0);
14793 MVT InVT = In.getSimpleValueType();
14795 unsigned int NumElts = VT.getVectorNumElements();
14796 if (NumElts != 8 && NumElts != 16)
14799 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
14800 return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
14802 EVT ExtVT = (NumElts == 8)? MVT::v8i64 : MVT::v16i32;
14803 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14804 // Now we have only mask extension
14805 assert(InVT.getVectorElementType() == MVT::i1);
14806 SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
14807 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14808 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
14809 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14810 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14811 MachinePointerInfo::getConstantPool(),
14812 false, false, false, Alignment);
14814 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, DL, ExtVT, In, Ld);
14815 if (VT.is512BitVector())
14817 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Brcst);
14820 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14821 SelectionDAG &DAG) {
14822 if (Subtarget->hasFp256()) {
14823 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14831 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14832 SelectionDAG &DAG) {
14834 MVT VT = Op.getSimpleValueType();
14835 SDValue In = Op.getOperand(0);
14836 MVT SVT = In.getSimpleValueType();
14838 if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
14839 return LowerZERO_EXTEND_AVX512(Op, DAG);
14841 if (Subtarget->hasFp256()) {
14842 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14847 assert(!VT.is256BitVector() || !SVT.is128BitVector() ||
14848 VT.getVectorNumElements() != SVT.getVectorNumElements());
14852 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
14854 MVT VT = Op.getSimpleValueType();
14855 SDValue In = Op.getOperand(0);
14856 MVT InVT = In.getSimpleValueType();
14858 if (VT == MVT::i1) {
14859 assert((InVT.isInteger() && (InVT.getSizeInBits() <= 64)) &&
14860 "Invalid scalar TRUNCATE operation");
    if (InVT.getSizeInBits() >= 32)
      return SDValue();
    In = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, In);
    return DAG.getNode(ISD::TRUNCATE, DL, VT, In);
  }
14866 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
14867 "Invalid TRUNCATE operation");
14869 if (InVT.is512BitVector() || VT.getVectorElementType() == MVT::i1) {
14870 if (VT.getVectorElementType().getSizeInBits() >=8)
14871 return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
14873 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
14874 unsigned NumElts = InVT.getVectorNumElements();
14875 assert ((NumElts == 8 || NumElts == 16) && "Unexpected vector type");
14876 if (InVT.getSizeInBits() < 512) {
14877 MVT ExtVT = (NumElts == 16)? MVT::v16i32 : MVT::v8i64;
14878 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
14882 SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
14883 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14884 SDValue CP = DAG.getConstantPool(C, getPointerTy());
14885 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14886 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14887 MachinePointerInfo::getConstantPool(),
14888 false, false, false, Alignment);
14889 SDValue OneV = DAG.getNode(X86ISD::VBROADCAST, DL, InVT, Ld);
14890 SDValue And = DAG.getNode(ISD::AND, DL, InVT, OneV, In);
14891 return DAG.getNode(X86ISD::TESTM, DL, VT, And, And);
14894 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
14895 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
14896 if (Subtarget->hasInt256()) {
14897 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
14898 In = DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, In);
14899 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32),
14901 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
14902 DAG.getIntPtrConstant(0));
14905 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14906 DAG.getIntPtrConstant(0));
14907 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14908 DAG.getIntPtrConstant(2));
14909 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14910 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14911 static const int ShufMask[] = {0, 2, 4, 6};
14912 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
14915 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
    // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
14917 if (Subtarget->hasInt256()) {
14918 In = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, In);
14920 SmallVector<SDValue,32> pshufbMask;
14921 for (unsigned i = 0; i < 2; ++i) {
14922 pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8));
14923 pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8));
14924 pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8));
14925 pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8));
14926 pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8));
14927 pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8));
14928 pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8));
14929 pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8));
14930 for (unsigned j = 0; j < 8; ++j)
        pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
      }
14933 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
14934 In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
14935 In = DAG.getNode(ISD::BITCAST, DL, MVT::v4i64, In);
14937 static const int ShufMask[] = {0, 2, -1, -1};
14938 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
14940 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14941 DAG.getIntPtrConstant(0));
14942 return DAG.getNode(ISD::BITCAST, DL, VT, In);
14945 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14946 DAG.getIntPtrConstant(0));
14948 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14949 DAG.getIntPtrConstant(4));
14951 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpLo);
14952 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpHi);
14954 // The PSHUFB mask:
14955 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
14956 -1, -1, -1, -1, -1, -1, -1, -1};
14958 SDValue Undef = DAG.getUNDEF(MVT::v16i8);
14959 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, Undef, ShufMask1);
14960 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, Undef, ShufMask1);
14962 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14963 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14965 // The MOVLHPS Mask:
14966 static const int ShufMask2[] = {0, 1, 4, 5};
14967 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
14968 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, res);
14971 // Handle truncation of V256 to V128 using shuffles.
14972 if (!VT.is128BitVector() || !InVT.is256BitVector())
14975 assert(Subtarget->hasFp256() && "256-bit vector without AVX!");
14977 unsigned NumElems = VT.getVectorNumElements();
14978 MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
14980 SmallVector<int, 16> MaskVec(NumElems * 2, -1);
14981 // Prepare truncation shuffle mask
14982 for (unsigned i = 0; i != NumElems; ++i)
14983 MaskVec[i] = i * 2;
14984 SDValue V = DAG.getVectorShuffle(NVT, DL,
14985 DAG.getNode(ISD::BITCAST, DL, NVT, In),
14986 DAG.getUNDEF(NVT), &MaskVec[0]);
14987 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
14988 DAG.getIntPtrConstant(0));
14991 SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
14992 SelectionDAG &DAG) const {
14993 assert(!Op.getSimpleValueType().isVector());
14995 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14996 /*IsSigned=*/ true, /*IsReplace=*/ false);
14997 SDValue FIST = Vals.first, StackSlot = Vals.second;
14998 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
14999 if (!FIST.getNode()) return Op;
15001 if (StackSlot.getNode())
15002 // Load the result.
15003 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
15004 FIST, StackSlot, MachinePointerInfo(),
15005 false, false, false, 0);
15007 // The node is the result.
15011 SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
15012 SelectionDAG &DAG) const {
15013 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
15014 /*IsSigned=*/ false, /*IsReplace=*/ false);
15015 SDValue FIST = Vals.first, StackSlot = Vals.second;
15016 assert(FIST.getNode() && "Unexpected failure");
15018 if (StackSlot.getNode())
15019 // Load the result.
15020 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
15021 FIST, StackSlot, MachinePointerInfo(),
15022 false, false, false, 0);
15024 // The node is the result.
15028 static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
15030 MVT VT = Op.getSimpleValueType();
15031 SDValue In = Op.getOperand(0);
15032 MVT SVT = In.getSimpleValueType();
15034 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
15036 return DAG.getNode(X86ISD::VFPEXT, DL, VT,
15037 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
15038 In, DAG.getUNDEF(SVT)));
15041 /// The only differences between FABS and FNEG are the mask and the logic op.
15042 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
15043 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
15044 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
15045 "Wrong opcode for lowering FABS or FNEG.");
15047 bool IsFABS = (Op.getOpcode() == ISD::FABS);
15049 // If this is a FABS and it has an FNEG user, bail out to fold the combination
15050 // into an FNABS. We'll lower the FABS after that if it is still in use.
  if (IsFABS)
    for (SDNode *User : Op->uses())
      if (User->getOpcode() == ISD::FNEG)
        return Op;
15056 SDValue Op0 = Op.getOperand(0);
  bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
  SDLoc dl(Op);
15060 MVT VT = Op.getSimpleValueType();
15061 // Assume scalar op for initialization; update for vector if needed.
15062 // Note that there are no scalar bitwise logical SSE/AVX instructions, so we
15063 // generate a 16-byte vector constant and logic op even for the scalar case.
15064 // Using a 16-byte mask allows folding the load of the mask with
15065 // the logic op, so it can save (~4 bytes) on code size.
  MVT EltVT = VT;
  unsigned NumElts = VT == MVT::f64 ? 2 : 4;
15068 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
15069 // decide if we should generate a 16-byte constant mask when we only need 4 or
15070 // 8 bytes for the scalar case.
15071 if (VT.isVector()) {
15072 EltVT = VT.getVectorElementType();
15073 NumElts = VT.getVectorNumElements();
15076 unsigned EltBits = EltVT.getSizeInBits();
15077 LLVMContext *Context = DAG.getContext();
15078 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
  APInt MaskElt =
      IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignBit(EltBits);
15081 Constant *C = ConstantInt::get(*Context, MaskElt);
15082 C = ConstantVector::getSplat(NumElts, C);
15083 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15084 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy());
15085 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
15086 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
15087 MachinePointerInfo::getConstantPool(),
15088 false, false, false, Alignment);
15090 if (VT.isVector()) {
15091 // For a vector, cast operands to a vector type, perform the logic op,
15092 // and cast the result back to the original value type.
15093 MVT VecVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
15094 SDValue MaskCasted = DAG.getNode(ISD::BITCAST, dl, VecVT, Mask);
15095 SDValue Operand = IsFNABS ?
15096 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0.getOperand(0)) :
15097 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0);
15098 unsigned BitOp = IsFABS ? ISD::AND : IsFNABS ? ISD::OR : ISD::XOR;
15099 return DAG.getNode(ISD::BITCAST, dl, VT,
15100 DAG.getNode(BitOp, dl, VecVT, Operand, MaskCasted));
15103 // If not vector, then scalar.
15104 unsigned BitOp = IsFABS ? X86ISD::FAND : IsFNABS ? X86ISD::FOR : X86ISD::FXOR;
15105 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
15106 return DAG.getNode(BitOp, dl, VT, Operand, Mask);
15109 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
15110 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15111 LLVMContext *Context = DAG.getContext();
15112 SDValue Op0 = Op.getOperand(0);
15113 SDValue Op1 = Op.getOperand(1);
15115 MVT VT = Op.getSimpleValueType();
15116 MVT SrcVT = Op1.getSimpleValueType();
15118 // If second operand is smaller, extend it first.
15119 if (SrcVT.bitsLT(VT)) {
    Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
    SrcVT = VT;
  }
  // And if it is bigger, shrink it first.
  if (SrcVT.bitsGT(VT)) {
    Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1));
    SrcVT = VT;
  }
15129 // At this point the operands and the result should have the same
15130 // type, and that won't be f80 since that is not custom lowered.
15132 const fltSemantics &Sem =
15133 VT == MVT::f64 ? APFloat::IEEEdouble : APFloat::IEEEsingle;
15134 const unsigned SizeInBits = VT.getSizeInBits();
15136 SmallVector<Constant *, 4> CV(
15137 VT == MVT::f64 ? 2 : 4,
15138 ConstantFP::get(*Context, APFloat(Sem, APInt(SizeInBits, 0))));
15140 // First, clear all bits but the sign bit from the second operand (sign).
15141 CV[0] = ConstantFP::get(*Context,
15142 APFloat(Sem, APInt::getHighBitsSet(SizeInBits, 1)));
15143 Constant *C = ConstantVector::get(CV);
15144 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
15145 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
15146 MachinePointerInfo::getConstantPool(),
15147 false, false, false, 16);
15148 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);
15150 // Next, clear the sign bit from the first operand (magnitude).
15151 // If it's a constant, we can clear it here.
15152 if (ConstantFPSDNode *Op0CN = dyn_cast<ConstantFPSDNode>(Op0)) {
15153 APFloat APF = Op0CN->getValueAPF();
    // If the magnitude is a positive zero, the sign bit alone is enough.
    if (APF.isPosZero())
      return SignBit;
    APF.clearSign();
    CV[0] = ConstantFP::get(*Context, APF);
  } else {
    CV[0] = ConstantFP::get(
        *Context,
        APFloat(Sem, APInt::getLowBitsSet(SizeInBits, SizeInBits - 1)));
  }
15164 C = ConstantVector::get(CV);
15165 CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
15166 SDValue Val = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
15167 MachinePointerInfo::getConstantPool(),
15168 false, false, false, 16);
15169 // If the magnitude operand wasn't a constant, we need to AND out the sign.
15170 if (!isa<ConstantFPSDNode>(Op0))
15171 Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Val);
15173 // OR the magnitude value with the sign bit.
15174 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
15177 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
15178 SDValue N0 = Op.getOperand(0);
15180 MVT VT = Op.getSimpleValueType();
15182 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
15183 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
15184 DAG.getConstant(1, VT));
15185 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
15188 // Check whether an OR'd tree is PTEST-able.
15189 static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
15190 SelectionDAG &DAG) {
15191 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
15193 if (!Subtarget->hasSSE41())
15196 if (!Op->hasOneUse())
15199 SDNode *N = Op.getNode();
15202 SmallVector<SDValue, 8> Opnds;
15203 DenseMap<SDValue, unsigned> VecInMap;
15204 SmallVector<SDValue, 8> VecIns;
15205 EVT VT = MVT::Other;
15207 // Recognize a special case where a vector is casted into wide integer to
15209 Opnds.push_back(N->getOperand(0));
15210 Opnds.push_back(N->getOperand(1));
15212 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
15213 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
15214 // BFS traverse all OR'd operands.
15215 if (I->getOpcode() == ISD::OR) {
15216 Opnds.push_back(I->getOperand(0));
15217 Opnds.push_back(I->getOperand(1));
15218 // Re-evaluate the number of nodes to be traversed.
15219 e += 2; // 2 more nodes (LHS and RHS) are pushed.
    // Quit if this is not an EXTRACT_VECTOR_ELT.
15224 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
15227 // Quit if without a constant index.
15228 SDValue Idx = I->getOperand(1);
15229 if (!isa<ConstantSDNode>(Idx))
15232 SDValue ExtractedFromVec = I->getOperand(0);
15233 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
15234 if (M == VecInMap.end()) {
15235 VT = ExtractedFromVec.getValueType();
15236 // Quit if not 128/256-bit vector.
15237 if (!VT.is128BitVector() && !VT.is256BitVector())
15239 // Quit if not the same type.
15240 if (VecInMap.begin() != VecInMap.end() &&
15241 VT != VecInMap.begin()->first.getValueType())
15243 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
15244 VecIns.push_back(ExtractedFromVec);
15246 M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
15249 assert((VT.is128BitVector() || VT.is256BitVector()) &&
15250 "Not extracted from 128-/256-bit vector.");
15252 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
15254 for (DenseMap<SDValue, unsigned>::const_iterator
15255 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
15256 // Quit if not all elements are used.
15257 if (I->second != FullMask)
15261 EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
15263 // Cast all vectors into TestVT for PTEST.
15264 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
15265 VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]);
  // If more than one full vector is evaluated, OR them together before the PTEST.
15268 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
15269 // Each iteration will OR 2 nodes and append the result until there is only
15270 // 1 node left, i.e. the final OR'd value of all vectors.
15271 SDValue LHS = VecIns[Slot];
15272 SDValue RHS = VecIns[Slot + 1];
15273 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
15276 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
15277 VecIns.back(), VecIns.back());
15280 /// \brief return true if \c Op has a use that doesn't just read flags.
15281 static bool hasNonFlagsUse(SDValue Op) {
15282 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
15284 SDNode *User = *UI;
15285 unsigned UOpNo = UI.getOperandNo();
15286 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
      // Look past the truncate.
15288 UOpNo = User->use_begin().getOperandNo();
15289 User = *User->use_begin();
15292 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
15293 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
15299 /// Emit nodes that will be selected as "test Op0,Op0", or something
15301 SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
15302 SelectionDAG &DAG) const {
15303 if (Op.getValueType() == MVT::i1) {
15304 SDValue ExtOp = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Op);
15305 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, ExtOp,
15306 DAG.getConstant(0, MVT::i8));
15308 // CF and OF aren't always set the way we want. Determine which
15309 // of these we need.
15310 bool NeedCF = false;
15311 bool NeedOF = false;
15314 case X86::COND_A: case X86::COND_AE:
15315 case X86::COND_B: case X86::COND_BE:
15318 case X86::COND_G: case X86::COND_GE:
15319 case X86::COND_L: case X86::COND_LE:
15320 case X86::COND_O: case X86::COND_NO: {
15321 // Check if we really need to set the
15322 // Overflow flag. If NoSignedWrap is present
15323 // that is not actually needed.
15324 switch (Op->getOpcode()) {
15329 const BinaryWithFlagsSDNode *BinNode =
15330 cast<BinaryWithFlagsSDNode>(Op.getNode());
15331 if (BinNode->hasNoSignedWrap())
15341 // See if we can use the EFLAGS value from the operand instead of
15342 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
15343 // we prove that the arithmetic won't overflow, we can't use OF or CF.
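  // For example, an unsigned compare (COND_B/COND_A) needs CF, which TEST
  // always clears, so we cannot reuse the flags of the producing node and fall
  // back to an explicit CMP against zero below.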
15344 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
15345 // Emit a CMP with 0, which is the TEST pattern.
15346 //if (Op.getValueType() == MVT::i1)
15347 // return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op,
15348 // DAG.getConstant(0, MVT::i1));
15349 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15350 DAG.getConstant(0, Op.getValueType()));
15352 unsigned Opcode = 0;
15353 unsigned NumOperands = 0;
15355 // Truncate operations may prevent the merge of the SETCC instruction
15356 // and the arithmetic instruction before it. Attempt to truncate the operands
15357 // of the arithmetic instruction and use a reduced bit-width instruction.
15358 bool NeedTruncation = false;
15359 SDValue ArithOp = Op;
15360 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
15361 SDValue Arith = Op->getOperand(0);
15362 // Both the trunc and the arithmetic op need to have one user each.
15363 if (Arith->hasOneUse())
15364 switch (Arith.getOpcode()) {
15371 NeedTruncation = true;
15377 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
15378 // which may be the result of a CAST. We use the variable 'Op', which is the
15379 // non-casted variable when we check for possible users.
15380 switch (ArithOp.getOpcode()) {
15382 // Due to an isel shortcoming, be conservative if this add is likely to be
15383 // selected as part of a load-modify-store instruction. When the root node
15384 // in a match is a store, isel doesn't know how to remap non-chain non-flag
15385 // uses of other nodes in the match, such as the ADD in this case. This
15386 // leads to the ADD being left around and reselected, with the result being
15387 // two adds in the output. Alas, even if none our users are stores, that
15388 // doesn't prove we're O.K. Ergo, if we have any parents that aren't
15389 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
15390 // climbing the DAG back to the root, and it doesn't seem to be worth the
15392 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15393 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15394 if (UI->getOpcode() != ISD::CopyToReg &&
15395 UI->getOpcode() != ISD::SETCC &&
15396 UI->getOpcode() != ISD::STORE)
15399 if (ConstantSDNode *C =
15400 dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
15401 // An add of one will be selected as an INC.
15402 if (C->getAPIntValue() == 1 && !Subtarget->slowIncDec()) {
15403 Opcode = X86ISD::INC;
15408 // An add of negative one (subtract of one) will be selected as a DEC.
15409 if (C->getAPIntValue().isAllOnesValue() && !Subtarget->slowIncDec()) {
15410 Opcode = X86ISD::DEC;
15416 // Otherwise use a regular EFLAGS-setting add.
15417 Opcode = X86ISD::ADD;
15422 // If we have a constant logical shift that's only used in a comparison
15423 // against zero turn it into an equivalent AND. This allows turning it into
15424 // a TEST instruction later.
15425 if ((X86CC == X86::COND_E || X86CC == X86::COND_NE) && Op->hasOneUse() &&
15426 isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
15427 EVT VT = Op.getValueType();
15428 unsigned BitWidth = VT.getSizeInBits();
15429 unsigned ShAmt = Op->getConstantOperandVal(1);
15430 if (ShAmt >= BitWidth) // Avoid undefined shifts.
15432 APInt Mask = ArithOp.getOpcode() == ISD::SRL
15433 ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
15434 : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
15435 if (!Mask.isSignedIntN(32)) // Avoid large immediates.
15437 SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
15438 DAG.getConstant(Mask, VT));
15439 DAG.ReplaceAllUsesWith(Op, New);
    // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
15446 // because a TEST instruction will be better.
15447 if (!hasNonFlagsUse(Op))
15453 // Due to the ISEL shortcoming noted above, be conservative if this op is
15454 // likely to be selected as part of a load-modify-store instruction.
15455 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15456 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15457 if (UI->getOpcode() == ISD::STORE)
15460 // Otherwise use a regular EFLAGS-setting instruction.
15461 switch (ArithOp.getOpcode()) {
15462 default: llvm_unreachable("unexpected operator!");
15463 case ISD::SUB: Opcode = X86ISD::SUB; break;
15464 case ISD::XOR: Opcode = X86ISD::XOR; break;
15465 case ISD::AND: Opcode = X86ISD::AND; break;
15467 if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
15468 SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG);
15469 if (EFLAGS.getNode())
15472 Opcode = X86ISD::OR;
15486 return SDValue(Op.getNode(), 1);
15492 // If we found that truncation is beneficial, perform the truncation and
15494 if (NeedTruncation) {
15495 EVT VT = Op.getValueType();
15496 SDValue WideVal = Op->getOperand(0);
15497 EVT WideVT = WideVal.getValueType();
15498 unsigned ConvertedOp = 0;
15499 // Use a target machine opcode to prevent further DAGCombine
15500 // optimizations that may separate the arithmetic operations
15501 // from the setcc node.
15502 switch (WideVal.getOpcode()) {
15504 case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
15505 case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
15506 case ISD::AND: ConvertedOp = X86ISD::AND; break;
15507 case ISD::OR: ConvertedOp = X86ISD::OR; break;
15508 case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
15512 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15513 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
15514 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
15515 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
15516 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1);
15522 // Emit a CMP with 0, which is the TEST pattern.
15523 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15524 DAG.getConstant(0, Op.getValueType()));
15526 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
15527 SmallVector<SDValue, 4> Ops;
15528 for (unsigned i = 0; i != NumOperands; ++i)
15529 Ops.push_back(Op.getOperand(i));
15531 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
15532 DAG.ReplaceAllUsesWith(Op, New);
15533 return SDValue(New.getNode(), 1);
15536 /// Emit nodes that will be selected as "cmp Op0,Op1", or something
15538 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
15539 SDLoc dl, SelectionDAG &DAG) const {
15540 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) {
15541 if (C->getAPIntValue() == 0)
15542 return EmitTest(Op0, X86CC, dl, DAG);
15544 if (Op0.getValueType() == MVT::i1)
15545 llvm_unreachable("Unexpected comparison operation for MVT::i1 operands");
15548 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
15549 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
15550 // Do the comparison at i32 if it's smaller, besides the Atom case.
15551 // This avoids subregister aliasing issues. Keep the smaller reference
15552 // if we're optimizing for size, however, as that'll allow better folding
15553 // of memory operations.
15554 if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
15555 !DAG.getMachineFunction().getFunction()->hasFnAttribute(
15556 Attribute::MinSize) &&
15557 !Subtarget->isAtom()) {
15558 unsigned ExtendOp =
15559 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
15560 Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
15561 Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
15563 // Use SUB instead of CMP to enable CSE between SUB and CMP.
15564 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
15565 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs,
15567 return SDValue(Sub.getNode(), 1);
15569 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
15572 /// Convert a comparison if required by the subtarget.
15573 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
15574 SelectionDAG &DAG) const {
15575 // If the subtarget does not support the FUCOMI instruction, floating-point
15576 // comparisons have to be converted.
15577 if (Subtarget->hasCMov() ||
15578 Cmp.getOpcode() != X86ISD::CMP ||
15579 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
15580 !Cmp.getOperand(1).getValueType().isFloatingPoint())
15583 // The instruction selector will select an FUCOM instruction instead of
15584 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
15585 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
15586 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
15588 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
15589 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
15590 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
15591 DAG.getConstant(8, MVT::i8));
15592 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
15593 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
15596 /// The minimum architected relative accuracy is 2^-12. We need one
15597 /// Newton-Raphson step to have a good float result (24 bits of precision).
15598 SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
15599 DAGCombinerInfo &DCI,
15600 unsigned &RefinementSteps,
15601 bool &UseOneConstNR) const {
15602 // FIXME: We should use instruction latency models to calculate the cost of
15603 // each potential sequence, but this is very hard to do reliably because
15604 // at least Intel's Core* chips have variable timing based on the number of
15605 // significant digits in the divisor and/or sqrt operand.
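// Illustrative note: one Newton-Raphson refinement step for rsqrt(a) is
//   x1 = x0 * (1.5 - 0.5 * a * x0 * x0)
// where x0 is the hardware estimate of 1/sqrt(a); the single refinement step
// requested below corresponds to one such iteration.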
15606 if (!Subtarget->useSqrtEst())
15609 EVT VT = Op.getValueType();
15611 // SSE1 has rsqrtss and rsqrtps.
15612 // TODO: Add support for AVX512 (v16f32).
15613 // It is likely not profitable to do this for f64 because a double-precision
15614 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
15615 // instructions: convert to single, rsqrtss, convert back to double, refine
15616 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
15617 // along with FMA, this could be a throughput win.
15618 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15619 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15620 RefinementSteps = 1;
15621 UseOneConstNR = false;
15622 return DCI.DAG.getNode(X86ISD::FRSQRT, SDLoc(Op), VT, Op);
15627 /// The minimum architected relative accuracy is 2^-12. We need one
15628 /// Newton-Raphson step to have a good float result (24 bits of precision).
15629 SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
15630 DAGCombinerInfo &DCI,
15631 unsigned &RefinementSteps) const {
15632 // FIXME: We should use instruction latency models to calculate the cost of
15633 // each potential sequence, but this is very hard to do reliably because
15634 // at least Intel's Core* chips have variable timing based on the number of
15635 // significant digits in the divisor.
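// Illustrative note: one Newton-Raphson refinement step for the reciprocal is
//   x1 = x0 * (2.0 - a * x0)
// where x0 is the hardware estimate of 1/a; RefinementSteps below controls how
// many of these iterations are applied.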
15636 if (!Subtarget->useReciprocalEst())
15639 EVT VT = Op.getValueType();
15641 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
15642 // TODO: Add support for AVX512 (v16f32).
15643 // It is likely not profitable to do this for f64 because a double-precision
15644 // reciprocal estimate with refinement on x86 prior to FMA requires
15645 // 15 instructions: convert to single, rcpss, convert back to double, refine
15646 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
15647 // along with FMA, this could be a throughput win.
15648 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15649 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15650 RefinementSteps = ReciprocalEstimateRefinementSteps;
15651 return DCI.DAG.getNode(X86ISD::FRCP, SDLoc(Op), VT, Op);
15656 static bool isAllOnes(SDValue V) {
15657 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
15658 return C && C->isAllOnesValue();
15661 /// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
15662 /// if possible.
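// For example (illustrative): "((x >> 5) & 1) != 0" can be selected as
//   bt x, 5 ; setb
// i.e. an X86ISD::SETCC of COND_B on an X86ISD::BT node; the SETEQ form uses
// COND_AE instead, as the code below chooses.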
15663 SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
15664 SDLoc dl, SelectionDAG &DAG) const {
15665 SDValue Op0 = And.getOperand(0);
15666 SDValue Op1 = And.getOperand(1);
15667 if (Op0.getOpcode() == ISD::TRUNCATE)
15668 Op0 = Op0.getOperand(0);
15669 if (Op1.getOpcode() == ISD::TRUNCATE)
15670 Op1 = Op1.getOperand(0);
15673 if (Op1.getOpcode() == ISD::SHL)
15674 std::swap(Op0, Op1);
15675 if (Op0.getOpcode() == ISD::SHL) {
15676 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0)))
15677 if (And00C->getZExtValue() == 1) {
15678 // If we looked past a truncate, check that it's only truncating away known zeros.
15680 unsigned BitWidth = Op0.getValueSizeInBits();
15681 unsigned AndBitWidth = And.getValueSizeInBits();
15682 if (BitWidth > AndBitWidth) {
15684 DAG.computeKnownBits(Op0, Zeros, Ones);
15685 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
15689 RHS = Op0.getOperand(1);
15691 } else if (Op1.getOpcode() == ISD::Constant) {
15692 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
15693 uint64_t AndRHSVal = AndRHS->getZExtValue();
15694 SDValue AndLHS = Op0;
15696 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
15697 LHS = AndLHS.getOperand(0);
15698 RHS = AndLHS.getOperand(1);
15701 // Use BT if the immediate can't be encoded in a TEST instruction.
15702 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
15704 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType());
15708 if (LHS.getNode()) {
15709 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
15710 // instruction. Since the shift amount is in-range-or-undefined, we know
15711 // that doing a bittest on the i32 value is ok. We extend to i32 because
15712 // the encoding for the i16 version is larger than the i32 version.
15713 // Also promote i16 to i32 for performance / code size reasons.
15714 if (LHS.getValueType() == MVT::i8 ||
15715 LHS.getValueType() == MVT::i16)
15716 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
15718 // If the operand types disagree, extend the shift amount to match. Since
15719 // BT ignores high bits (like shifts) we can use anyextend.
15720 if (LHS.getValueType() != RHS.getValueType())
15721 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
15723 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
15724 X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
15725 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15726 DAG.getConstant(Cond, MVT::i8), BT);
15732 /// \brief Turns an ISD::CondCode into an SSE floating-point comparison immediate.
15734 static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
15739 // SSE Condition code mapping:
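//  (For reference, the cmpps/cmppd immediate predicate encoding used below:)
//   0 - EQ
//   1 - LT
//   2 - LE
//   3 - UNORD
//   4 - NEQ
//   5 - NLT
//   6 - NLE
//   7 - ORD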
15748 switch (SetCCOpcode) {
15749 default: llvm_unreachable("Unexpected SETCC condition");
15751 case ISD::SETEQ: SSECC = 0; break;
15753 case ISD::SETGT: Swap = true; // Fallthrough
15755 case ISD::SETOLT: SSECC = 1; break;
15757 case ISD::SETGE: Swap = true; // Fallthrough
15759 case ISD::SETOLE: SSECC = 2; break;
15760 case ISD::SETUO: SSECC = 3; break;
15762 case ISD::SETNE: SSECC = 4; break;
15763 case ISD::SETULE: Swap = true; // Fallthrough
15764 case ISD::SETUGE: SSECC = 5; break;
15765 case ISD::SETULT: Swap = true; // Fallthrough
15766 case ISD::SETUGT: SSECC = 6; break;
15767 case ISD::SETO: SSECC = 7; break;
15769 case ISD::SETONE: SSECC = 8; break;
15772 std::swap(Op0, Op1);
15777 // Lower256IntVSETCC - Break a 256-bit integer VSETCC into two new 128-bit
15778 // ones, and then concatenate the result back.
15779 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
15780 MVT VT = Op.getSimpleValueType();
15782 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
15783 "Unsupported value type for operation");
15785 unsigned NumElems = VT.getVectorNumElements();
15787 SDValue CC = Op.getOperand(2);
15789 // Extract the LHS vectors
15790 SDValue LHS = Op.getOperand(0);
15791 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
15792 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
15794 // Extract the RHS vectors
15795 SDValue RHS = Op.getOperand(1);
15796 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
15797 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
15799 // Issue the operation on the smaller types and concatenate the result back
15800 MVT EltVT = VT.getVectorElementType();
15801 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
15802 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
15803 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
15804 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
15807 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
15808 const X86Subtarget *Subtarget) {
15809 SDValue Op0 = Op.getOperand(0);
15810 SDValue Op1 = Op.getOperand(1);
15811 SDValue CC = Op.getOperand(2);
15812 MVT VT = Op.getSimpleValueType();
15815 assert(Op0.getValueType().getVectorElementType().getSizeInBits() >= 8 &&
15816 Op.getValueType().getScalarType() == MVT::i1 &&
15817 "Cannot set masked compare for this operation");
15819 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15821 bool Unsigned = false;
15824 switch (SetCCOpcode) {
15825 default: llvm_unreachable("Unexpected SETCC condition");
15826 case ISD::SETNE: SSECC = 4; break;
15827 case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
15828 case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
15829 case ISD::SETLT: Swap = true; //fall-through
15830 case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
15831 case ISD::SETULT: SSECC = 1; Unsigned = true; break;
15832 case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
15833 case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
15834 case ISD::SETULE: Unsigned = true; //fall-through
15835 case ISD::SETLE: SSECC = 2; break;
15839 std::swap(Op0, Op1);
15841 return DAG.getNode(Opc, dl, VT, Op0, Op1);
15842 Opc = Unsigned ? X86ISD::CMPMU: X86ISD::CMPM;
15843 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15844 DAG.getConstant(SSECC, MVT::i8));
15847 /// \brief Try to turn a VSETULT into a VSETULE by modifying its second
15848 /// operand \p Op1. If non-trivial (for example because it's not constant)
15849 /// return an empty value.
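// For example (illustrative): "x <u <4, 4, 4, 4>" can become
// "x <=u <3, 3, 3, 3>". The loop below bails out if any constant element is
// zero, since subtracting one from it would wrap around.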
15850 static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
15852 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
15856 MVT VT = Op1.getSimpleValueType();
15857 MVT EVT = VT.getVectorElementType();
15858 unsigned n = VT.getVectorNumElements();
15859 SmallVector<SDValue, 8> ULTOp1;
15861 for (unsigned i = 0; i < n; ++i) {
15862 ConstantSDNode *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
15863 if (!Elt || Elt->isOpaque() || Elt->getValueType(0) != EVT)
15866 // Avoid underflow.
15867 APInt Val = Elt->getAPIntValue();
15871 ULTOp1.push_back(DAG.getConstant(Val - 1, EVT));
15874 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
15877 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
15878 SelectionDAG &DAG) {
15879 SDValue Op0 = Op.getOperand(0);
15880 SDValue Op1 = Op.getOperand(1);
15881 SDValue CC = Op.getOperand(2);
15882 MVT VT = Op.getSimpleValueType();
15883 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15884 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
15889 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
15890 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
15893 unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
15894 unsigned Opc = X86ISD::CMPP;
15895 if (Subtarget->hasAVX512() && VT.getVectorElementType() == MVT::i1) {
15896 assert(VT.getVectorNumElements() <= 16);
15897 Opc = X86ISD::CMPM;
15899 // In the two special cases we can't handle, emit two comparisons.
15902 unsigned CombineOpc;
15903 if (SetCCOpcode == ISD::SETUEQ) {
15904 CC0 = 3; CC1 = 0; CombineOpc = ISD::OR;
15906 assert(SetCCOpcode == ISD::SETONE);
15907 CC0 = 7; CC1 = 4; CombineOpc = ISD::AND;
15910 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15911 DAG.getConstant(CC0, MVT::i8));
15912 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15913 DAG.getConstant(CC1, MVT::i8));
15914 return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
15916 // Handle all other FP comparisons here.
15917 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15918 DAG.getConstant(SSECC, MVT::i8));
15921 // Break 256-bit integer vector compare into smaller ones.
15922 if (VT.is256BitVector() && !Subtarget->hasInt256())
15923 return Lower256IntVSETCC(Op, DAG);
15925 bool MaskResult = (VT.getVectorElementType() == MVT::i1);
15926 EVT OpVT = Op1.getValueType();
15927 if (Subtarget->hasAVX512()) {
15928 if (Op1.getValueType().is512BitVector() ||
15929 (Subtarget->hasBWI() && Subtarget->hasVLX()) ||
15930 (MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32))
15931 return LowerIntVSETCC_AVX512(Op, DAG, Subtarget);
15933 // In the AVX-512 architecture, setcc returns a mask with i1 elements,
15934 // but there is no compare instruction for i8 and i16 elements on KNL.
15935 // We are not talking about 512-bit operands in this case; those
15936 // types are illegal.
15938 (OpVT.getVectorElementType().getSizeInBits() < 32 &&
15939 OpVT.getVectorElementType().getSizeInBits() >= 8))
15940 return DAG.getNode(ISD::TRUNCATE, dl, VT,
15941 DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
15944 // We are handling one of the integer comparisons here. Since SSE only has
15945 // GT and EQ comparisons for integers, swapping operands and multiple
15946 // operations may be required for some comparisons.
15948 bool Swap = false, Invert = false, FlipSigns = false, MinMax = false;
15949 bool Subus = false;
15951 switch (SetCCOpcode) {
15952 default: llvm_unreachable("Unexpected SETCC condition");
15953 case ISD::SETNE: Invert = true;
15954 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
15955 case ISD::SETLT: Swap = true;
15956 case ISD::SETGT: Opc = X86ISD::PCMPGT; break;
15957 case ISD::SETGE: Swap = true;
15958 case ISD::SETLE: Opc = X86ISD::PCMPGT;
15959 Invert = true; break;
15960 case ISD::SETULT: Swap = true;
15961 case ISD::SETUGT: Opc = X86ISD::PCMPGT;
15962 FlipSigns = true; break;
15963 case ISD::SETUGE: Swap = true;
15964 case ISD::SETULE: Opc = X86ISD::PCMPGT;
15965 FlipSigns = true; Invert = true; break;
15968 // Special case: Use min/max operations for SETULE/SETUGE
15969 MVT VET = VT.getVectorElementType();
15971 (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
15972 || (Subtarget->hasSSE2() && (VET == MVT::i8));
15975 switch (SetCCOpcode) {
15977 case ISD::SETULE: Opc = X86ISD::UMIN; MinMax = true; break;
15978 case ISD::SETUGE: Opc = X86ISD::UMAX; MinMax = true; break;
15981 if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
15984 bool hasSubus = Subtarget->hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
15985 if (!MinMax && hasSubus) {
15986 // As another special case, use PSUBUS[BW] when it's profitable. E.g. for
15988 // t = psubus Op0, Op1
15989 // pcmpeq t, <0..0>
15990 switch (SetCCOpcode) {
15992 case ISD::SETULT: {
15993 // If the comparison is against a constant we can turn this into a
15994 // setule. With psubus, setule does not require a swap. This is
15995 // beneficial because the constant in the register is no longer
15996 // clobbered as the destination, so it can be hoisted out of a loop.
15997 // Only do this pre-AVX, since vpcmp* is no longer destructive.
15998 if (Subtarget->hasAVX())
16000 SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
16001 if (ULEOp1.getNode()) {
16003 Subus = true; Invert = false; Swap = false;
16007 // Psubus is better than flip-sign because it requires no inversion.
16008 case ISD::SETUGE: Subus = true; Invert = false; Swap = true; break;
16009 case ISD::SETULE: Subus = true; Invert = false; Swap = false; break;
16013 Opc = X86ISD::SUBUS;
16019 std::swap(Op0, Op1);
16021 // Check that the operation in question is available (most are plain SSE2,
16022 // but PCMPGTQ and PCMPEQQ have different requirements).
16023 if (VT == MVT::v2i64) {
16024 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) {
16025 assert(Subtarget->hasSSE2() && "Don't know how to lower!");
16027 // First cast everything to the right type.
16028 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
16029 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
16031 // Since SSE has no unsigned integer comparisons, we need to flip the sign
16032 // bits of the inputs before performing those operations. The lower
16033 // compare is always unsigned.
16036 SB = DAG.getConstant(0x80000000U, MVT::v4i32);
16038 SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32);
16039 SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32);
16040 SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
16041 Sign, Zero, Sign, Zero);
16043 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
16044 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
16046 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
16047 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
16048 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
16050 // Create masks for only the low parts/high parts of the 64-bit integers.
16051 static const int MaskHi[] = { 1, 1, 3, 3 };
16052 static const int MaskLo[] = { 0, 0, 2, 2 };
16053 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
16054 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
16055 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
16057 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
16058 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
16061 Result = DAG.getNOT(dl, Result, MVT::v4i32);
16063 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
16066 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) {
16067 // If pcmpeqq is missing but pcmpeqd is available, synthesize pcmpeqq with
16068 // pcmpeqd + pshufd + pand.
16069 assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!");
16071 // First cast everything to the right type.
16072 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
16073 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
16076 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
16078 // Make sure the lower and upper halves are both all-ones.
16079 static const int Mask[] = { 1, 0, 3, 2 };
16080 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
16081 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
16084 Result = DAG.getNOT(dl, Result, MVT::v4i32);
16086 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
16090 // Since SSE has no unsigned integer comparisons, we need to flip the sign
16091 // bits of the inputs before performing those operations.
16093 EVT EltVT = VT.getVectorElementType();
16094 SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT);
16095 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB);
16096 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB);
16099 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
16101 // If the logical-not of the result is required, perform that now.
16103 Result = DAG.getNOT(dl, Result, VT);
16106 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
16109 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
16110 getZeroVector(VT, Subtarget, DAG, dl));
16115 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
16117 MVT VT = Op.getSimpleValueType();
16119 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
16121 assert(((!Subtarget->hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
16122 && "SetCC type must be 8-bit or 1-bit integer");
16123 SDValue Op0 = Op.getOperand(0);
16124 SDValue Op1 = Op.getOperand(1);
16126 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
16128 // Optimize to BT if possible.
16129 // Lower (X & (1 << N)) == 0 to BT(X, N).
16130 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
16131 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
16132 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
16133 Op1.getOpcode() == ISD::Constant &&
16134 cast<ConstantSDNode>(Op1)->isNullValue() &&
16135 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16136 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
16137 if (NewSetCC.getNode()) {
16139 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
16144 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of
16146 if (Op1.getOpcode() == ISD::Constant &&
16147 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
16148 cast<ConstantSDNode>(Op1)->isNullValue()) &&
16149 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16151 // If the input is a setcc, then reuse the input setcc or use a new one with
16152 // the inverted condition.
16153 if (Op0.getOpcode() == X86ISD::SETCC) {
16154 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
16155 bool Invert = (CC == ISD::SETNE) ^
16156 cast<ConstantSDNode>(Op1)->isNullValue();
16160 CCode = X86::GetOppositeBranchCondition(CCode);
16161 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
16162 DAG.getConstant(CCode, MVT::i8),
16163 Op0.getOperand(1));
16165 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
16169 if ((Op0.getValueType() == MVT::i1) && (Op1.getOpcode() == ISD::Constant) &&
16170 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1) &&
16171 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16173 ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
16174 return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, MVT::i1), NewCC);
16177 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
16178 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
16179 if (X86CC == X86::COND_INVALID)
16182 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
16183 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
16184 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
16185 DAG.getConstant(X86CC, MVT::i8), EFLAGS);
16187 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
16191 // isX86LogicalCmp - Return true if opcode is an X86 logical comparison.
16192 static bool isX86LogicalCmp(SDValue Op) {
16193 unsigned Opc = Op.getNode()->getOpcode();
16194 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
16195 Opc == X86ISD::SAHF)
16197 if (Op.getResNo() == 1 &&
16198 (Opc == X86ISD::ADD ||
16199 Opc == X86ISD::SUB ||
16200 Opc == X86ISD::ADC ||
16201 Opc == X86ISD::SBB ||
16202 Opc == X86ISD::SMUL ||
16203 Opc == X86ISD::UMUL ||
16204 Opc == X86ISD::INC ||
16205 Opc == X86ISD::DEC ||
16206 Opc == X86ISD::OR ||
16207 Opc == X86ISD::XOR ||
16208 Opc == X86ISD::AND))
16211 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
16217 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
16218 if (V.getOpcode() != ISD::TRUNCATE)
16221 SDValue VOp0 = V.getOperand(0);
16222 unsigned InBits = VOp0.getValueSizeInBits();
16223 unsigned Bits = V.getValueSizeInBits();
16224 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
16227 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
16228 bool addTest = true;
16229 SDValue Cond = Op.getOperand(0);
16230 SDValue Op1 = Op.getOperand(1);
16231 SDValue Op2 = Op.getOperand(2);
16233 EVT VT = Op1.getValueType();
16236 // Lower fp selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
16237 // are available. Otherwise fp cmovs get lowered into a less efficient branch
16238 // sequence later on.
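// Conceptually (illustrative): mask = fsetcc(cond);
//   result = (mask & trueval) | (~mask & falseval)
// which is exactly what the FSETCC / FAND / FANDN / FOR nodes built below
// implement for the pre-AVX512 path.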
16239 if (Cond.getOpcode() == ISD::SETCC &&
16240 ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
16241 (Subtarget->hasSSE1() && VT == MVT::f32)) &&
16242 VT == Cond.getOperand(0).getValueType() && Cond->hasOneUse()) {
16243 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
16244 int SSECC = translateX86FSETCC(
16245 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
16248 if (Subtarget->hasAVX512()) {
16249 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
16250 DAG.getConstant(SSECC, MVT::i8));
16251 return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
16253 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
16254 DAG.getConstant(SSECC, MVT::i8));
16255 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
16256 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
16257 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
16261 if (Cond.getOpcode() == ISD::SETCC) {
16262 SDValue NewCond = LowerSETCC(Cond, DAG);
16263 if (NewCond.getNode())
16267 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
16268 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
16269 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
16270 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
16271 if (Cond.getOpcode() == X86ISD::SETCC &&
16272 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
16273 isZero(Cond.getOperand(1).getOperand(1))) {
16274 SDValue Cmp = Cond.getOperand(1);
16276 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
16278 if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
16279 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
16280 SDValue Y = isAllOnes(Op2) ? Op1 : Op2;
16282 SDValue CmpOp0 = Cmp.getOperand(0);
16283 // Apply further optimizations for special cases
16284 // (select (x != 0), -1, 0) -> neg & sbb
16285 // (select (x == 0), 0, -1) -> neg & sbb
16286 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y))
16287 if (YC->isNullValue() &&
16288 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) {
16289 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
16290 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
16291 DAG.getConstant(0, CmpOp0.getValueType()),
16293 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16294 DAG.getConstant(X86::COND_B, MVT::i8),
16295 SDValue(Neg.getNode(), 1));
16299 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
16300 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
16301 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16303 SDValue Res = // Res = 0 or -1.
16304 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16305 DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
16307 if (isAllOnes(Op1) != (CondCode == X86::COND_E))
16308 Res = DAG.getNOT(DL, Res, Res.getValueType());
16310 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
16311 if (!N2C || !N2C->isNullValue())
16312 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
16317 // Look past (and (setcc_carry (cmp ...)), 1).
16318 if (Cond.getOpcode() == ISD::AND &&
16319 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16320 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16321 if (C && C->getAPIntValue() == 1)
16322 Cond = Cond.getOperand(0);
16325 // If the condition flag is set by an X86ISD::CMP, then use it as the
16326 // condition-setting operand in place of the X86ISD::SETCC.
16327 unsigned CondOpcode = Cond.getOpcode();
16328 if (CondOpcode == X86ISD::SETCC ||
16329 CondOpcode == X86ISD::SETCC_CARRY) {
16330 CC = Cond.getOperand(0);
16332 SDValue Cmp = Cond.getOperand(1);
16333 unsigned Opc = Cmp.getOpcode();
16334 MVT VT = Op.getSimpleValueType();
16336 bool IllegalFPCMov = false;
16337 if (VT.isFloatingPoint() && !VT.isVector() &&
16338 !isScalarFPTypeInSSEReg(VT)) // FPStack?
16339 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
16341 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
16342 Opc == X86ISD::BT) { // FIXME
16346 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16347 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16348 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16349 Cond.getOperand(0).getValueType() != MVT::i8)) {
16350 SDValue LHS = Cond.getOperand(0);
16351 SDValue RHS = Cond.getOperand(1);
16352 unsigned X86Opcode;
16355 switch (CondOpcode) {
16356 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16357 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16358 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16359 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16360 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16361 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16362 default: llvm_unreachable("unexpected overflowing operator");
16364 if (CondOpcode == ISD::UMULO)
16365 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16368 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16370 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
16372 if (CondOpcode == ISD::UMULO)
16373 Cond = X86Op.getValue(2);
16375 Cond = X86Op.getValue(1);
16377 CC = DAG.getConstant(X86Cond, MVT::i8);
16382 // Look past the truncate if the high bits are known zero.
16383 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16384 Cond = Cond.getOperand(0);
16386 // We know the result of AND is compared against zero. Try to match it to BT.
16388 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16389 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG);
16390 if (NewSetCC.getNode()) {
16391 CC = NewSetCC.getOperand(0);
16392 Cond = NewSetCC.getOperand(1);
16399 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16400 Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
16403 // a < b ? -1 : 0 -> RES = ~setcc_carry
16404 // a < b ? 0 : -1 -> RES = setcc_carry
16405 // a >= b ? -1 : 0 -> RES = setcc_carry
16406 // a >= b ? 0 : -1 -> RES = ~setcc_carry
16407 if (Cond.getOpcode() == X86ISD::SUB) {
16408 Cond = ConvertCmpIfNecessary(Cond, DAG);
16409 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
16411 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
16412 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
16413 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16414 DAG.getConstant(X86::COND_B, MVT::i8), Cond);
16415 if (isAllOnes(Op1) != (CondCode == X86::COND_B))
16416 return DAG.getNOT(DL, Res, Res.getValueType());
16421 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate,
16422 // widen the cmov and push the truncate through. This avoids introducing a new
16423 // branch during isel and doesn't add any extensions.
16424 if (Op.getValueType() == MVT::i8 &&
16425 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
16426 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
16427 if (T1.getValueType() == T2.getValueType() &&
16428 // Blacklist CopyFromReg to avoid partial register stalls.
16429 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
16430 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
16431 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond);
16432 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
16436 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if the
16437 // condition is true.
16438 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
16439 SDValue Ops[] = { Op2, Op1, CC, Cond };
16440 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops);
16443 static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, const X86Subtarget *Subtarget,
16444 SelectionDAG &DAG) {
16445 MVT VT = Op->getSimpleValueType(0);
16446 SDValue In = Op->getOperand(0);
16447 MVT InVT = In.getSimpleValueType();
16448 MVT VTElt = VT.getVectorElementType();
16449 MVT InVTElt = InVT.getVectorElementType();
16453 if ((InVTElt == MVT::i1) &&
16454 (((Subtarget->hasBWI() && Subtarget->hasVLX() &&
16455 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() <= 16)) ||
16457 ((Subtarget->hasBWI() && VT.is512BitVector() &&
16458 VTElt.getSizeInBits() <= 16)) ||
16460 ((Subtarget->hasDQI() && Subtarget->hasVLX() &&
16461 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) ||
16463 ((Subtarget->hasDQI() && VT.is512BitVector() &&
16464 VTElt.getSizeInBits() >= 32))))
16465 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16467 unsigned int NumElts = VT.getVectorNumElements();
16469 if (NumElts != 8 && NumElts != 16)
16472 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) {
16473 if (In.getOpcode() == X86ISD::VSEXT || In.getOpcode() == X86ISD::VZEXT)
16474 return DAG.getNode(In.getOpcode(), dl, VT, In.getOperand(0));
16475 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16478 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16479 assert (InVT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
16481 MVT ExtVT = (NumElts == 8) ? MVT::v8i64 : MVT::v16i32;
16482 Constant *C = ConstantInt::get(*DAG.getContext(),
16483 APInt::getAllOnesValue(ExtVT.getScalarType().getSizeInBits()));
16485 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
16486 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
16487 SDValue Ld = DAG.getLoad(ExtVT.getScalarType(), dl, DAG.getEntryNode(), CP,
16488 MachinePointerInfo::getConstantPool(),
16489 false, false, false, Alignment);
16490 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, dl, ExtVT, In, Ld);
16491 if (VT.is512BitVector())
16493 return DAG.getNode(X86ISD::VTRUNC, dl, VT, Brcst);
16496 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
16497 SelectionDAG &DAG) {
16498 MVT VT = Op->getSimpleValueType(0);
16499 SDValue In = Op->getOperand(0);
16500 MVT InVT = In.getSimpleValueType();
16503 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
16504 return LowerSIGN_EXTEND_AVX512(Op, Subtarget, DAG);
16506 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) &&
16507 (VT != MVT::v8i32 || InVT != MVT::v8i16) &&
16508 (VT != MVT::v16i16 || InVT != MVT::v16i8))
16511 if (Subtarget->hasInt256())
16512 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16514 // Optimize vectors in AVX mode:
16515 // sign extend v8i16 to v8i32 and v4i32 to v4i64.
16518 // Divide the input vector into two parts;
16519 // for v4i32 the shuffle masks will be { 0, 1, -1, -1 } and { 2, 3, -1, -1 },
16520 // use the vpmovsx instruction to extend v4i32 -> v2i64 and v8i16 -> v4i32,
16521 // then concat the vectors back to the original VT.
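// For example (illustrative), v8i16 -> v8i32 without AVX2:
//   lo = shuffle In, undef, <0,1,2,3,-1,-1,-1,-1>  ; vpmovsxwd -> v4i32
//   hi = shuffle In, undef, <4,5,6,7,-1,-1,-1,-1>  ; vpmovsxwd -> v4i32
//   result = concat_vectors(lo, hi)                ; v8i32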
16523 unsigned NumElems = InVT.getVectorNumElements();
16524 SDValue Undef = DAG.getUNDEF(InVT);
16526 SmallVector<int,8> ShufMask1(NumElems, -1);
16527 for (unsigned i = 0; i != NumElems/2; ++i)
16530 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]);
16532 SmallVector<int,8> ShufMask2(NumElems, -1);
16533 for (unsigned i = 0; i != NumElems/2; ++i)
16534 ShufMask2[i] = i + NumElems/2;
16536 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]);
16538 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(),
16539 VT.getVectorNumElements()/2);
16541 OpLo = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpLo);
16542 OpHi = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpHi);
16544 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
16547 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
16548 // may emit an illegal shuffle but the expansion is still better than scalar
16549 // code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
16550 // we'll emit a shuffle and an arithmetic shift.
16551 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
16552 // TODO: It is possible to support ZExt by zeroing the undef values during
16553 // the shuffle phase or after the shuffle.
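// Illustrative example of the shuffle+shift path: a sextload of v4i8 to v4i32
// loads the four bytes, shuffles each byte into the most significant byte of
// its 32-bit lane, and then performs an arithmetic shift right by 24 to
// replicate the sign bit into the remaining bits of each lane.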
16554 static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
16555 SelectionDAG &DAG) {
16556 MVT RegVT = Op.getSimpleValueType();
16557 assert(RegVT.isVector() && "We only custom lower vector sext loads.");
16558 assert(RegVT.isInteger() &&
16559 "We only custom lower integer vector sext loads.");
16561 // Nothing useful we can do without SSE2 shuffles.
16562 assert(Subtarget->hasSSE2() && "We only custom lower sext loads with SSE2.");
16564 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
16566 EVT MemVT = Ld->getMemoryVT();
16567 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16568 unsigned RegSz = RegVT.getSizeInBits();
16570 ISD::LoadExtType Ext = Ld->getExtensionType();
16572 assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
16573 && "Only anyext and sext are currently implemented.");
16574 assert(MemVT != RegVT && "Cannot extend to the same type");
16575 assert(MemVT.isVector() && "Must load a vector from memory");
16577 unsigned NumElems = RegVT.getVectorNumElements();
16578 unsigned MemSz = MemVT.getSizeInBits();
16579 assert(RegSz > MemSz && "Register size must be greater than the mem size");
16581 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256()) {
16582 // The only way in which we have a legal 256-bit vector result but not the
16583 // integer 256-bit operations needed to directly lower a sextload is if we
16584 // have AVX1 but not AVX2. In that case, we can always emit a sextload to
16585 // a 128-bit vector and a normal sign_extend to 256-bits that should get
16586 // correctly legalized. We do this late to allow the canonical form of
16587 // sextload to persist throughout the rest of the DAG combiner -- it wants
16588 // to fold together any extensions it can, and so will fuse a sign_extend
16589 // of an sextload into a sextload targeting a wider value.
16591 if (MemSz == 128) {
16592 // Just switch this to a normal load.
16593 assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
16594 "it must be a legal 128-bit vector "
16596 Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
16597 Ld->getPointerInfo(), Ld->isVolatile(), Ld->isNonTemporal(),
16598 Ld->isInvariant(), Ld->getAlignment());
16600 assert(MemSz < 128 &&
16601 "Can't extend a type wider than 128 bits to a 256 bit vector!");
16602 // Do an sext load to a 128-bit vector type. We want to use the same
16603 // number of elements, but elements half as wide. This will end up being
16604 // recursively lowered by this routine, but will succeed as we definitely
16605 // have all the necessary features if we're using AVX1.
16607 EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
16608 EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
16610 DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
16611 Ld->getPointerInfo(), MemVT, Ld->isVolatile(),
16612 Ld->isNonTemporal(), Ld->isInvariant(),
16613 Ld->getAlignment());
16616 // Replace chain users with the new chain.
16617 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
16618 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
16620 // Finally, do a normal sign-extend to the desired register.
16621 return DAG.getSExtOrTrunc(Load, dl, RegVT);
16624 // All sizes must be a power of two.
16625 assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
16626 "Non-power-of-two elements are not custom lowered!");
16628 // Attempt to load the original value using scalar loads.
16629 // Find the largest scalar type that divides the total loaded size.
16630 MVT SclrLoadTy = MVT::i8;
16631 for (MVT Tp : MVT::integer_valuetypes()) {
16632 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
16637 // On 32-bit systems we can't use 64-bit integer loads, so try bitcasting to f64.
16638 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
16640 SclrLoadTy = MVT::f64;
16642 // Calculate the number of scalar loads that we need to perform
16643 // in order to load our vector from memory.
16644 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
16646 assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
16647 "Can only lower sext loads with a single scalar load!");
16649 unsigned loadRegZize = RegSz;
16650 if (Ext == ISD::SEXTLOAD && RegSz == 256)
16653 // Represent our vector as a sequence of elements which are the
16654 // largest scalar that we can load.
16655 EVT LoadUnitVecVT = EVT::getVectorVT(
16656 *DAG.getContext(), SclrLoadTy, loadRegZize / SclrLoadTy.getSizeInBits());
16658 // Represent the data using the same element type that is stored in
16659 // memory. In practice, we "widen" MemVT.
16661 EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
16662 loadRegZize / MemVT.getScalarType().getSizeInBits());
16664 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
16665 "Invalid vector type");
16667 // We can't shuffle using an illegal type.
16668 assert(TLI.isTypeLegal(WideVecVT) &&
16669 "We only lower types that form legal widened vector types");
16671 SmallVector<SDValue, 8> Chains;
16672 SDValue Ptr = Ld->getBasePtr();
16673 SDValue Increment =
16674 DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, TLI.getPointerTy());
16675 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
16677 for (unsigned i = 0; i < NumLoads; ++i) {
16678 // Perform a single load.
16679 SDValue ScalarLoad =
16680 DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
16681 Ld->isVolatile(), Ld->isNonTemporal(), Ld->isInvariant(),
16682 Ld->getAlignment());
16683 Chains.push_back(ScalarLoad.getValue(1));
16684 // Create the first element type using SCALAR_TO_VECTOR in order to avoid
16685 // another round of DAGCombining.
16687 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
16689 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
16690 ScalarLoad, DAG.getIntPtrConstant(i));
16692 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
16695 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
16697 // Bitcast the loaded value to a vector of the original element type, in
16698 // the size of the target vector type.
16699 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res);
16700 unsigned SizeRatio = RegSz / MemSz;
16702 if (Ext == ISD::SEXTLOAD) {
16703 // If we have SSE4.1, we can directly emit a VSEXT node.
16704 if (Subtarget->hasSSE41()) {
16705 SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec);
16706 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16710 // Otherwise we'll shuffle the small elements in the high bits of the
16711 // larger type and perform an arithmetic shift. If the shift is not legal
16712 // it's better to scalarize.
16713 assert(TLI.isOperationLegalOrCustom(ISD::SRA, RegVT) &&
16714 "We can't implement a sext load without an arithmetic right shift!");
16716 // Redistribute the loaded elements into the different locations.
16717 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16718 for (unsigned i = 0; i != NumElems; ++i)
16719 ShuffleVec[i * SizeRatio + SizeRatio - 1] = i;
16721 SDValue Shuff = DAG.getVectorShuffle(
16722 WideVecVT, dl, SlicedVec, DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16724 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16726 // Build the arithmetic shift.
16727 unsigned Amt = RegVT.getVectorElementType().getSizeInBits() -
16728 MemVT.getVectorElementType().getSizeInBits();
16730 DAG.getNode(ISD::SRA, dl, RegVT, Shuff, DAG.getConstant(Amt, RegVT));
16732 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16736 // Redistribute the loaded elements into the different locations.
16737 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16738 for (unsigned i = 0; i != NumElems; ++i)
16739 ShuffleVec[i * SizeRatio] = i;
16741 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
16742 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16744 // Bitcast to the requested type.
16745 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16746 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16750 // isAndOrOfSetCCs - Return true if node is an ISD::AND or
16751 // ISD::OR of two X86ISD::SETCC nodes each of which has no other use apart
16752 // from the AND / OR.
16753 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
16754 Opc = Op.getOpcode();
16755 if (Opc != ISD::OR && Opc != ISD::AND)
16757 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16758 Op.getOperand(0).hasOneUse() &&
16759 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
16760 Op.getOperand(1).hasOneUse());
16763 // isXor1OfSetCC - Return true if node is an ISD::XOR of an X86ISD::SETCC
16764 // and 1, and that the SETCC node has a single use.
16765 static bool isXor1OfSetCC(SDValue Op) {
16766 if (Op.getOpcode() != ISD::XOR)
16768 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
16769 if (N1C && N1C->getAPIntValue() == 1) {
16770 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16771 Op.getOperand(0).hasOneUse();
16776 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
16777 bool addTest = true;
16778 SDValue Chain = Op.getOperand(0);
16779 SDValue Cond = Op.getOperand(1);
16780 SDValue Dest = Op.getOperand(2);
16783 bool Inverted = false;
16785 if (Cond.getOpcode() == ISD::SETCC) {
16786 // Check for setcc([su]{add,sub,mul}o == 0).
16787 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
16788 isa<ConstantSDNode>(Cond.getOperand(1)) &&
16789 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() &&
16790 Cond.getOperand(0).getResNo() == 1 &&
16791 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
16792 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
16793 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
16794 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
16795 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
16796 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
16798 Cond = Cond.getOperand(0);
16800 SDValue NewCond = LowerSETCC(Cond, DAG);
16801 if (NewCond.getNode())
16806 // FIXME: LowerXALUO doesn't handle these!!
16807 else if (Cond.getOpcode() == X86ISD::ADD ||
16808 Cond.getOpcode() == X86ISD::SUB ||
16809 Cond.getOpcode() == X86ISD::SMUL ||
16810 Cond.getOpcode() == X86ISD::UMUL)
16811 Cond = LowerXALUO(Cond, DAG);
16814 // Look past (and (setcc_carry (cmp ...)), 1).
16815 if (Cond.getOpcode() == ISD::AND &&
16816 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16817 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16818 if (C && C->getAPIntValue() == 1)
16819 Cond = Cond.getOperand(0);
16822 // If the condition flag is set by an X86ISD::CMP, then use it as the
16823 // condition-setting operand in place of the X86ISD::SETCC.
16824 unsigned CondOpcode = Cond.getOpcode();
16825 if (CondOpcode == X86ISD::SETCC ||
16826 CondOpcode == X86ISD::SETCC_CARRY) {
16827 CC = Cond.getOperand(0);
16829 SDValue Cmp = Cond.getOperand(1);
16830 unsigned Opc = Cmp.getOpcode();
16831 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
16832 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
16836 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
16840 // These can only come from an arithmetic instruction with overflow,
16841 // e.g. SADDO, UADDO.
16842 Cond = Cond.getNode()->getOperand(1);
16848 CondOpcode = Cond.getOpcode();
16849 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16850 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16851 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16852 Cond.getOperand(0).getValueType() != MVT::i8)) {
16853 SDValue LHS = Cond.getOperand(0);
16854 SDValue RHS = Cond.getOperand(1);
16855 unsigned X86Opcode;
16858 // Keep this in sync with LowerXALUO, otherwise we might create redundant
16859 // instructions that can't be removed afterwards (i.e. X86ISD::ADD and
16861 switch (CondOpcode) {
16862 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16864 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16866 X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
16869 X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16870 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16872 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16874 X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
16877 X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16878 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16879 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16880 default: llvm_unreachable("unexpected overflowing operator");
16883 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
16884 if (CondOpcode == ISD::UMULO)
16885 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16888 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16890 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);
16892 if (CondOpcode == ISD::UMULO)
16893 Cond = X86Op.getValue(2);
16895 Cond = X86Op.getValue(1);
16897 CC = DAG.getConstant(X86Cond, MVT::i8);
16901 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
16902 SDValue Cmp = Cond.getOperand(0).getOperand(1);
16903 if (CondOpc == ISD::OR) {
16904 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
16905 // two branches instead of an explicit OR instruction with a separate test.
16907 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16908 isX86LogicalCmp(Cmp)) {
16909 CC = Cond.getOperand(0).getOperand(0);
16910 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16911 Chain, Dest, CC, Cmp);
16912 CC = Cond.getOperand(1).getOperand(0);
16916 } else { // ISD::AND
16917 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
16918 // two branches instead of an explicit AND instruction with a
16919 // separate test. However, we only do this if this block doesn't
16920 // have a fall-through edge, because this requires an explicit
16921 // jmp when the condition is false.
16922 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16923 isX86LogicalCmp(Cmp) &&
16924 Op.getNode()->hasOneUse()) {
16925 X86::CondCode CCode =
16926 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16927 CCode = X86::GetOppositeBranchCondition(CCode);
16928 CC = DAG.getConstant(CCode, MVT::i8);
16929 SDNode *User = *Op.getNode()->use_begin();
16930 // Look for an unconditional branch following this conditional branch.
16931 // We need this because we need to reverse the successors in order
16932 // to implement FCMP_OEQ.
16933 if (User->getOpcode() == ISD::BR) {
16934 SDValue FalseBB = User->getOperand(1);
16936 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16937 assert(NewBR == User);
16941 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16942 Chain, Dest, CC, Cmp);
16943 X86::CondCode CCode =
16944 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
16945 CCode = X86::GetOppositeBranchCondition(CCode);
16946 CC = DAG.getConstant(CCode, MVT::i8);
16952 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
16953 // Recognize 'xorb (setcc), 1' patterns. The xor inverts the condition.
16954 // It should be transformed by the DAG combiner except when the condition
16955 // is set by an arithmetic-with-overflow node.
16956 X86::CondCode CCode =
16957 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16958 CCode = X86::GetOppositeBranchCondition(CCode);
16959 CC = DAG.getConstant(CCode, MVT::i8);
16960 Cond = Cond.getOperand(0).getOperand(1);
16962 } else if (Cond.getOpcode() == ISD::SETCC &&
16963 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
16964 // For FCMP_OEQ, we can emit
16965 // two branches instead of an explicit AND instruction with a
16966 // separate test. However, we only do this if this block doesn't
16967 // have a fall-through edge, because this requires an explicit
16968 // jmp when the condition is false.
16969 if (Op.getNode()->hasOneUse()) {
16970 SDNode *User = *Op.getNode()->use_begin();
16971 // Look for an unconditional branch following this conditional branch.
16972 // We need this because we need to reverse the successors in order
16973 // to implement FCMP_OEQ.
16974 if (User->getOpcode() == ISD::BR) {
16975 SDValue FalseBB = User->getOperand(1);
16977 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16978 assert(NewBR == User);
16982 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16983 Cond.getOperand(0), Cond.getOperand(1));
16984 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16985 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16986 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16987 Chain, Dest, CC, Cmp);
16988 CC = DAG.getConstant(X86::COND_P, MVT::i8);
16993 } else if (Cond.getOpcode() == ISD::SETCC &&
16994 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
16995 // For FCMP_UNE, we can emit
16996 // two branches instead of an explicit AND instruction with a
16997 // separate test. However, we only do this if this block doesn't
16998 // have a fall-through edge, because this requires an explicit
16999 // jmp when the condition is false.
17000 if (Op.getNode()->hasOneUse()) {
17001 SDNode *User = *Op.getNode()->use_begin();
17002 // Look for an unconditional branch following this conditional branch.
17003 // We need this because we need to reverse the successors in order
17004 // to implement FCMP_UNE.
17005 if (User->getOpcode() == ISD::BR) {
17006 SDValue FalseBB = User->getOperand(1);
17008 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
17009 assert(NewBR == User);
17012 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
17013 Cond.getOperand(0), Cond.getOperand(1));
17014 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
17015 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
17016 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
17017 Chain, Dest, CC, Cmp);
17018 CC = DAG.getConstant(X86::COND_NP, MVT::i8);
17029 // Look past the truncate if the high bits are known zero.
17029 if (isTruncWithZeroHighBitsInput(Cond, DAG))
17030 Cond = Cond.getOperand(0);
17032 // We know the result of AND is compared against zero. Try to match it to BT.
17034 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
17035 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
17036 if (NewSetCC.getNode()) {
17037 CC = NewSetCC.getOperand(0);
17038 Cond = NewSetCC.getOperand(1);
17045 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
17046 CC = DAG.getConstant(X86Cond, MVT::i8);
17047 Cond = EmitTest(Cond, X86Cond, dl, DAG);
17049 Cond = ConvertCmpIfNecessary(Cond, DAG);
17050 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
17051 Chain, Dest, CC, Cond);
17054 // Lower dynamic stack allocation to an _alloca call for Cygwin/MinGW targets.
17055 // Calls to _alloca are needed to probe the stack when allocating more than 4K
17056 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
17057 // that the guard pages used by the OS virtual memory manager are allocated in
17058 // the correct sequence.
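// Illustrative sketch (the exact probe routine name depends on the
// environment; this comment uses _alloca as above): the allocation size is
// placed in EAX/RAX, the probe routine is called to touch the stack one 4K
// page at a time, and only then is the stack pointer adjusted by the full
// amount.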
17060 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
17061 SelectionDAG &DAG) const {
17062 MachineFunction &MF = DAG.getMachineFunction();
17063 bool SplitStack = MF.shouldSplitStack();
17064 bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMachO()) ||
17069 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
17070 SDNode* Node = Op.getNode();
17072 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
17073 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
17074 " not tell us which reg is the stack pointer!");
17075 EVT VT = Node->getValueType(0);
17076 SDValue Tmp1 = SDValue(Node, 0);
17077 SDValue Tmp2 = SDValue(Node, 1);
17078 SDValue Tmp3 = Node->getOperand(2);
17079 SDValue Chain = Tmp1.getOperand(0);
17081 // Chain the dynamic stack allocation so that it doesn't modify the stack
17082 // pointer when other instructions are using the stack.
17083 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true),
17086 SDValue Size = Tmp2.getOperand(1);
17087 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
17088 Chain = SP.getValue(1);
17089 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
17090 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
17091 unsigned StackAlign = TFI.getStackAlignment();
17092 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
17093 if (Align > StackAlign)
17094 Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
17095 DAG.getConstant(-(uint64_t)Align, VT));
17096 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
17098 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
17099 DAG.getIntPtrConstant(0, true), SDValue(),
17102 SDValue Ops[2] = { Tmp1, Tmp2 };
17103 return DAG.getMergeValues(Ops, dl);
17107 SDValue Chain = Op.getOperand(0);
17108 SDValue Size = Op.getOperand(1);
17109 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
17110 EVT VT = Op.getNode()->getValueType(0);
17112 bool Is64Bit = Subtarget->is64Bit();
17113 EVT SPTy = getPointerTy();
17116 MachineRegisterInfo &MRI = MF.getRegInfo();
17119 // The 64-bit implementation of segmented stacks needs to clobber both r10
17120 // and r11. This makes it impossible to use it along with nested parameters.
17121 const Function *F = MF.getFunction();
17123 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
17125 if (I->hasNestAttr())
17126 report_fatal_error("Cannot use segmented stacks with functions that "
17127 "have nested arguments.");
17130 const TargetRegisterClass *AddrRegClass =
17131 getRegClassFor(getPointerTy());
17132 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
17133 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
17134 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
17135 DAG.getRegister(Vreg, SPTy));
17136 SDValue Ops1[2] = { Value, Chain };
17137 return DAG.getMergeValues(Ops1, dl);
17140 const unsigned Reg = (Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX);
17142 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
17143 Flag = Chain.getValue(1);
17144 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
17146 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
17148 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
17149 unsigned SPReg = RegInfo->getStackRegister();
17150 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
17151 Chain = SP.getValue(1);
17154 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
17155 DAG.getConstant(-(uint64_t)Align, VT));
17156 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
17159 SDValue Ops1[2] = { SP, Chain };
17160 return DAG.getMergeValues(Ops1, dl);
17164 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
17165 MachineFunction &MF = DAG.getMachineFunction();
17166 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
17168 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
17171 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
17172 // vastart just stores the address of the VarArgsFrameIndex slot into the
17173 // memory location argument.
17174 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
17176 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
17177 MachinePointerInfo(SV), false, false, 0);
17181 // gp_offset (0 - 6 * 8)
17182 // fp_offset (48 - 48 + 8 * 16)
17183 // overflow_arg_area (points to parameters passed in memory)
// reg_save_area (points to the start of the register save area)
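// For reference, the SysV x86-64 va_list element these stores populate has the
// layout sketched below (field names follow the ABI; the tag name itself is
// illustrative):
//   struct __va_list_tag {
//     unsigned gp_offset;          // byte 0,  next GPR slot in reg_save_area
//     unsigned fp_offset;          // byte 4,  next XMM slot in reg_save_area
//     void    *overflow_arg_area;  // byte 8,  arguments passed on the stack
//     void    *reg_save_area;      // byte 16, spilled register arguments
//   };
// which matches the 0/4/8/16-byte store offsets used below.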
17185 SmallVector<SDValue, 8> MemOps;
17186 SDValue FIN = Op.getOperand(1);
17188 SDValue Store = DAG.getStore(Op.getOperand(0), DL,
17189 DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
17191 FIN, MachinePointerInfo(SV), false, false, 0);
17192 MemOps.push_back(Store);
17195 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17196 FIN, DAG.getIntPtrConstant(4));
17197 Store = DAG.getStore(Op.getOperand(0), DL,
17198 DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
17200 FIN, MachinePointerInfo(SV, 4), false, false, 0);
17201 MemOps.push_back(Store);
17203 // Store ptr to overflow_arg_area
17204 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17205 FIN, DAG.getIntPtrConstant(4));
17206 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
17208 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
17209 MachinePointerInfo(SV, 8),
17211 MemOps.push_back(Store);
17213 // Store ptr to reg_save_area.
17214 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17215 FIN, DAG.getIntPtrConstant(8));
17216 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
17218 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN,
17219 MachinePointerInfo(SV, 16), false, false, 0);
17220 MemOps.push_back(Store);
17221 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
17224 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
17225 assert(Subtarget->is64Bit() &&
17226 "LowerVAARG only handles 64-bit va_arg!");
17227 assert((Subtarget->isTargetLinux() ||
17228 Subtarget->isTargetDarwin()) &&
17229 "Unhandled target in LowerVAARG");
17230 assert(Op.getNode()->getNumOperands() == 4);
17231 SDValue Chain = Op.getOperand(0);
17232 SDValue SrcPtr = Op.getOperand(1);
17233 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
17234 unsigned Align = Op.getConstantOperandVal(3);
17237 EVT ArgVT = Op.getNode()->getValueType(0);
17238 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
17239 uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy);
17242 // Decide which area this value should be read from.
17243 // TODO: Implement the AMD64 ABI in its entirety. This simple
17244 // selection mechanism works only for the basic types.
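// As an illustrative summary of the cases handled below (not a complete ABI
// description): va_arg of float/double is read via fp_offset (ArgMode 2),
// va_arg of i32/i64 via gp_offset (ArgMode 1), and x86_fp80 is the one case
// explicitly left unimplemented.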
17245 if (ArgVT == MVT::f80) {
17246 llvm_unreachable("va_arg for f80 not yet implemented");
17247 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
17248 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
17249 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
17250 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
17252 llvm_unreachable("Unhandled argument type in LowerVAARG");
17255 if (ArgMode == 2) {
17256 // Sanity Check: Make sure using fp_offset makes sense.
17257 assert(!DAG.getTarget().Options.UseSoftFloat &&
17258 !(DAG.getMachineFunction().getFunction()->hasFnAttribute(
17259 Attribute::NoImplicitFloat)) &&
17260 Subtarget->hasSSE1());
17263 // Insert VAARG_64 node into the DAG
17264 // VAARG_64 returns two values: Variable Argument Address, Chain
17265 SmallVector<SDValue, 11> InstOps;
17266 InstOps.push_back(Chain);
17267 InstOps.push_back(SrcPtr);
17268 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32));
17269 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8));
17270 InstOps.push_back(DAG.getConstant(Align, MVT::i32));
17271 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
17272 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
17273 VTs, InstOps, MVT::i64,
17274 MachinePointerInfo(SV),
17276 /*Volatile=*/false,
17278 /*WriteMem=*/true);
17279 Chain = VAARG.getValue(1);
17281 // Load the next argument and return it
17282 return DAG.getLoad(ArgVT, dl,
17285 MachinePointerInfo(),
17286 false, false, false, 0);
17289 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
17290 SelectionDAG &DAG) {
17291 // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
17292 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
17293 SDValue Chain = Op.getOperand(0);
17294 SDValue DstPtr = Op.getOperand(1);
17295 SDValue SrcPtr = Op.getOperand(2);
17296 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
17297 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
17300 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
17301 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
17303 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
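// A note on the constant 24 above: with the { i32, i32, i8*, i8* } layout the
// whole va_list element occupies 4 + 4 + 8 + 8 = 24 bytes, so one 24-byte,
// 8-byte-aligned memcpy copies gp_offset, fp_offset and both pointers at once.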
17306 // getTargetVShiftByConstNode - Handle vector element shifts where the shift
17307 // amount is a constant. Takes the immediate version of the shift as input.
17308 static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
17309 SDValue SrcOp, uint64_t ShiftAmt,
17310 SelectionDAG &DAG) {
17311 MVT ElementType = VT.getVectorElementType();
17313 // Fold this packed shift into its first operand if ShiftAmt is 0.
17317 // Check for ShiftAmt >= element width
17318 if (ShiftAmt >= ElementType.getSizeInBits()) {
17319 if (Opc == X86ISD::VSRAI)
17320 ShiftAmt = ElementType.getSizeInBits() - 1;
17322 return DAG.getConstant(0, VT);
17325 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
17326 && "Unknown target vector shift-by-constant node");
17328 // Fold this packed vector shift into a build vector if SrcOp is a
17329 // vector of Constants or UNDEFs, and the SrcOp value type is the same as VT.
17330 if (VT == SrcOp.getSimpleValueType() &&
17331 ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
17332 SmallVector<SDValue, 8> Elts;
17333 unsigned NumElts = SrcOp->getNumOperands();
17334 ConstantSDNode *ND;
17337 default: llvm_unreachable(nullptr);
17338 case X86ISD::VSHLI:
17339 for (unsigned i=0; i!=NumElts; ++i) {
17340 SDValue CurrentOp = SrcOp->getOperand(i);
17341 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17342 Elts.push_back(CurrentOp);
17345 ND = cast<ConstantSDNode>(CurrentOp);
17346 const APInt &C = ND->getAPIntValue();
17347 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), ElementType));
17350 case X86ISD::VSRLI:
17351 for (unsigned i=0; i!=NumElts; ++i) {
17352 SDValue CurrentOp = SrcOp->getOperand(i);
17353 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17354 Elts.push_back(CurrentOp);
17357 ND = cast<ConstantSDNode>(CurrentOp);
17358 const APInt &C = ND->getAPIntValue();
17359 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), ElementType));
17362 case X86ISD::VSRAI:
17363 for (unsigned i=0; i!=NumElts; ++i) {
17364 SDValue CurrentOp = SrcOp->getOperand(i);
17365 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17366 Elts.push_back(CurrentOp);
17369 ND = cast<ConstantSDNode>(CurrentOp);
17370 const APInt &C = ND->getAPIntValue();
17371 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), ElementType));
17376 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
17379 return DAG.getNode(Opc, dl, VT, SrcOp, DAG.getConstant(ShiftAmt, MVT::i8));
17382 // getTargetVShiftNode - Handle vector element shifts where the shift amount
17383 // may or may not be a constant. Takes the immediate version of the shift as input.
17384 static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
17385 SDValue SrcOp, SDValue ShAmt,
17386 SelectionDAG &DAG) {
17387 MVT SVT = ShAmt.getSimpleValueType();
17388 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
17390 // Catch shift-by-constant.
17391 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
17392 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
17393 CShAmt->getZExtValue(), DAG);
17395 // Change opcode to non-immediate version
17397 default: llvm_unreachable("Unknown target vector shift node");
17398 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
17399 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
17400 case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
17403 const X86Subtarget &Subtarget =
17404 static_cast<const X86Subtarget &>(DAG.getSubtarget());
17405 if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
17406 ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
17407 // Let the shuffle legalizer expand this shift amount node.
17408 SDValue Op0 = ShAmt.getOperand(0);
17409 Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
17410 ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, &Subtarget, DAG);
17412 // Need to build a vector containing the shift amount.
17413 // SSE/AVX packed shifts only use the lower 64 bits of the shift count.
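// As a concrete sketch: a v4i32 shift by a scalar i32 amount s builds the
// v4i32 vector <s, 0, undef, undef> (the explicit zero extends s into the low
// 64 bits), while an i64 amount builds the v2i64 vector <s, undef>; only the
// low 64 bits of that vector are consumed by the VSHL/VSRL/VSRA nodes.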
17414 SmallVector<SDValue, 4> ShOps;
17415 ShOps.push_back(ShAmt);
17416 if (SVT == MVT::i32) {
17417 ShOps.push_back(DAG.getConstant(0, SVT));
17418 ShOps.push_back(DAG.getUNDEF(SVT));
17420 ShOps.push_back(DAG.getUNDEF(SVT));
17422 MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
17423 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
17426 // The return type has to be a 128-bit type with the same element
17427 // type as the input type.
17428 MVT EltVT = VT.getVectorElementType();
17429 EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());
17431 ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt);
17432 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
17435 /// \brief Return (and \p Op, \p Mask) for compare instructions or
17436 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
17437 /// necessary casting for \p Mask when lowering masking intrinsics.
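/// An illustrative example of the two shapes this produces (IR-level
/// pseudo-ops, not literal output): a masked compare becomes
///   and <8 x i1> %cmp, %mask
/// while a masked arithmetic op with a pass-through operand becomes
///   vselect <8 x i1> %mask, <8 x i32> %op, <8 x i32> %passthru
/// with %passthru replaced by a zero vector when the source is undef.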
17438 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
17439 SDValue PreservedSrc,
17440 const X86Subtarget *Subtarget,
17441 SelectionDAG &DAG) {
17442 EVT VT = Op.getValueType();
17443 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(),
17444 MVT::i1, VT.getVectorNumElements());
17445 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17446 Mask.getValueType().getSizeInBits());
17449 assert(MaskVT.isSimple() && "invalid mask type");
17451 if (isAllOnes(Mask))
17454 // In the case when MaskVT equals v2i1 or v4i1, the lower 2 or 4 elements
17455 // are extracted by EXTRACT_SUBVECTOR.
17456 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17457 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17458 DAG.getIntPtrConstant(0));
17460 switch (Op.getOpcode()) {
17462 case X86ISD::PCMPEQM:
17463 case X86ISD::PCMPGTM:
17465 case X86ISD::CMPMU:
17466 return DAG.getNode(ISD::AND, dl, VT, Op, VMask);
17468 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17469 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17470 return DAG.getNode(ISD::VSELECT, dl, VT, VMask, Op, PreservedSrc);
17473 /// \brief Creates an SDNode for a predicated scalar operation.
17474 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
17475 /// The mask comes in as MVT::i8 and it should be truncated
17476 /// to MVT::i1 while lowering masking intrinsics.
17477 /// The main difference between ScalarMaskingNode and VectorMaskingNode is that
17478 /// the former uses "X86select" instead of "vselect": we simply can't create the
17479 /// "vselect" node for a scalar instruction.
17480 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
17481 SDValue PreservedSrc,
17482 const X86Subtarget *Subtarget,
17483 SelectionDAG &DAG) {
17484 if (isAllOnes(Mask))
17487 EVT VT = Op.getValueType();
17489 // The mask should be of type MVT::i1
17490 SDValue IMask = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Mask);
17492 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17493 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17494 return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc);
17497 static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17498 SelectionDAG &DAG) {
17500 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17501 EVT VT = Op.getValueType();
17502 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
17504 switch(IntrData->Type) {
17505 case INTR_TYPE_1OP:
17506 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
17507 case INTR_TYPE_2OP:
17508 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17510 case INTR_TYPE_3OP:
17511 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17512 Op.getOperand(2), Op.getOperand(3));
17513 case INTR_TYPE_1OP_MASK_RM: {
17514 SDValue Src = Op.getOperand(1);
17515 SDValue Src0 = Op.getOperand(2);
17516 SDValue Mask = Op.getOperand(3);
17517 SDValue RoundingMode = Op.getOperand(4);
17518 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
17520 Mask, Src0, Subtarget, DAG);
17522 case INTR_TYPE_SCALAR_MASK_RM: {
17523 SDValue Src1 = Op.getOperand(1);
17524 SDValue Src2 = Op.getOperand(2);
17525 SDValue Src0 = Op.getOperand(3);
17526 SDValue Mask = Op.getOperand(4);
17527 SDValue RoundingMode = Op.getOperand(5);
17528 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
17530 Mask, Src0, Subtarget, DAG);
17532 case INTR_TYPE_2OP_MASK: {
17533 SDValue Mask = Op.getOperand(4);
17534 SDValue PassThru = Op.getOperand(3);
17535 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17536 if (IntrWithRoundingModeOpcode != 0) {
17537 unsigned Round = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
17538 if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION) {
17539 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17540 dl, Op.getValueType(),
17541 Op.getOperand(1), Op.getOperand(2),
17542 Op.getOperand(3), Op.getOperand(5)),
17543 Mask, PassThru, Subtarget, DAG);
17546 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
17549 Mask, PassThru, Subtarget, DAG);
17551 case FMA_OP_MASK: {
17552 SDValue Src1 = Op.getOperand(1);
17553 SDValue Src2 = Op.getOperand(2);
17554 SDValue Src3 = Op.getOperand(3);
17555 SDValue Mask = Op.getOperand(4);
17556 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17557 if (IntrWithRoundingModeOpcode != 0) {
17558 SDValue Rnd = Op.getOperand(5);
17559 if (cast<ConstantSDNode>(Rnd)->getZExtValue() !=
17560 X86::STATIC_ROUNDING::CUR_DIRECTION)
17561 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17562 dl, Op.getValueType(),
17563 Src1, Src2, Src3, Rnd),
17564 Mask, Src1, Subtarget, DAG);
17566 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
17567 dl, Op.getValueType(),
17569 Mask, Src1, Subtarget, DAG);
17572 case CMP_MASK_CC: {
17573 // Comparison intrinsics with masks.
17574 // Example of transformation:
17575 // (i8 (int_x86_avx512_mask_pcmpeq_q_128
17576 // (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
17578 // (v8i1 (insert_subvector undef,
17579 // (v2i1 (and (PCMPEQM %a, %b),
17580 // (extract_subvector
17581 // (v8i1 (bitcast %mask)), 0))), 0))))
17582 EVT VT = Op.getOperand(1).getValueType();
17583 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17584 VT.getVectorNumElements());
17585 SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3);
17586 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17587 Mask.getValueType().getSizeInBits());
17589 if (IntrData->Type == CMP_MASK_CC) {
17590 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17591 Op.getOperand(2), Op.getOperand(3));
17593 assert(IntrData->Type == CMP_MASK && "Unexpected intrinsic type!");
17594 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17597 SDValue CmpMask = getVectorMaskingNode(Cmp, Mask,
17598 DAG.getTargetConstant(0, MaskVT),
17600 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
17601 DAG.getUNDEF(BitcastVT), CmpMask,
17602 DAG.getIntPtrConstant(0));
17603 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
17605 case COMI: { // Comparison intrinsics
17606 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
17607 SDValue LHS = Op.getOperand(1);
17608 SDValue RHS = Op.getOperand(2);
17609 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
17610 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
17611 SDValue Cond = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
17612 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17613 DAG.getConstant(X86CC, MVT::i8), Cond);
17614 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17617 return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
17618 Op.getOperand(1), Op.getOperand(2), DAG);
17620 return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl,
17621 Op.getSimpleValueType(),
17623 Op.getOperand(2), DAG),
17624 Op.getOperand(4), Op.getOperand(3), Subtarget,
17626 case COMPRESS_EXPAND_IN_REG: {
17627 SDValue Mask = Op.getOperand(3);
17628 SDValue DataToCompress = Op.getOperand(1);
17629 SDValue PassThru = Op.getOperand(2);
17630 if (isAllOnes(Mask)) // return data as is
17631 return Op.getOperand(1);
17632 EVT VT = Op.getValueType();
17633 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17634 VT.getVectorNumElements());
17635 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17636 Mask.getValueType().getSizeInBits());
17638 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17639 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17640 DAG.getIntPtrConstant(0));
17642 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToCompress,
17646 SDValue Mask = Op.getOperand(3);
17647 EVT VT = Op.getValueType();
17648 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17649 VT.getVectorNumElements());
17650 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17651 Mask.getValueType().getSizeInBits());
17653 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17654 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17655 DAG.getIntPtrConstant(0));
17656 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1),
17665 default: return SDValue(); // Don't custom lower most intrinsics.
17667 case Intrinsic::x86_avx512_mask_valign_q_512:
17668 case Intrinsic::x86_avx512_mask_valign_d_512:
17669 // Vector source operands are swapped.
17670 return getVectorMaskingNode(DAG.getNode(X86ISD::VALIGN, dl,
17671 Op.getValueType(), Op.getOperand(2),
17674 Op.getOperand(5), Op.getOperand(4),
17677 // ptest and testp intrinsics. The intrinsics these come from are designed to
17678 // return an integer value, not just an instruction, so lower it to the ptest
17679 // or testp pattern and a setcc for the result.
17680 case Intrinsic::x86_sse41_ptestz:
17681 case Intrinsic::x86_sse41_ptestc:
17682 case Intrinsic::x86_sse41_ptestnzc:
17683 case Intrinsic::x86_avx_ptestz_256:
17684 case Intrinsic::x86_avx_ptestc_256:
17685 case Intrinsic::x86_avx_ptestnzc_256:
17686 case Intrinsic::x86_avx_vtestz_ps:
17687 case Intrinsic::x86_avx_vtestc_ps:
17688 case Intrinsic::x86_avx_vtestnzc_ps:
17689 case Intrinsic::x86_avx_vtestz_pd:
17690 case Intrinsic::x86_avx_vtestc_pd:
17691 case Intrinsic::x86_avx_vtestnzc_pd:
17692 case Intrinsic::x86_avx_vtestz_ps_256:
17693 case Intrinsic::x86_avx_vtestc_ps_256:
17694 case Intrinsic::x86_avx_vtestnzc_ps_256:
17695 case Intrinsic::x86_avx_vtestz_pd_256:
17696 case Intrinsic::x86_avx_vtestc_pd_256:
17697 case Intrinsic::x86_avx_vtestnzc_pd_256: {
17698 bool IsTestPacked = false;
17701 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
17702 case Intrinsic::x86_avx_vtestz_ps:
17703 case Intrinsic::x86_avx_vtestz_pd:
17704 case Intrinsic::x86_avx_vtestz_ps_256:
17705 case Intrinsic::x86_avx_vtestz_pd_256:
17706 IsTestPacked = true; // Fallthrough
17707 case Intrinsic::x86_sse41_ptestz:
17708 case Intrinsic::x86_avx_ptestz_256:
17710 X86CC = X86::COND_E;
17712 case Intrinsic::x86_avx_vtestc_ps:
17713 case Intrinsic::x86_avx_vtestc_pd:
17714 case Intrinsic::x86_avx_vtestc_ps_256:
17715 case Intrinsic::x86_avx_vtestc_pd_256:
17716 IsTestPacked = true; // Fallthrough
17717 case Intrinsic::x86_sse41_ptestc:
17718 case Intrinsic::x86_avx_ptestc_256:
17720 X86CC = X86::COND_B;
17722 case Intrinsic::x86_avx_vtestnzc_ps:
17723 case Intrinsic::x86_avx_vtestnzc_pd:
17724 case Intrinsic::x86_avx_vtestnzc_ps_256:
17725 case Intrinsic::x86_avx_vtestnzc_pd_256:
17726 IsTestPacked = true; // Fallthrough
17727 case Intrinsic::x86_sse41_ptestnzc:
17728 case Intrinsic::x86_avx_ptestnzc_256:
17730 X86CC = X86::COND_A;
17734 SDValue LHS = Op.getOperand(1);
17735 SDValue RHS = Op.getOperand(2);
17736 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
17737 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
17738 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17739 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
17740 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17742 case Intrinsic::x86_avx512_kortestz_w:
17743 case Intrinsic::x86_avx512_kortestc_w: {
17744 unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz_w)? X86::COND_E: X86::COND_B;
17745 SDValue LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(1));
17746 SDValue RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(2));
17747 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17748 SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
17749 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test);
17750 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17753 case Intrinsic::x86_sse42_pcmpistria128:
17754 case Intrinsic::x86_sse42_pcmpestria128:
17755 case Intrinsic::x86_sse42_pcmpistric128:
17756 case Intrinsic::x86_sse42_pcmpestric128:
17757 case Intrinsic::x86_sse42_pcmpistrio128:
17758 case Intrinsic::x86_sse42_pcmpestrio128:
17759 case Intrinsic::x86_sse42_pcmpistris128:
17760 case Intrinsic::x86_sse42_pcmpestris128:
17761 case Intrinsic::x86_sse42_pcmpistriz128:
17762 case Intrinsic::x86_sse42_pcmpestriz128: {
17766 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
17767 case Intrinsic::x86_sse42_pcmpistria128:
17768 Opcode = X86ISD::PCMPISTRI;
17769 X86CC = X86::COND_A;
17771 case Intrinsic::x86_sse42_pcmpestria128:
17772 Opcode = X86ISD::PCMPESTRI;
17773 X86CC = X86::COND_A;
17775 case Intrinsic::x86_sse42_pcmpistric128:
17776 Opcode = X86ISD::PCMPISTRI;
17777 X86CC = X86::COND_B;
17779 case Intrinsic::x86_sse42_pcmpestric128:
17780 Opcode = X86ISD::PCMPESTRI;
17781 X86CC = X86::COND_B;
17783 case Intrinsic::x86_sse42_pcmpistrio128:
17784 Opcode = X86ISD::PCMPISTRI;
17785 X86CC = X86::COND_O;
17787 case Intrinsic::x86_sse42_pcmpestrio128:
17788 Opcode = X86ISD::PCMPESTRI;
17789 X86CC = X86::COND_O;
17791 case Intrinsic::x86_sse42_pcmpistris128:
17792 Opcode = X86ISD::PCMPISTRI;
17793 X86CC = X86::COND_S;
17795 case Intrinsic::x86_sse42_pcmpestris128:
17796 Opcode = X86ISD::PCMPESTRI;
17797 X86CC = X86::COND_S;
17799 case Intrinsic::x86_sse42_pcmpistriz128:
17800 Opcode = X86ISD::PCMPISTRI;
17801 X86CC = X86::COND_E;
17803 case Intrinsic::x86_sse42_pcmpestriz128:
17804 Opcode = X86ISD::PCMPESTRI;
17805 X86CC = X86::COND_E;
17808 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17809 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17810 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps);
17811 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17812 DAG.getConstant(X86CC, MVT::i8),
17813 SDValue(PCMP.getNode(), 1));
17814 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17817 case Intrinsic::x86_sse42_pcmpistri128:
17818 case Intrinsic::x86_sse42_pcmpestri128: {
17820 if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
17821 Opcode = X86ISD::PCMPISTRI;
17823 Opcode = X86ISD::PCMPESTRI;
17825 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17826 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17827 return DAG.getNode(Opcode, dl, VTs, NewOps);
17832 static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17833 SDValue Src, SDValue Mask, SDValue Base,
17834 SDValue Index, SDValue ScaleOp, SDValue Chain,
17835 const X86Subtarget * Subtarget) {
17837 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17838 assert(C && "Invalid scale type");
17839 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17840 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17841 Index.getSimpleValueType().getVectorNumElements());
17843 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17845 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17847 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17848 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
17849 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17850 SDValue Segment = DAG.getRegister(0, MVT::i32);
17851 if (Src.getOpcode() == ISD::UNDEF)
17852 Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
17853 SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17854 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17855 SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
17856 return DAG.getMergeValues(RetOps, dl);
17859 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17860 SDValue Src, SDValue Mask, SDValue Base,
17861 SDValue Index, SDValue ScaleOp, SDValue Chain) {
17863 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17864 assert(C && "Invalid scale type");
17865 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17866 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17867 SDValue Segment = DAG.getRegister(0, MVT::i32);
17868 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17869 Index.getSimpleValueType().getVectorNumElements());
17871 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17873 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17875 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17876 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
17877 SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
17878 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17879 return SDValue(Res, 1);
17882 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17883 SDValue Mask, SDValue Base, SDValue Index,
17884 SDValue ScaleOp, SDValue Chain) {
17886 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17887 assert(C && "Invalid scale type");
17888 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17889 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17890 SDValue Segment = DAG.getRegister(0, MVT::i32);
17892 MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
17894 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17896 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17898 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17899 //SDVTList VTs = DAG.getVTList(MVT::Other);
17900 SDValue Ops[] = {MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17901 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
17902 return SDValue(Res, 0);
17905 // getReadPerformanceCounter - Handles the lowering of builtin intrinsics that
17906 // read performance monitor counters (x86_rdpmc).
17907 static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
17908 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17909 SmallVectorImpl<SDValue> &Results) {
17910 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17911 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
17914 // The ECX register is used to select the index of the performance counter to read.
17916 SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
17918 SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);
17920 // Reads the content of a 64-bit performance counter and returns it in the
17921 // registers EDX:EAX.
17922 if (Subtarget->is64Bit()) {
17923 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17924 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17927 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17928 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17931 Chain = HI.getValue(1);
17933 if (Subtarget->is64Bit()) {
17934 // The EAX register is loaded with the low-order 32 bits. The EDX register
17935 // is loaded with the supported high-order bits of the counter.
17936 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17937 DAG.getConstant(32, MVT::i8));
17938 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17939 Results.push_back(Chain);
17943 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17944 SDValue Ops[] = { LO, HI };
17945 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17946 Results.push_back(Pair);
17947 Results.push_back(Chain);
17950 // getReadTimeStampCounter - Handles the lowering of builtin intrinsics that
17951 // read the time stamp counter (x86_rdtsc and x86_rdtscp). This function is
17952 // also used to custom lower READCYCLECOUNTER nodes.
17953 static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
17954 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17955 SmallVectorImpl<SDValue> &Results) {
17956 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
17957 SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
17960 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
17961 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
17962 // and the EAX register is loaded with the low-order 32 bits.
17963 if (Subtarget->is64Bit()) {
17964 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17965 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17968 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17969 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17972 SDValue Chain = HI.getValue(1);
17974 if (Opcode == X86ISD::RDTSCP_DAG) {
17975 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17977 // The RDTSCP instruction loads the IA32_TSC_AUX MSR (address C000_0103H) into
17978 // the ECX register. Add 'ecx' explicitly to the chain.
17979 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
17981 // Explicitly store the content of ECX at the location passed in input
17982 // to the 'rdtscp' intrinsic.
17983 Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
17984 MachinePointerInfo(), false, false, 0);
17987 if (Subtarget->is64Bit()) {
17988 // The EDX register is loaded with the high-order 32 bits of the MSR, and
17989 // the EAX register is loaded with the low-order 32 bits.
17990 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17991 DAG.getConstant(32, MVT::i8));
17992 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17993 Results.push_back(Chain);
17997 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17998 SDValue Ops[] = { LO, HI };
17999 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
18000 Results.push_back(Pair);
18001 Results.push_back(Chain);
18004 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
18005 SelectionDAG &DAG) {
18006 SmallVector<SDValue, 2> Results;
18008 getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
18010 return DAG.getMergeValues(Results, DL);
18014 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
18015 SelectionDAG &DAG) {
18016 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
18018 const IntrinsicData* IntrData = getIntrinsicWithChain(IntNo);
18023 switch(IntrData->Type) {
18025 llvm_unreachable("Unknown Intrinsic Type");
18029 // Emit the node with the right value type.
18030 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
18031 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
18033 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
18034 // Otherwise return the value from Rand, which is always 0, cast to i32.
18035 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
18036 DAG.getConstant(1, Op->getValueType(1)),
18037 DAG.getConstant(X86::COND_B, MVT::i32),
18038 SDValue(Result.getNode(), 1) };
18039 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
18040 DAG.getVTList(Op->getValueType(1), MVT::Glue),
18043 // Return { result, isValid, chain }.
18044 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
18045 SDValue(Result.getNode(), 2));
18048 // gather(v1, mask, index, base, scale);
18049 SDValue Chain = Op.getOperand(0);
18050 SDValue Src = Op.getOperand(2);
18051 SDValue Base = Op.getOperand(3);
18052 SDValue Index = Op.getOperand(4);
18053 SDValue Mask = Op.getOperand(5);
18054 SDValue Scale = Op.getOperand(6);
18055 return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain,
18059 // scatter(base, mask, index, v1, scale);
18060 SDValue Chain = Op.getOperand(0);
18061 SDValue Base = Op.getOperand(2);
18062 SDValue Mask = Op.getOperand(3);
18063 SDValue Index = Op.getOperand(4);
18064 SDValue Src = Op.getOperand(5);
18065 SDValue Scale = Op.getOperand(6);
18066 return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain);
18069 SDValue Hint = Op.getOperand(6);
18071 if (dyn_cast<ConstantSDNode> (Hint) == nullptr ||
18072 (HintVal = dyn_cast<ConstantSDNode> (Hint)->getZExtValue()) > 1)
18073 llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
18074 unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
18075 SDValue Chain = Op.getOperand(0);
18076 SDValue Mask = Op.getOperand(2);
18077 SDValue Index = Op.getOperand(3);
18078 SDValue Base = Op.getOperand(4);
18079 SDValue Scale = Op.getOperand(5);
18080 return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain);
18082 // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
18084 SmallVector<SDValue, 2> Results;
18085 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget, Results);
18086 return DAG.getMergeValues(Results, dl);
18088 // Read Performance Monitoring Counters.
18090 SmallVector<SDValue, 2> Results;
18091 getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
18092 return DAG.getMergeValues(Results, dl);
18094 // XTEST intrinsics.
18096 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
18097 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
18098 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
18099 DAG.getConstant(X86::COND_NE, MVT::i8),
18101 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
18102 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
18103 Ret, SDValue(InTrans.getNode(), 1));
18107 SmallVector<SDValue, 2> Results;
18108 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
18109 SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
18110 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
18111 DAG.getConstant(-1, MVT::i8));
18112 SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
18113 Op.getOperand(4), GenCF.getValue(1));
18114 SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
18115 Op.getOperand(5), MachinePointerInfo(),
18117 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
18118 DAG.getConstant(X86::COND_B, MVT::i8),
18120 Results.push_back(SetCC);
18121 Results.push_back(Store);
18122 return DAG.getMergeValues(Results, dl);
18124 case COMPRESS_TO_MEM: {
18126 SDValue Mask = Op.getOperand(4);
18127 SDValue DataToCompress = Op.getOperand(3);
18128 SDValue Addr = Op.getOperand(2);
18129 SDValue Chain = Op.getOperand(0);
18131 if (isAllOnes(Mask)) // return just a store
18132 return DAG.getStore(Chain, dl, DataToCompress, Addr,
18133 MachinePointerInfo(), false, false, 0);
18135 EVT VT = DataToCompress.getValueType();
18136 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18137 VT.getVectorNumElements());
18138 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18139 Mask.getValueType().getSizeInBits());
18140 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
18141 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
18142 DAG.getIntPtrConstant(0));
18144 SDValue Compressed = DAG.getNode(IntrData->Opc0, dl, VT, VMask,
18145 DataToCompress, DAG.getUNDEF(VT));
18146 return DAG.getStore(Chain, dl, Compressed, Addr,
18147 MachinePointerInfo(), false, false, 0);
18149 case EXPAND_FROM_MEM: {
18151 SDValue Mask = Op.getOperand(4);
18152 SDValue PathThru = Op.getOperand(3);
18153 SDValue Addr = Op.getOperand(2);
18154 SDValue Chain = Op.getOperand(0);
18155 EVT VT = Op.getValueType();
18157 if (isAllOnes(Mask)) // return just a load
18158 return DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(), false, false,
18160 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18161 VT.getVectorNumElements());
18162 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18163 Mask.getValueType().getSizeInBits());
18164 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
18165 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
18166 DAG.getIntPtrConstant(0));
18168 SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(),
18169 false, false, false, 0);
18171 SmallVector<SDValue, 2> Results;
18172 Results.push_back(DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToExpand,
18174 Results.push_back(Chain);
18175 return DAG.getMergeValues(Results, dl);
18180 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
18181 SelectionDAG &DAG) const {
18182 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
18183 MFI->setReturnAddressIsTaken(true);
18185 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
18188 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
18190 EVT PtrVT = getPointerTy();
18193 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
18194 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18195 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
18196 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
18197 DAG.getNode(ISD::ADD, dl, PtrVT,
18198 FrameAddr, Offset),
18199 MachinePointerInfo(), false, false, false, 0);
18202 // Just load the return address.
18203 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
18204 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
18205 RetAddrFI, MachinePointerInfo(), false, false, false, 0);
18208 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
18209 MachineFunction &MF = DAG.getMachineFunction();
18210 MachineFrameInfo *MFI = MF.getFrameInfo();
18211 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
18212 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18213 EVT VT = Op.getValueType();
18215 MFI->setFrameAddressIsTaken(true);
18217 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
18218 // Depth > 0 makes no sense on targets which use Windows unwind codes. It
18219 // is not possible to crawl up the stack without looking at the unwind codes.
18221 int FrameAddrIndex = FuncInfo->getFAIndex();
18222 if (!FrameAddrIndex) {
18223 // Set up a frame object for the return address.
18224 unsigned SlotSize = RegInfo->getSlotSize();
18225 FrameAddrIndex = MF.getFrameInfo()->CreateFixedObject(
18226 SlotSize, /*Offset=*/INT64_MIN, /*IsImmutable=*/false);
18227 FuncInfo->setFAIndex(FrameAddrIndex);
18229 return DAG.getFrameIndex(FrameAddrIndex, VT);
18232 unsigned FrameReg =
18233 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
18234 SDLoc dl(Op); // FIXME probably not meaningful
18235 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
18236 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
18237 (FrameReg == X86::EBP && VT == MVT::i32)) &&
18238 "Invalid Frame Register!");
18239 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
18241 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
18242 MachinePointerInfo(),
18243 false, false, false, 0);
18247 // FIXME? Maybe this could be a TableGen attribute on some registers and
18248 // this table could be generated automatically from RegInfo.
18249 unsigned X86TargetLowering::getRegisterByName(const char* RegName,
18251 unsigned Reg = StringSwitch<unsigned>(RegName)
18252 .Case("esp", X86::ESP)
18253 .Case("rsp", X86::RSP)
18257 report_fatal_error("Invalid register name global variable");
18260 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
18261 SelectionDAG &DAG) const {
18262 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18263 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
18266 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
18267 SDValue Chain = Op.getOperand(0);
18268 SDValue Offset = Op.getOperand(1);
18269 SDValue Handler = Op.getOperand(2);
18272 EVT PtrVT = getPointerTy();
18273 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18274 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
18275 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
18276 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
18277 "Invalid Frame Register!");
18278 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
18279 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
18281 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
18282 DAG.getIntPtrConstant(RegInfo->getSlotSize()));
18283 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
18284 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
18286 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
18288 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
18289 DAG.getRegister(StoreAddrReg, PtrVT));
18292 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
18293 SelectionDAG &DAG) const {
18295 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
18296 DAG.getVTList(MVT::i32, MVT::Other),
18297 Op.getOperand(0), Op.getOperand(1));
18300 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
18301 SelectionDAG &DAG) const {
18303 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
18304 Op.getOperand(0), Op.getOperand(1));
18307 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
18308 return Op.getOperand(0);
18311 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
18312 SelectionDAG &DAG) const {
18313 SDValue Root = Op.getOperand(0);
18314 SDValue Trmp = Op.getOperand(1); // trampoline
18315 SDValue FPtr = Op.getOperand(2); // nested function
18316 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
18319 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
18320 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
18322 if (Subtarget->is64Bit()) {
18323 SDValue OutChains[6];
18325 // Large code-model.
18326 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
18327 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
18329 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
18330 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
18332 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
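// For orientation, the stores below lay the trampoline out roughly as follows
// (offsets match the constants used in the code; the byte values are a sketch,
// not an authoritative encoding listing):
//   +0:  49 BB            movabsq $FPtr, %r11   (REX.WB, MOV64ri|r11)
//   +2:  <FPtr, 8 bytes>
//   +10: 49 BA            movabsq $Nest, %r10   (REX.WB, MOV64ri|r10)
//   +12: <Nest, 8 bytes>
//   +20: 49 FF            jmpq *%r11            (REX.WB, JMP64r)
//   +22: E3               ModRM byte: mod=11, reg=/4, rm=r11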
18334 // Load the pointer to the nested function into R11.
18335 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
18336 SDValue Addr = Trmp;
18337 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18338 Addr, MachinePointerInfo(TrmpAddr),
18341 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18342 DAG.getConstant(2, MVT::i64));
18343 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
18344 MachinePointerInfo(TrmpAddr, 2),
18347 // Load the 'nest' parameter value into R10.
18348 // R10 is specified in X86CallingConv.td
18349 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
18350 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18351 DAG.getConstant(10, MVT::i64));
18352 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18353 Addr, MachinePointerInfo(TrmpAddr, 10),
18356 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18357 DAG.getConstant(12, MVT::i64));
18358 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
18359 MachinePointerInfo(TrmpAddr, 12),
18362 // Jump to the nested function.
18363 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
18364 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18365 DAG.getConstant(20, MVT::i64));
18366 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18367 Addr, MachinePointerInfo(TrmpAddr, 20),
18370 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
18371 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18372 DAG.getConstant(22, MVT::i64));
18373 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
18374 MachinePointerInfo(TrmpAddr, 22),
18377 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18379 const Function *Func =
18380 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
18381 CallingConv::ID CC = Func->getCallingConv();
18386 llvm_unreachable("Unsupported calling convention");
18387 case CallingConv::C:
18388 case CallingConv::X86_StdCall: {
18389 // Pass 'nest' parameter in ECX.
18390 // Must be kept in sync with X86CallingConv.td
18391 NestReg = X86::ECX;
18393 // Check that ECX wasn't needed by an 'inreg' parameter.
18394 FunctionType *FTy = Func->getFunctionType();
18395 const AttributeSet &Attrs = Func->getAttributes();
18397 if (!Attrs.isEmpty() && !Func->isVarArg()) {
18398 unsigned InRegCount = 0;
18401 for (FunctionType::param_iterator I = FTy->param_begin(),
18402 E = FTy->param_end(); I != E; ++I, ++Idx)
18403 if (Attrs.hasAttribute(Idx, Attribute::InReg))
18404 // FIXME: should only count parameters that are lowered to integers.
18405 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
18407 if (InRegCount > 2) {
18408 report_fatal_error("Nest register in use - reduce number of inreg"
18414 case CallingConv::X86_FastCall:
18415 case CallingConv::X86_ThisCall:
18416 case CallingConv::Fast:
18417 // Pass 'nest' parameter in EAX.
18418 // Must be kept in sync with X86CallingConv.td
18419 NestReg = X86::EAX;
18423 SDValue OutChains[4];
18424 SDValue Addr, Disp;
18426 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18427 DAG.getConstant(10, MVT::i32));
18428 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
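// The resulting 10-byte 32-bit trampoline is, schematically (offsets match the
// stores below; a sketch, not an encoding reference):
//   +0: B8+reg   movl $Nest, %ecx or %eax   (MOV32ri | NestReg encoding)
//   +1: <Nest, 4 bytes>
//   +5: E9       jmp rel32
//   +6: <Disp, 4 bytes>    where Disp = FPtr - (Trmp + 10), computed above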
18430 // This is storing the opcode for MOV32ri.
18431 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
18432 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
18433 OutChains[0] = DAG.getStore(Root, dl,
18434 DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
18435 Trmp, MachinePointerInfo(TrmpAddr),
18438 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18439 DAG.getConstant(1, MVT::i32));
18440 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
18441 MachinePointerInfo(TrmpAddr, 1),
18444 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
18445 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18446 DAG.getConstant(5, MVT::i32));
18447 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
18448 MachinePointerInfo(TrmpAddr, 5),
18451 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18452 DAG.getConstant(6, MVT::i32));
18453 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
18454 MachinePointerInfo(TrmpAddr, 6),
18457 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18461 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
18462 SelectionDAG &DAG) const {
18464 The rounding mode is in bits 11:10 of FPSR, and has the following
      settings:
18466   00 Round to nearest
        01 Round to -inf
        10 Round to +inf
        11 Round to 0

18471 FLT_ROUNDS, on the other hand, expects the following:
        -1 Undefined
         0 Round to 0
         1 Round to nearest
         2 Round to +inf
         3 Round to -inf

18478 To perform the conversion, we do:
18479   (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
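
  As a quick check of that expression against the two tables above:
    RC = 00 -> (0 | 0) + 1 = 1   (round to nearest)
    RC = 01 -> (0 | 2) + 1 = 3   (round to -inf)
    RC = 10 -> (1 | 0) + 1 = 2   (round to +inf)
    RC = 11 -> (1 | 2) + 1 = 4, masked to 0   (round to 0)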
18482 MachineFunction &MF = DAG.getMachineFunction();
18483 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
18484 unsigned StackAlignment = TFI.getStackAlignment();
18485 MVT VT = Op.getSimpleValueType();
18488 // Save FP Control Word to stack slot
18489 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
18490 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
18492 MachineMemOperand *MMO =
18493 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
18494 MachineMemOperand::MOStore, 2, 2);
18496 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
18497 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
18498 DAG.getVTList(MVT::Other),
18499 Ops, MVT::i16, MMO);
18501 // Load FP Control Word from stack slot
18502 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
18503 MachinePointerInfo(), false, false, false, 0);
18505 // Transform as necessary
18507 DAG.getNode(ISD::SRL, DL, MVT::i16,
18508 DAG.getNode(ISD::AND, DL, MVT::i16,
18509 CWD, DAG.getConstant(0x800, MVT::i16)),
18510 DAG.getConstant(11, MVT::i8));
18512 DAG.getNode(ISD::SRL, DL, MVT::i16,
18513 DAG.getNode(ISD::AND, DL, MVT::i16,
18514 CWD, DAG.getConstant(0x400, MVT::i16)),
18515 DAG.getConstant(9, MVT::i8));
18518 DAG.getNode(ISD::AND, DL, MVT::i16,
18519 DAG.getNode(ISD::ADD, DL, MVT::i16,
18520 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
18521 DAG.getConstant(1, MVT::i16)),
18522 DAG.getConstant(3, MVT::i16));
18524 return DAG.getNode((VT.getSizeInBits() < 16 ?
18525 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
18528 static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
18529 MVT VT = Op.getSimpleValueType();
18531 unsigned NumBits = VT.getSizeInBits();
18534 Op = Op.getOperand(0);
18535 if (VT == MVT::i8) {
18536 // Zero-extend to i32 since there is no i8 bsr.
18538 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18541 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
18542 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18543 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18545 // If src is zero (i.e. bsr sets ZF), returns NumBits.
18548 DAG.getConstant(NumBits+NumBits-1, OpVT),
18549 DAG.getConstant(X86::COND_E, MVT::i8),
18552 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
18554 // Finally xor with NumBits-1.
18555 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
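// Why the XOR above works: for nonzero i32 input, ctlz(x) = 31 - bsr(x), and
// because 31 has all five low bits set the subtraction never borrows, so
// 31 - bsr(x) == bsr(x) ^ 31; the same holds for NumBits-1 whenever NumBits is
// a power of two. For example, x = 1 gives bsr = 0 and 0 ^ 31 = 31 leading
// zeros, while x = 0x80000000 gives bsr = 31 and 31 ^ 31 = 0.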
18558 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18562 static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
18563 MVT VT = Op.getSimpleValueType();
18565 unsigned NumBits = VT.getSizeInBits();
18568 Op = Op.getOperand(0);
18569 if (VT == MVT::i8) {
18570 // Zero-extend to i32 since there is no i8 bsr.
18572 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18575 // Issue a bsr (scan bits in reverse).
18576 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18577 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18579 // And xor with NumBits-1.
18580 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
18583 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18587 static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
18588 MVT VT = Op.getSimpleValueType();
18589 unsigned NumBits = VT.getSizeInBits();
18591 Op = Op.getOperand(0);
18593 // Issue a bsf (scan bits forward) which also sets EFLAGS.
18594 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
18595 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);
18597 // If src is zero (i.e. bsf sets ZF), returns NumBits.
18600 DAG.getConstant(NumBits, VT),
18601 DAG.getConstant(X86::COND_E, MVT::i8),
18604 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
18607 // Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
18608 // ones, and then concatenate the result back.
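// A concrete sketch of the split: with AVX1 only, (add v8i32 %a, %b) becomes
//   concat_vectors (add v4i32 %a.lo, %b.lo), (add v4i32 %a.hi, %b.hi)
// where the .lo/.hi halves are the two 128-bit subvectors extracted below.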
18609 static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
18610 MVT VT = Op.getSimpleValueType();
18612 assert(VT.is256BitVector() && VT.isInteger() &&
18613 "Unsupported value type for operation");
18615 unsigned NumElems = VT.getVectorNumElements();
18618 // Extract the LHS vectors
18619 SDValue LHS = Op.getOperand(0);
18620 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
18621 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
18623 // Extract the RHS vectors
18624 SDValue RHS = Op.getOperand(1);
18625 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
18626 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
18628 MVT EltVT = VT.getVectorElementType();
18629 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
18631 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
18632 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
18633 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
18636 static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) {
18637 assert(Op.getSimpleValueType().is256BitVector() &&
18638 Op.getSimpleValueType().isInteger() &&
18639 "Only handle AVX 256-bit vector integer operation");
18640 return Lower256IntArith(Op, DAG);
18643 static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
18644 assert(Op.getSimpleValueType().is256BitVector() &&
18645 Op.getSimpleValueType().isInteger() &&
18646 "Only handle AVX 256-bit vector integer operation");
18647 return Lower256IntArith(Op, DAG);
18650 static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
18651 SelectionDAG &DAG) {
18653 MVT VT = Op.getSimpleValueType();
18655 // Decompose 256-bit ops into smaller 128-bit ops.
18656 if (VT.is256BitVector() && !Subtarget->hasInt256())
18657 return Lower256IntArith(Op, DAG);
18659 SDValue A = Op.getOperand(0);
18660 SDValue B = Op.getOperand(1);
18662 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
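// In lane terms (a sketch of the dataflow below): pmuludq only multiplies
// lanes 0 and 2 of its v4i32 inputs, producing two 64-bit products. The odd
// lanes 1 and 3 are therefore shuffled down into even positions first, the
// even and odd pairs are multiplied separately, and the four 32-bit low halves
// are interleaved back together with the <0, 4, 2, 6> shuffle.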
18663 if (VT == MVT::v4i32) {
18664 assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() &&
18665 "Should not custom lower when pmuldq is available!");
18667 // Extract the odd parts.
18668 static const int UnpackMask[] = { 1, -1, 3, -1 };
18669 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
18670 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
18672 // Multiply the even parts.
18673 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B);
18674 // Now multiply odd parts.
18675 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds);
18677 Evens = DAG.getNode(ISD::BITCAST, dl, VT, Evens);
18678 Odds = DAG.getNode(ISD::BITCAST, dl, VT, Odds);
18680 // Merge the two vectors back together with a shuffle. This expands into 2 instructions.
18682 static const int ShufMask[] = { 0, 4, 2, 6 };
18683 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
18686 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
18687 "Only know how to lower V2I64/V4I64/V8I64 multiply");
18689 // Ahi = psrlqi(a, 32);
18690 // Bhi = psrlqi(b, 32);
18692 // AloBlo = pmuludq(a, b);
18693 // AloBhi = pmuludq(a, Bhi);
18694 // AhiBlo = pmuludq(Ahi, b);
18696 // AloBhi = psllqi(AloBhi, 32);
18697 // AhiBlo = psllqi(AhiBlo, 32);
18698 // return AloBlo + AloBhi + AhiBlo;
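// This follows from writing each 64-bit lane as lo + hi*2^32:
//   a * b = (Alo + Ahi*2^32) * (Blo + Bhi*2^32)
//         = Alo*Blo + (Alo*Bhi + Ahi*Blo)*2^32   (mod 2^64)
// The Ahi*Bhi term is dropped because it is shifted entirely out of the
// low 64 bits.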
18700 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
18701 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
18703 // Bit cast to 32-bit vectors for MULUDQ
18704 EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 :
18705 (VT == MVT::v4i64) ? MVT::v8i32 : MVT::v16i32;
18706 A = DAG.getNode(ISD::BITCAST, dl, MulVT, A);
18707 B = DAG.getNode(ISD::BITCAST, dl, MulVT, B);
18708 Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi);
18709 Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi);
18711 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
18712 SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
18713 SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
18715 AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG);
18716 AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG);
18718 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
18719 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
18722 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
18723 assert(Subtarget->isTargetWin64() && "Unexpected target");
18724 EVT VT = Op.getValueType();
18725 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
18726 "Unexpected return type for lowering");
18730 switch (Op->getOpcode()) {
18731 default: llvm_unreachable("Unexpected request for libcall!");
18732 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
18733 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
18734 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
18735 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
18736 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
18737 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
18741 SDValue InChain = DAG.getEntryNode();
18743 TargetLowering::ArgListTy Args;
18744 TargetLowering::ArgListEntry Entry;
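// Win64 cannot pass 128-bit integers in registers, so spill each operand to
// a 16-byte-aligned stack slot and pass a pointer to it instead.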
18745 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
18746 EVT ArgVT = Op->getOperand(i).getValueType();
18747 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
18748 "Unexpected argument type for lowering");
18749 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
18750 Entry.Node = StackPtr;
18751 InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MachinePointerInfo(),
18753 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
18754 Entry.Ty = PointerType::get(ArgTy,0);
18755 Entry.isSExt = false;
18756 Entry.isZExt = false;
18757 Args.push_back(Entry);
18760 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
18763 TargetLowering::CallLoweringInfo CLI(DAG);
18764 CLI.setDebugLoc(dl).setChain(InChain)
18765 .setCallee(getLibcallCallingConv(LC),
18766 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
18767 Callee, std::move(Args), 0)
18768 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
18770 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
18771 return DAG.getNode(ISD::BITCAST, dl, VT, CallInfo.first);
18774 static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
18775 SelectionDAG &DAG) {
18776 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
18777 EVT VT = Op0.getValueType();
18780 assert((VT == MVT::v4i32 && Subtarget->hasSSE2()) ||
18781 (VT == MVT::v8i32 && Subtarget->hasInt256()));
18783 // PMULxD operations multiply each even value (starting at 0) of LHS with
18784 // the related value of RHS and produce a widened result.
18785 // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18786 // => <2 x i64> <ae|cg>
18788 // In other words, to have all the results, we need to perform two PMULxD:
18789 // 1. one with the even values.
18790 // 2. one with the odd values.
18791 // To achieve #2, we need to place the odd values at even positions.
18793 // Place the odd value at an even position (basically, shift all values 1
18794 // step to the left):
18795 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
18796 // <a|b|c|d> => <b|undef|d|undef>
18797 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0, Mask);
18798 // <e|f|g|h> => <f|undef|h|undef>
18799 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1, Mask);
18801 // Emit two multiplies, one for the lower 2 ints and one for the higher 2 ints.
18803 MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
18804 bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
18806 (!IsSigned || !Subtarget->hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
18807 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18808 // => <2 x i64> <ae|cg>
18809 SDValue Mul1 = DAG.getNode(ISD::BITCAST, dl, VT,
18810 DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
18811 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
18812 // => <2 x i64> <bf|dh>
18813 SDValue Mul2 = DAG.getNode(ISD::BITCAST, dl, VT,
18814 DAG.getNode(Opcode, dl, MulVT, Odd0, Odd1));
18816 // Shuffle it back into the right order.
18817 SDValue Highs, Lows;
18818 if (VT == MVT::v8i32) {
18819 const int HighMask[] = {1, 9, 3, 11, 5, 13, 7, 15};
18820 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18821 const int LowMask[] = {0, 8, 2, 10, 4, 12, 6, 14};
18822 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18824 const int HighMask[] = {1, 5, 3, 7};
18825 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18826 const int LowMask[] = {0, 4, 2, 6};
18827 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18830 // If we have a signed multiply but no PMULDQ, fix up the high parts of an
18831 // unsigned multiply.
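// The standard fixup from an unsigned to a signed high-half multiply is:
//   (a *s b)_hi = (a *u b)_hi - (a < 0 ? b : 0) - (b < 0 ? a : 0)
// The two conditional terms are computed below as (x s>> 31) & y.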
18832 if (IsSigned && !Subtarget->hasSSE41()) {
18834 DAG.getConstant(31, DAG.getTargetLoweringInfo().getShiftAmountTy(VT));
18835 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
18836 DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
18837 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
18838 DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0);
18840 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
18841 Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup);
18844 // The first result of MUL_LOHI is actually the low value, followed by the high one.
18846 SDValue Ops[] = {Lows, Highs};
18847 return DAG.getMergeValues(Ops, dl);
18850 static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
18851 const X86Subtarget *Subtarget) {
18852 MVT VT = Op.getSimpleValueType();
18854 SDValue R = Op.getOperand(0);
18855 SDValue Amt = Op.getOperand(1);
18857 // Optimize shl/srl/sra with constant shift amount.
18858 if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
18859 if (auto *ShiftConst = BVAmt->getConstantSplatNode()) {
18860 uint64_t ShiftAmt = ShiftConst->getZExtValue();
18862 if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
18863 (Subtarget->hasInt256() &&
18864 (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18865 (Subtarget->hasAVX512() &&
18866 (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18867 if (Op.getOpcode() == ISD::SHL)
18868 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
18870 if (Op.getOpcode() == ISD::SRL)
18871 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
18873 if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64)
18874 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
18878 if (VT == MVT::v16i8) {
18879 if (Op.getOpcode() == ISD::SHL) {
18880 // Make a large shift.
18881 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18882 MVT::v8i16, R, ShiftAmt,
18884 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18885 // Zero out the rightmost bits.
18886 SmallVector<SDValue, 16> V(16,
18887 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18889 return DAG.getNode(ISD::AND, dl, VT, SHL,
18890 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18892 if (Op.getOpcode() == ISD::SRL) {
18893 // Make a large shift.
18894 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18895 MVT::v8i16, R, ShiftAmt,
18897 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18898 // Zero out the leftmost bits.
18899 SmallVector<SDValue, 16> V(16,
18900 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18902 return DAG.getNode(ISD::AND, dl, VT, SRL,
18903 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18905 if (Op.getOpcode() == ISD::SRA) {
18906 if (ShiftAmt == 7) {
18907 // R s>> 7 === R s< 0
18908 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18909 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18912 // R s>> a === ((R u>> a) ^ m) - m
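// Here m = (0x80 >> a) per byte, i.e. only the shifted-down sign bit is set;
// XORing and then subtracting m sign-extends the logically shifted value.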
18913 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18914 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt,
18916 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18917 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18918 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18921 llvm_unreachable("Unknown shift opcode.");
18924 if (Subtarget->hasInt256() && VT == MVT::v32i8) {
18925 if (Op.getOpcode() == ISD::SHL) {
18926 // Make a large shift.
18927 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18928 MVT::v16i16, R, ShiftAmt,
18930 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18931 // Zero out the rightmost bits.
18932 SmallVector<SDValue, 32> V(32,
18933 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18935 return DAG.getNode(ISD::AND, dl, VT, SHL,
18936 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18938 if (Op.getOpcode() == ISD::SRL) {
18939 // Make a large shift.
18940 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18941 MVT::v16i16, R, ShiftAmt,
18943 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18944 // Zero out the leftmost bits.
18945 SmallVector<SDValue, 32> V(32,
18946 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18948 return DAG.getNode(ISD::AND, dl, VT, SRL,
18949 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18951 if (Op.getOpcode() == ISD::SRA) {
18952 if (ShiftAmt == 7) {
18953 // R s>> 7 === R s< 0
18954 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18955 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18958 // R s>> a === ((R u>> a) ^ m) - m
18959 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18960 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt,
18962 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18963 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18964 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18967 llvm_unreachable("Unknown shift opcode.");
18972 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
18973 if (!Subtarget->is64Bit() &&
18974 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) &&
18975 Amt.getOpcode() == ISD::BITCAST &&
18976 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
18977 Amt = Amt.getOperand(0);
18978 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
18979 VT.getVectorNumElements();
18980 unsigned RatioInLog2 = Log2_32_Ceil(Ratio);
18981 uint64_t ShiftAmt = 0;
18982 for (unsigned i = 0; i != Ratio; ++i) {
18983 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i));
18987 ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2)));
18989 // Check remaining shift amounts.
18990 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
18991 uint64_t ShAmt = 0;
18992 for (unsigned j = 0; j != Ratio; ++j) {
18993 ConstantSDNode *C =
18994 dyn_cast<ConstantSDNode>(Amt.getOperand(i + j));
18998 ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2)));
19000 if (ShAmt != ShiftAmt)
19003 switch (Op.getOpcode()) {
19005 llvm_unreachable("Unknown shift opcode!");
19007 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
19010 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
19013 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
19021 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
19022 const X86Subtarget* Subtarget) {
19023 MVT VT = Op.getSimpleValueType();
19025 SDValue R = Op.getOperand(0);
19026 SDValue Amt = Op.getOperand(1);
19028 if ((VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) ||
19029 VT == MVT::v4i32 || VT == MVT::v8i16 ||
19030 (Subtarget->hasInt256() &&
19031 ((VT == MVT::v4i64 && Op.getOpcode() != ISD::SRA) ||
19032 VT == MVT::v8i32 || VT == MVT::v16i16)) ||
19033 (Subtarget->hasAVX512() && (VT == MVT::v8i64 || VT == MVT::v16i32))) {
19035 EVT EltVT = VT.getVectorElementType();
19037 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Amt)) {
19038 // Check if this build_vector node is doing a splat.
19039 // If so, then set BaseShAmt equal to the splat value.
19040 BaseShAmt = BV->getSplatValue();
19041 if (BaseShAmt && BaseShAmt.getOpcode() == ISD::UNDEF)
19042 BaseShAmt = SDValue();
19044 if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
19045 Amt = Amt.getOperand(0);
19047 ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt);
19048 if (SVN && SVN->isSplat()) {
19049 unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
19050 SDValue InVec = Amt.getOperand(0);
19051 if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
19052 assert((SplatIdx < InVec.getValueType().getVectorNumElements()) &&
19053 "Unexpected shuffle index found!");
19054 BaseShAmt = InVec.getOperand(SplatIdx);
19055 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
19056 if (ConstantSDNode *C =
19057 dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
19058 if (C->getZExtValue() == SplatIdx)
19059 BaseShAmt = InVec.getOperand(1);
19064 // Avoid introducing an extract element from a shuffle.
19065 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec,
19066 DAG.getIntPtrConstant(SplatIdx));
19070 if (BaseShAmt.getNode()) {
19071 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
19072 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
19073 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
19074 else if (EltVT.bitsLT(MVT::i32))
19075 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
19077 switch (Op.getOpcode()) {
19079 llvm_unreachable("Unknown shift opcode!");
19081 switch (VT.SimpleTy) {
19082 default: return SDValue();
19091 return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG);
19094 switch (VT.SimpleTy) {
19095 default: return SDValue();
19102 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG);
19105 switch (VT.SimpleTy) {
19106 default: return SDValue();
19115 return getTargetVShiftNode(X86ISD::VSRLI, dl, VT, R, BaseShAmt, DAG);
19121 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
19122 if (!Subtarget->is64Bit() &&
19123 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64) ||
19124 (Subtarget->hasAVX512() && VT == MVT::v8i64)) &&
19125 Amt.getOpcode() == ISD::BITCAST &&
19126 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
19127 Amt = Amt.getOperand(0);
19128 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
19129 VT.getVectorNumElements();
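// Ratio is the number of build_vector elements that make up one logical
// shift amount (e.g. two i32 halves per i64 lane in 32-bit mode). If every
// group of Ratio operands repeats the first group, all lanes share one
// shift amount and the vector-by-scalar VSHL/VSRL/VSRA forms below apply.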
19130 std::vector<SDValue> Vals(Ratio);
19131 for (unsigned i = 0; i != Ratio; ++i)
19132 Vals[i] = Amt.getOperand(i);
19133 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
19134 for (unsigned j = 0; j != Ratio; ++j)
19135 if (Vals[j] != Amt.getOperand(i + j))
19138 switch (Op.getOpcode()) {
19140 llvm_unreachable("Unknown shift opcode!");
19142 return DAG.getNode(X86ISD::VSHL, dl, VT, R, Op.getOperand(1));
19144 return DAG.getNode(X86ISD::VSRL, dl, VT, R, Op.getOperand(1));
19146 return DAG.getNode(X86ISD::VSRA, dl, VT, R, Op.getOperand(1));
19153 static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
19154 SelectionDAG &DAG) {
19155 MVT VT = Op.getSimpleValueType();
19157 SDValue R = Op.getOperand(0);
19158 SDValue Amt = Op.getOperand(1);
19161 assert(VT.isVector() && "Custom lowering only for vector shifts!");
19162 assert(Subtarget->hasSSE2() && "Only custom lower when we have SSE2!");
19164 V = LowerScalarImmediateShift(Op, DAG, Subtarget);
19168 V = LowerScalarVariableShift(Op, DAG, Subtarget);
19172 if (Subtarget->hasAVX512() && (VT == MVT::v16i32 || VT == MVT::v8i64))
19174 // AVX2 has VPSLLV/VPSRAV/VPSRLV.
19175 if (Subtarget->hasInt256()) {
19176 if (Op.getOpcode() == ISD::SRL &&
19177 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
19178 VT == MVT::v4i64 || VT == MVT::v8i32))
19180 if (Op.getOpcode() == ISD::SHL &&
19181 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
19182 VT == MVT::v4i64 || VT == MVT::v8i32))
19184 if (Op.getOpcode() == ISD::SRA && (VT == MVT::v4i32 || VT == MVT::v8i32))
19188 // If possible, lower this packed shift into a vector multiply instead of
19189 // expanding it into a sequence of scalar shifts.
19190 // Do this only if the vector shift count is a constant build_vector.
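// For example:
//   (shl <4 x i32> %a, <i32 1, i32 2, i32 3, i32 4>)
// becomes
//   (mul <4 x i32> %a, <i32 2, i32 4, i32 8, i32 16>)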
19191 if (Op.getOpcode() == ISD::SHL &&
19192 (VT == MVT::v8i16 || VT == MVT::v4i32 ||
19193 (Subtarget->hasInt256() && VT == MVT::v16i16)) &&
19194 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
19195 SmallVector<SDValue, 8> Elts;
19196 EVT SVT = VT.getScalarType();
19197 unsigned SVTBits = SVT.getSizeInBits();
19198 const APInt &One = APInt(SVTBits, 1);
19199 unsigned NumElems = VT.getVectorNumElements();
19201 for (unsigned i=0; i !=NumElems; ++i) {
19202 SDValue Op = Amt->getOperand(i);
19203 if (Op->getOpcode() == ISD::UNDEF) {
19204 Elts.push_back(Op);
19208 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
19209 const APInt &C = APInt(SVTBits, ND->getAPIntValue().getZExtValue());
19210 uint64_t ShAmt = C.getZExtValue();
19211 if (ShAmt >= SVTBits) {
19212 Elts.push_back(DAG.getUNDEF(SVT));
19215 Elts.push_back(DAG.getConstant(One.shl(ShAmt), SVT));
19217 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
19218 return DAG.getNode(ISD::MUL, dl, VT, R, BV);
19221 // Lower SHL with variable shift amount.
19222 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
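// Build 2^Amt per lane with float arithmetic: (Amt << 23) + 0x3f800000 is
// the IEEE-754 single-precision encoding of 2^Amt (the amount lands in the
// exponent field on top of the bias for 1.0f). Converting back to integer
// then lets the variable shift be done as a single vector multiply.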
19223 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT));
19225 Op = DAG.getNode(ISD::ADD, dl, VT, Op, DAG.getConstant(0x3f800000U, VT));
19226 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
19227 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
19228 return DAG.getNode(ISD::MUL, dl, VT, Op, R);
19231 // If possible, lower this shift as a sequence of two shifts by
19232 // constant plus a MOVSS/MOVSD instead of scalarizing it.
19234 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
19236 // Could be rewritten as:
19237 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
19239 // The advantage is that the two shifts from the example would be
19240 // lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
19241 // the vector shift into four scalar shifts plus four pairs of vector insert/extract.
19243 if ((VT == MVT::v8i16 || VT == MVT::v4i32) &&
19244 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
19245 unsigned TargetOpcode = X86ISD::MOVSS;
19246 bool CanBeSimplified;
19247 // The splat value for the first packed shift (the 'X' from the example).
19248 SDValue Amt1 = Amt->getOperand(0);
19249 // The splat value for the second packed shift (the 'Y' from the example).
19250 SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) :
19251 Amt->getOperand(2);
19253 // See if it is possible to replace this node with a sequence of
19254 // two shifts followed by a MOVSS/MOVSD
19255 if (VT == MVT::v4i32) {
19256 // Check if it is legal to use a MOVSS.
19257 CanBeSimplified = Amt2 == Amt->getOperand(2) &&
19258 Amt2 == Amt->getOperand(3);
19259 if (!CanBeSimplified) {
19260 // Otherwise, check if we can still simplify this node using a MOVSD.
19261 CanBeSimplified = Amt1 == Amt->getOperand(1) &&
19262 Amt->getOperand(2) == Amt->getOperand(3);
19263 TargetOpcode = X86ISD::MOVSD;
19264 Amt2 = Amt->getOperand(2);
19267 // Do similar checks for the case where the machine value type is MVT::v8i16.
19269 CanBeSimplified = Amt1 == Amt->getOperand(1);
19270 for (unsigned i=3; i != 8 && CanBeSimplified; ++i)
19271 CanBeSimplified = Amt2 == Amt->getOperand(i);
19273 if (!CanBeSimplified) {
19274 TargetOpcode = X86ISD::MOVSD;
19275 CanBeSimplified = true;
19276 Amt2 = Amt->getOperand(4);
19277 for (unsigned i=0; i != 4 && CanBeSimplified; ++i)
19278 CanBeSimplified = Amt1 == Amt->getOperand(i);
19279 for (unsigned j=4; j != 8 && CanBeSimplified; ++j)
19280 CanBeSimplified = Amt2 == Amt->getOperand(j);
19284 if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
19285 isa<ConstantSDNode>(Amt2)) {
19286 // Replace this node with two shifts followed by a MOVSS/MOVSD.
19287 EVT CastVT = MVT::v4i32;
19289 DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), VT);
19290 SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
19292 DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), VT);
19293 SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
19294 if (TargetOpcode == X86ISD::MOVSD)
19295 CastVT = MVT::v2i64;
19296 SDValue BitCast1 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift1);
19297 SDValue BitCast2 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift2);
19298 SDValue Result = getTargetShuffleNode(TargetOpcode, dl, CastVT, BitCast2,
19300 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
19304 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
19305 assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq.");
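// Emulate the variable byte shift by testing the shift amount one bit at a
// time. Shifting Amt left by 5 moves bit 2 of each amount into bit 7, where
// the PCMPEQ-against-0x80 trick below turns it into a per-byte select mask;
// the value is then conditionally shifted by 4, Amt is doubled to expose the
// next bit (conditional shift by 2), and doubled once more for the final
// conditional shift by 1 (R + R).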
19308 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, VT));
19309 Op = DAG.getNode(ISD::BITCAST, dl, VT, Op);
19311 // Turn 'a' into a mask suitable for VSELECT
19312 SDValue VSelM = DAG.getConstant(0x80, VT);
19313 SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19314 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19316 SDValue CM1 = DAG.getConstant(0x0f, VT);
19317 SDValue CM2 = DAG.getConstant(0x3f, VT);
19319 // r = VSELECT(r, psllw(r & (char16)15, 4), a);
19320 SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1);
19321 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 4, DAG);
19322 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19323 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19326 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19327 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19328 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19330 // r = VSELECT(r, psllw(r & (char16)63, 2), a);
19331 M = DAG.getNode(ISD::AND, dl, VT, R, CM2);
19332 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 2, DAG);
19333 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19334 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19337 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19338 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19339 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19341 // return VSELECT(r, r+r, a);
19342 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel,
19343 DAG.getNode(ISD::ADD, dl, VT, R, R), R);
19347 // It's worth extending once and using the v8i32 shifts for 16-bit types, but
19348 // the extra overheads to get from v16i8 to v8i32 make the existing SSE
19349 // solution better.
19350 if (Subtarget->hasInt256() && VT == MVT::v8i16) {
19351 MVT NewVT = VT == MVT::v8i16 ? MVT::v8i32 : MVT::v16i16;
19353 Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
19354 R = DAG.getNode(ExtOpc, dl, NewVT, R);
19355 Amt = DAG.getNode(ISD::ANY_EXTEND, dl, NewVT, Amt);
19356 return DAG.getNode(ISD::TRUNCATE, dl, VT,
19357 DAG.getNode(Op.getOpcode(), dl, NewVT, R, Amt));
19360 // Decompose 256-bit shifts into smaller 128-bit shifts.
19361 if (VT.is256BitVector()) {
19362 unsigned NumElems = VT.getVectorNumElements();
19363 MVT EltVT = VT.getVectorElementType();
19364 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19366 // Extract the two vectors
19367 SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
19368 SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);
19370 // Recreate the shift amount vectors
19371 SDValue Amt1, Amt2;
19372 if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
19373 // Constant shift amount
19374 SmallVector<SDValue, 4> Amt1Csts;
19375 SmallVector<SDValue, 4> Amt2Csts;
19376 for (unsigned i = 0; i != NumElems/2; ++i)
19377 Amt1Csts.push_back(Amt->getOperand(i));
19378 for (unsigned i = NumElems/2; i != NumElems; ++i)
19379 Amt2Csts.push_back(Amt->getOperand(i));
19381 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
19382 Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt2Csts);
19384 // Variable shift amount
19385 Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
19386 Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
19389 // Issue new vector shifts for the smaller types
19390 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
19391 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);
19393 // Concatenate the result back
19394 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
19400 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
19401 // Lower the "add/sub/mul with overflow" instruction into a regular instruction plus
19402 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
19403 // looks for this combo and may remove the "setcc" instruction if the "setcc"
19404 // has only one use.
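// For example, (i32, i1) = saddo %a, %b is lowered to:
//   Sum = X86ISD::ADD %a, %b          (second result is EFLAGS)
//   Ovf = X86ISD::SETCC COND_O, Sum.getValue(1)
// and the two values are returned together with MERGE_VALUES.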
19405 SDNode *N = Op.getNode();
19406 SDValue LHS = N->getOperand(0);
19407 SDValue RHS = N->getOperand(1);
19408 unsigned BaseOp = 0;
19411 switch (Op.getOpcode()) {
19412 default: llvm_unreachable("Unknown ovf instruction!");
19414 // An add of one will be selected as an INC. Note that INC doesn't
19415 // set CF, so we can't do this for UADDO.
19416 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19418 BaseOp = X86ISD::INC;
19419 Cond = X86::COND_O;
19422 BaseOp = X86ISD::ADD;
19423 Cond = X86::COND_O;
19426 BaseOp = X86ISD::ADD;
19427 Cond = X86::COND_B;
19430 // A subtract of one will be selected as a DEC. Note that DEC doesn't
19431 // set CF, so we can't do this for USUBO.
19432 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19434 BaseOp = X86ISD::DEC;
19435 Cond = X86::COND_O;
19438 BaseOp = X86ISD::SUB;
19439 Cond = X86::COND_O;
19442 BaseOp = X86ISD::SUB;
19443 Cond = X86::COND_B;
19446 BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL;
19447 Cond = X86::COND_O;
19449 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
19450 if (N->getValueType(0) == MVT::i8) {
19451 BaseOp = X86ISD::UMUL8;
19452 Cond = X86::COND_O;
19455 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
19457 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);
19460 DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
19461 DAG.getConstant(X86::COND_O, MVT::i32),
19462 SDValue(Sum.getNode(), 2));
19464 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19468 // Also sets EFLAGS.
19469 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
19470 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
19473 DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
19474 DAG.getConstant(Cond, MVT::i32),
19475 SDValue(Sum.getNode(), 1));
19477 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19480 // Sign extension of the low part of vector elements. This may be used either
19481 // when sign extend instructions are not available or if the vector element
19482 // sizes already match the sign-extended size. If the vector elements are in
19483 // their pre-extended size and sign extend instructions are available, that will
19484 // be handled by LowerSIGN_EXTEND.
19485 SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
19486 SelectionDAG &DAG) const {
19488 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
19489 MVT VT = Op.getSimpleValueType();
19491 if (!Subtarget->hasSSE2() || !VT.isVector())
19494 unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
19495 ExtraVT.getScalarType().getSizeInBits();
19497 switch (VT.SimpleTy) {
19498 default: return SDValue();
19501 if (!Subtarget->hasFp256())
19503 if (!Subtarget->hasInt256()) {
19504 // needs to be split
19505 unsigned NumElems = VT.getVectorNumElements();
19507 // Extract the LHS vectors
19508 SDValue LHS = Op.getOperand(0);
19509 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
19510 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
19512 MVT EltVT = VT.getVectorElementType();
19513 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19515 EVT ExtraEltVT = ExtraVT.getVectorElementType();
19516 unsigned ExtraNumElems = ExtraVT.getVectorNumElements();
19517 ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT,
19519 SDValue Extra = DAG.getValueType(ExtraVT);
19521 LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra);
19522 LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra);
19524 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);
19529 SDValue Op0 = Op.getOperand(0);
19531 // This is a sign extension of some low part of vector elements without
19532 // changing the size of the vector elements themselves:
19533 // Shift-Left + Shift-Right-Algebraic.
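// For example, sign-extending the low 16 bits of each i32 element uses
// BitsDiff = 16:  (x << 16) s>> 16.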
19534 SDValue Shl = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Op0,
19536 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Shl, BitsDiff,
19542 /// Returns true if the operand type is exactly twice the native width, and
19543 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
19544 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
19545 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
19546 bool X86TargetLowering::needsCmpXchgNb(const Type *MemType) const {
19547 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
19550 return !Subtarget->is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
19551 else if (OpWidth == 128)
19552 return Subtarget->hasCmpxchg16b();
19557 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
19558 return needsCmpXchgNb(SI->getValueOperand()->getType());
19561 // Note: this turns large loads into lock cmpxchg8b/16b.
19562 // FIXME: On 32 bits x86, fild/movq might be faster than lock cmpxchg8b.
19563 bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
19564 auto PTy = cast<PointerType>(LI->getPointerOperand()->getType());
19565 return needsCmpXchgNb(PTy->getElementType());
19568 bool X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
19569 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19570 const Type *MemType = AI->getType();
19572 // If the operand is too big, we must see if cmpxchg8/16b is available
19573 // and default to library calls otherwise.
19574 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19575 return needsCmpXchgNb(MemType);
19577 AtomicRMWInst::BinOp Op = AI->getOperation();
19580 llvm_unreachable("Unknown atomic operation");
19581 case AtomicRMWInst::Xchg:
19582 case AtomicRMWInst::Add:
19583 case AtomicRMWInst::Sub:
19584 // It's better to use xadd, xsub or xchg for these in all cases.
19586 case AtomicRMWInst::Or:
19587 case AtomicRMWInst::And:
19588 case AtomicRMWInst::Xor:
19589 // If the atomicrmw's result isn't actually used, we can just add a "lock"
19590 // prefix to a normal instruction for these operations.
19591 return !AI->use_empty();
19592 case AtomicRMWInst::Nand:
19593 case AtomicRMWInst::Max:
19594 case AtomicRMWInst::Min:
19595 case AtomicRMWInst::UMax:
19596 case AtomicRMWInst::UMin:
19597 // These always require a non-trivial set of data operations on x86. We must
19598 // use a cmpxchg loop.
19603 static bool hasMFENCE(const X86Subtarget& Subtarget) {
19604 // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
19605 // no-sse2). There isn't any reason to disable it if the target processor supports it.
19607 return Subtarget.hasSSE2() || Subtarget.is64Bit();
19611 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
19612 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19613 const Type *MemType = AI->getType();
19614 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
19615 // there is no benefit in turning such RMWs into loads, and it is actually
19616 // harmful as it introduces a mfence.
19617 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19620 auto Builder = IRBuilder<>(AI);
19621 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
19622 auto SynchScope = AI->getSynchScope();
19623 // We must restrict the ordering to avoid generating loads with Release or
19624 // ReleaseAcquire orderings.
19625 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
19626 auto Ptr = AI->getPointerOperand();
19628 // Before the load we need a fence. Here is an example lifted from
19629 // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
19632 // x.store(1, relaxed);
19633 // r1 = y.fetch_add(0, release);
19635 // y.fetch_add(42, acquire);
19636 // r2 = x.load(relaxed);
19637 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
19638 // lowered to just a load without a fence. A mfence flushes the store buffer,
19639 // making the optimization clearly correct.
19640 // FIXME: it is required if isAtLeastRelease(Order) but it is not clear
19641 // otherwise, we might be able to be more aggressive on relaxed idempotent
19642 // rmw. In practice, they do not look useful, so we don't try to be
19643 // especially clever.
19644 if (SynchScope == SingleThread) {
19645 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
19646 // the IR level, so we must wrap it in an intrinsic.
19648 } else if (hasMFENCE(*Subtarget)) {
19649 Function *MFence = llvm::Intrinsic::getDeclaration(M,
19650 Intrinsic::x86_sse2_mfence);
19651 Builder.CreateCall(MFence);
19653 // FIXME: it might make sense to use a locked operation here but on a
19654 // different cache-line to prevent cache-line bouncing. In practice it
19655 // is probably a small win, and x86 processors without mfence are rare
19656 // enough that we do not bother.
19660 // Finally we can emit the atomic load.
19661 LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
19662 AI->getType()->getPrimitiveSizeInBits());
19663 Loaded->setAtomic(Order, SynchScope);
19664 AI->replaceAllUsesWith(Loaded);
19665 AI->eraseFromParent();
19669 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
19670 SelectionDAG &DAG) {
19672 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
19673 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
19674 SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
19675 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
19677 // The only fence that needs an instruction is a sequentially-consistent
19678 // cross-thread fence.
19679 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
19680 if (hasMFENCE(*Subtarget))
19681 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
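// Without MFENCE, fall back to a locked no-op read-modify-write on the top
// of the stack ("lock or dword ptr [esp], 0"), which also acts as a full
// memory barrier.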
19683 SDValue Chain = Op.getOperand(0);
19684 SDValue Zero = DAG.getConstant(0, MVT::i32);
19686 DAG.getRegister(X86::ESP, MVT::i32), // Base
19687 DAG.getTargetConstant(1, MVT::i8), // Scale
19688 DAG.getRegister(0, MVT::i32), // Index
19689 DAG.getTargetConstant(0, MVT::i32), // Disp
19690 DAG.getRegister(0, MVT::i32), // Segment.
19694 SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
19695 return SDValue(Res, 0);
19698 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
19699 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
19702 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
19703 SelectionDAG &DAG) {
19704 MVT T = Op.getSimpleValueType();
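// CMPXCHG implicitly uses the accumulator (AL/AX/EAX/RAX): the expected
// value is copied into it below, the old memory value is copied back out of
// it, and ZF in EFLAGS reports whether the exchange succeeded.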
19708 switch(T.SimpleTy) {
19709 default: llvm_unreachable("Invalid value type!");
19710 case MVT::i8: Reg = X86::AL; size = 1; break;
19711 case MVT::i16: Reg = X86::AX; size = 2; break;
19712 case MVT::i32: Reg = X86::EAX; size = 4; break;
19714 assert(Subtarget->is64Bit() && "Node not type legal!");
19715 Reg = X86::RAX; size = 8;
19718 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
19719 Op.getOperand(2), SDValue());
19720 SDValue Ops[] = { cpIn.getValue(0),
19723 DAG.getTargetConstant(size, MVT::i8),
19724 cpIn.getValue(1) };
19725 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
19726 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
19727 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
19731 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
19732 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
19733 MVT::i32, cpOut.getValue(2));
19734 SDValue Success = DAG.getNode(X86ISD::SETCC, DL, Op->getValueType(1),
19735 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
19737 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
19738 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
19739 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
19743 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
19744 SelectionDAG &DAG) {
19745 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
19746 MVT DstVT = Op.getSimpleValueType();
19748 if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) {
19749 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
19750 if (DstVT != MVT::f64)
19751 // This conversion needs to be expanded.
19754 SDValue InVec = Op->getOperand(0);
19756 unsigned NumElts = SrcVT.getVectorNumElements();
19757 EVT SVT = SrcVT.getVectorElementType();
19759 // Widen the input vector in the case of MVT::v2i32.
19760 // Example: from MVT::v2i32 to MVT::v4i32.
19761 SmallVector<SDValue, 16> Elts;
19762 for (unsigned i = 0, e = NumElts; i != e; ++i)
19763 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
19764 DAG.getIntPtrConstant(i)));
19766 // Explicitly mark the extra elements as Undef.
19767 SDValue Undef = DAG.getUNDEF(SVT);
19768 for (unsigned i = NumElts, e = NumElts * 2; i != e; ++i)
19769 Elts.push_back(Undef);
19771 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
19772 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
19773 SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
19774 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
19775 DAG.getIntPtrConstant(0));
19778 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
19779 Subtarget->hasMMX() && "Unexpected custom BITCAST");
19780 assert((DstVT == MVT::i64 ||
19781 (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
19782 "Unexpected custom BITCAST");
19783 // i64 <=> MMX conversions are Legal.
19784 if (SrcVT==MVT::i64 && DstVT.isVector())
19786 if (DstVT==MVT::i64 && SrcVT.isVector())
19788 // MMX <=> MMX conversions are Legal.
19789 if (SrcVT.isVector() && DstVT.isVector())
19791 // All other conversions need to be expanded.
19795 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
19796 SelectionDAG &DAG) {
19797 SDNode *Node = Op.getNode();
19800 Op = Op.getOperand(0);
19801 EVT VT = Op.getValueType();
19802 assert((VT.is128BitVector() || VT.is256BitVector()) &&
19803 "CTPOP lowering only implemented for 128/256-bit wide vector types");
19805 unsigned NumElts = VT.getVectorNumElements();
19806 EVT EltVT = VT.getVectorElementType();
19807 unsigned Len = EltVT.getSizeInBits();
19809 // This is the vectorized version of the "best" algorithm from
19810 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
19811 // with a minor tweak to use a series of adds + shifts instead of vector
19812 // multiplications. Implemented for the v2i64, v4i64, v4i32, v8i32 types:
19814 // v2i64, v4i64, v4i32 => Only profitable w/ popcnt disabled
19815 // v8i32 => Always profitable
19817 // FIXME: There are a couple of possible improvements:
19819 // 1) Support for i8 and i16 vectors (needs measurements if popcnt enabled).
19820 // 2) Use strategies from http://wm.ite.pl/articles/sse-popcount.html
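// For reference, the scalar steps being vectorized below are:
//   v = v - ((v >> 1) & 0x55555555...);
//   v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...);
//   v = (v + (v >> 4)) & 0x0F0F0F0F...;
//   v = (v * 0x01010101...) >> (Len - 8);  // replaced below by adds + shifts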
19822 assert(EltVT.isInteger() && (Len == 32 || Len == 64) && Len % 8 == 0 &&
19823 "CTPOP not implemented for this vector element type.");
19825 // X86 canonicalizes ANDs to vXi64, so generate the appropriate bitcasts to avoid
19826 // extra legalization.
19827 bool NeedsBitcast = EltVT == MVT::i32;
19828 MVT BitcastVT = VT.is256BitVector() ? MVT::v4i64 : MVT::v2i64;
19830 SDValue Cst55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), EltVT);
19831 SDValue Cst33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), EltVT);
19832 SDValue Cst0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), EltVT);
19834 // v = v - ((v >> 1) & 0x55555555...)
19835 SmallVector<SDValue, 8> Ones(NumElts, DAG.getConstant(1, EltVT));
19836 SDValue OnesV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ones);
19837 SDValue Srl = DAG.getNode(ISD::SRL, dl, VT, Op, OnesV);
19839 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19841 SmallVector<SDValue, 8> Mask55(NumElts, Cst55);
19842 SDValue M55 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask55);
19844 M55 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M55);
19846 SDValue And = DAG.getNode(ISD::AND, dl, Srl.getValueType(), Srl, M55);
19847 if (VT != And.getValueType())
19848 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19849 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Op, And);
19851 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
19852 SmallVector<SDValue, 8> Mask33(NumElts, Cst33);
19853 SDValue M33 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask33);
19854 SmallVector<SDValue, 8> Twos(NumElts, DAG.getConstant(2, EltVT));
19855 SDValue TwosV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Twos);
19857 Srl = DAG.getNode(ISD::SRL, dl, VT, Sub, TwosV);
19858 if (NeedsBitcast) {
19859 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19860 M33 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M33);
19861 Sub = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Sub);
19864 SDValue AndRHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Srl, M33);
19865 SDValue AndLHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Sub, M33);
19866 if (VT != AndRHS.getValueType()) {
19867 AndRHS = DAG.getNode(ISD::BITCAST, dl, VT, AndRHS);
19868 AndLHS = DAG.getNode(ISD::BITCAST, dl, VT, AndLHS);
19870 SDValue Add = DAG.getNode(ISD::ADD, dl, VT, AndLHS, AndRHS);
19872 // v = (v + (v >> 4)) & 0x0F0F0F0F...
19873 SmallVector<SDValue, 8> Fours(NumElts, DAG.getConstant(4, EltVT));
19874 SDValue FoursV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Fours);
19875 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, FoursV);
19876 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19878 SmallVector<SDValue, 8> Mask0F(NumElts, Cst0F);
19879 SDValue M0F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask0F);
19880 if (NeedsBitcast) {
19881 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19882 M0F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M0F);
19884 And = DAG.getNode(ISD::AND, dl, M0F.getValueType(), Add, M0F);
19885 if (VT != And.getValueType())
19886 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19888 // The algorithm mentioned above uses:
19889 // v = (v * 0x01010101...) >> (Len - 8)
19891 // Change it to use vector adds + vector shifts which yield faster results on
19892 // Haswell than using vector integer multiplication.
19894 // For i32 elements:
19895 // v = v + (v >> 8)
19896 // v = v + (v >> 16)
19898 // For i64 elements:
19899 // v = v + (v >> 8)
19900 // v = v + (v >> 16)
19901 // v = v + (v >> 32)
19904 SmallVector<SDValue, 8> Csts;
19905 for (unsigned i = 8; i <= Len/2; i *= 2) {
19906 Csts.assign(NumElts, DAG.getConstant(i, EltVT));
19907 SDValue CstsV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Csts);
19908 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, CstsV);
19909 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19913 // The result is in the least significant 6 bits for i32 and 7 bits for i64.
19914 SDValue Cst3F = DAG.getConstant(APInt(Len, Len == 32 ? 0x3F : 0x7F), EltVT);
19915 SmallVector<SDValue, 8> Cst3FV(NumElts, Cst3F);
19916 SDValue M3F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Cst3FV);
19917 if (NeedsBitcast) {
19918 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19919 M3F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M3F);
19921 And = DAG.getNode(ISD::AND, dl, M3F.getValueType(), Add, M3F);
19922 if (VT != And.getValueType())
19923 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19928 static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
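// x86 has LOCK XADD but no fetch-and-sub, so rewrite the atomic subtract as
// an atomic add of the negated operand (0 - RHS).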
19929 SDNode *Node = Op.getNode();
19931 EVT T = Node->getValueType(0);
19932 SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
19933 DAG.getConstant(0, T), Node->getOperand(2));
19934 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
19935 cast<AtomicSDNode>(Node)->getMemoryVT(),
19936 Node->getOperand(0),
19937 Node->getOperand(1), negOp,
19938 cast<AtomicSDNode>(Node)->getMemOperand(),
19939 cast<AtomicSDNode>(Node)->getOrdering(),
19940 cast<AtomicSDNode>(Node)->getSynchScope());
19943 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
19944 SDNode *Node = Op.getNode();
19946 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
19948 // Convert seq_cst store -> xchg
19949 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
19950 // FIXME: On 32-bit, store -> fist or movq would be more efficient
19951 // (The only way to get a 16-byte store is cmpxchg16b)
19952 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
19953 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
19954 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
19955 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
19956 cast<AtomicSDNode>(Node)->getMemoryVT(),
19957 Node->getOperand(0),
19958 Node->getOperand(1), Node->getOperand(2),
19959 cast<AtomicSDNode>(Node)->getMemOperand(),
19960 cast<AtomicSDNode>(Node)->getOrdering(),
19961 cast<AtomicSDNode>(Node)->getSynchScope());
19962 return Swap.getValue(1);
19964 // Other atomic stores have a simple pattern.
19968 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
19969 EVT VT = Op.getNode()->getSimpleValueType(0);
19971 // Let legalize expand this if it isn't a legal type yet.
19972 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
19975 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
19978 bool ExtraOp = false;
19979 switch (Op.getOpcode()) {
19980 default: llvm_unreachable("Invalid code");
19981 case ISD::ADDC: Opc = X86ISD::ADD; break;
19982 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
19983 case ISD::SUBC: Opc = X86ISD::SUB; break;
19984 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
19988 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19990 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19991 Op.getOperand(1), Op.getOperand(2));
19994 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
19995 SelectionDAG &DAG) {
19996 assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit());
19998 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
19999 // which returns the values as { float, float } (in XMM0) or
20000 // { double, double } (which is returned in XMM0, XMM1).
20002 SDValue Arg = Op.getOperand(0);
20003 EVT ArgVT = Arg.getValueType();
20004 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
20006 TargetLowering::ArgListTy Args;
20007 TargetLowering::ArgListEntry Entry;
20011 Entry.isSExt = false;
20012 Entry.isZExt = false;
20013 Args.push_back(Entry);
20015 bool isF64 = ArgVT == MVT::f64;
20016 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
20017 // the small struct {f32, f32} is returned in (eax, edx). For f64,
20018 // the results are returned via SRet in memory.
20019 const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret";
20020 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20021 SDValue Callee = DAG.getExternalSymbol(LibcallName, TLI.getPointerTy());
20023 Type *RetTy = isF64
20024 ? (Type*)StructType::get(ArgTy, ArgTy, nullptr)
20025 : (Type*)VectorType::get(ArgTy, 4);
20027 TargetLowering::CallLoweringInfo CLI(DAG);
20028 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
20029 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);
20031 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
20034 // Returned in xmm0 and xmm1.
20035 return CallResult.first;
20037 // Returned in bits 0:31 and 32:63 of xmm0.
20038 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
20039 CallResult.first, DAG.getIntPtrConstant(0));
20040 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
20041 CallResult.first, DAG.getIntPtrConstant(1));
20042 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
20043 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
20046 /// LowerOperation - Provide custom lowering hooks for some operations.
20048 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
20049 switch (Op.getOpcode()) {
20050 default: llvm_unreachable("Should not custom lower this!");
20051 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG);
20052 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
20053 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
20054 return LowerCMP_SWAP(Op, Subtarget, DAG);
20055 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
20056 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
20057 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG);
20058 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
20059 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
20060 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
20061 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
20062 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
20063 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
20064 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
20065 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
20066 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
20067 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
20068 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
20069 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
20070 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
20071 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
20072 case ISD::SHL_PARTS:
20073 case ISD::SRA_PARTS:
20074 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
20075 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
20076 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
20077 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
20078 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
20079 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
20080 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
20081 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
20082 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
20083 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
20084 case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG);
20086 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
20087 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
20088 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
20089 case ISD::SETCC: return LowerSETCC(Op, DAG);
20090 case ISD::SELECT: return LowerSELECT(Op, DAG);
20091 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
20092 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
20093 case ISD::VASTART: return LowerVASTART(Op, DAG);
20094 case ISD::VAARG: return LowerVAARG(Op, DAG);
20095 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
20096 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, Subtarget, DAG);
20097 case ISD::INTRINSIC_VOID:
20098 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
20099 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
20100 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
20101 case ISD::FRAME_TO_ARGS_OFFSET:
20102 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
20103 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
20104 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
20105 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
20106 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
20107 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
20108 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
20109 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
20110 case ISD::CTLZ: return LowerCTLZ(Op, DAG);
20111 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG);
20112 case ISD::CTTZ: return LowerCTTZ(Op, DAG);
20113 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
20114 case ISD::UMUL_LOHI:
20115 case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
20118 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
20124 case ISD::UMULO: return LowerXALUO(Op, DAG);
20125 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
20126 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
20130 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
20131 case ISD::ADD: return LowerADD(Op, DAG);
20132 case ISD::SUB: return LowerSUB(Op, DAG);
20133 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
20137 /// ReplaceNodeResults - Replace a node with an illegal result type
20138 /// with a new node built out of custom code.
20139 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
20140 SmallVectorImpl<SDValue>&Results,
20141 SelectionDAG &DAG) const {
20143 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20144 switch (N->getOpcode()) {
20146 llvm_unreachable("Do not know how to custom type legalize this operation!");
20147 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
20148 case X86ISD::FMINC:
20150 case X86ISD::FMAXC:
20151 case X86ISD::FMAX: {
20152 EVT VT = N->getValueType(0);
20153 if (VT != MVT::v2f32)
20154 llvm_unreachable("Unexpected type (!= v2f32) on FMIN/FMAX.");
20155 SDValue UNDEF = DAG.getUNDEF(VT);
20156 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
20157 N->getOperand(0), UNDEF);
20158 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
20159 N->getOperand(1), UNDEF);
20160 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
20163 case ISD::SIGN_EXTEND_INREG:
20168 // We don't want to expand or promote these.
20175 case ISD::UDIVREM: {
20176 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
20177 Results.push_back(V);
20180 case ISD::FP_TO_SINT:
20181 case ISD::FP_TO_UINT: {
20182 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
20184 if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType()))
20187 std::pair<SDValue,SDValue> Vals =
20188 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
20189 SDValue FIST = Vals.first, StackSlot = Vals.second;
20190 if (FIST.getNode()) {
20191 EVT VT = N->getValueType(0);
20192 // Return a load from the stack slot.
20193 if (StackSlot.getNode())
20194 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
20195 MachinePointerInfo(),
20196 false, false, false, 0));
20198 Results.push_back(FIST);
20202 case ISD::UINT_TO_FP: {
20203 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
20204 if (N->getOperand(0).getValueType() != MVT::v2i32 ||
20205 N->getValueType(0) != MVT::v2f32)
20207 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
20209 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
20211 SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias);
20212 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
20213 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias));
20214 Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or);
20215 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
20216 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
20219 case ISD::FP_ROUND: {
20220 if (!TLI.isTypeLegal(N->getOperand(0).getValueType()))
20222 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
20223 Results.push_back(V);
20226 case ISD::INTRINSIC_W_CHAIN: {
20227 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
20229 default : llvm_unreachable("Do not know how to custom type "
20230 "legalize this intrinsic operation!");
20231 case Intrinsic::x86_rdtsc:
20232 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
20234 case Intrinsic::x86_rdtscp:
20235 return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
20237 case Intrinsic::x86_rdpmc:
20238 return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
20241 case ISD::READCYCLECOUNTER: {
20242 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
20245 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
20246 EVT T = N->getValueType(0);
20247 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
20248 bool Regs64bit = T == MVT::i128;
20249 EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
20250 SDValue cpInL, cpInH;
20251 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
20252 DAG.getConstant(0, HalfT));
20253 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
20254 DAG.getConstant(1, HalfT));
20255 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
20256 Regs64bit ? X86::RAX : X86::EAX,
20258 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
20259 Regs64bit ? X86::RDX : X86::EDX,
20260 cpInH, cpInL.getValue(1));
20261 SDValue swapInL, swapInH;
20262 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
20263 DAG.getConstant(0, HalfT));
20264 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
20265 DAG.getConstant(1, HalfT));
20266 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl,
20267 Regs64bit ? X86::RBX : X86::EBX,
20268 swapInL, cpInH.getValue(1));
20269 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl,
20270 Regs64bit ? X86::RCX : X86::ECX,
20271 swapInH, swapInL.getValue(1));
20272 SDValue Ops[] = { swapInH.getValue(0),
20274 swapInH.getValue(1) };
20275 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
20276 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
20277 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG :
20278 X86ISD::LCMPXCHG8_DAG;
20279 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
20280 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
20281 Regs64bit ? X86::RAX : X86::EAX,
20282 HalfT, Result.getValue(1));
20283 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
20284 Regs64bit ? X86::RDX : X86::EDX,
20285 HalfT, cpOutL.getValue(2));
20286 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
20288 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
20289 MVT::i32, cpOutH.getValue(2));
20290 SDValue Success =
20291 DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
20292 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
20293 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
20295 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
20296 Results.push_back(Success);
20297 Results.push_back(EFLAGS.getValue(1));
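// Rough shape of the code this expansion produces for the i64 case on
// 32-bit targets (the i128 case is identical with 64-bit registers):
//   mov  lo(expected), %eax      ; CMPXCHG8B compares EDX:EAX with [ptr]
//   mov  hi(expected), %edx
//   mov  lo(desired),  %ebx      ; ...and stores ECX:EBX on a match
//   mov  hi(desired),  %ecx
//   lock cmpxchg8b (ptr)         ; ZF=1 on success, old value in EDX:EAX
//   sete ...                     ; the SETCC on COND_E yields the success bit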
20300 case ISD::ATOMIC_SWAP:
20301 case ISD::ATOMIC_LOAD_ADD:
20302 case ISD::ATOMIC_LOAD_SUB:
20303 case ISD::ATOMIC_LOAD_AND:
20304 case ISD::ATOMIC_LOAD_OR:
20305 case ISD::ATOMIC_LOAD_XOR:
20306 case ISD::ATOMIC_LOAD_NAND:
20307 case ISD::ATOMIC_LOAD_MIN:
20308 case ISD::ATOMIC_LOAD_MAX:
20309 case ISD::ATOMIC_LOAD_UMIN:
20310 case ISD::ATOMIC_LOAD_UMAX:
20311 case ISD::ATOMIC_LOAD: {
20312 // Delegate to generic TypeLegalization. Situations we can really handle
20313 // should have already been dealt with by AtomicExpandPass.cpp.
20316 case ISD::BITCAST: {
20317 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
20318 EVT DstVT = N->getValueType(0);
20319 EVT SrcVT = N->getOperand(0)->getValueType(0);
20321 if (SrcVT != MVT::f64 ||
20322 (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
20325 unsigned NumElts = DstVT.getVectorNumElements();
20326 EVT SVT = DstVT.getVectorElementType();
20327 EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
20328 SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
20329 MVT::v2f64, N->getOperand(0));
20330 SDValue ToVecInt = DAG.getNode(ISD::BITCAST, dl, WiderVT, Expanded);
20332 if (ExperimentalVectorWideningLegalization) {
20333 // If we are legalizing vectors by widening, we already have the desired
20334 // legal vector type, just return it.
20335 Results.push_back(ToVecInt);
20339 SmallVector<SDValue, 8> Elts;
20340 for (unsigned i = 0, e = NumElts; i != e; ++i)
20341 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
20342 ToVecInt, DAG.getIntPtrConstant(i)));
20344 Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
20349 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
20351 default: return nullptr;
20352 case X86ISD::BSF: return "X86ISD::BSF";
20353 case X86ISD::BSR: return "X86ISD::BSR";
20354 case X86ISD::SHLD: return "X86ISD::SHLD";
20355 case X86ISD::SHRD: return "X86ISD::SHRD";
20356 case X86ISD::FAND: return "X86ISD::FAND";
20357 case X86ISD::FANDN: return "X86ISD::FANDN";
20358 case X86ISD::FOR: return "X86ISD::FOR";
20359 case X86ISD::FXOR: return "X86ISD::FXOR";
20360 case X86ISD::FSRL: return "X86ISD::FSRL";
20361 case X86ISD::FILD: return "X86ISD::FILD";
20362 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
20363 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
20364 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
20365 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
20366 case X86ISD::FLD: return "X86ISD::FLD";
20367 case X86ISD::FST: return "X86ISD::FST";
20368 case X86ISD::CALL: return "X86ISD::CALL";
20369 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
20370 case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
20371 case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
20372 case X86ISD::BT: return "X86ISD::BT";
20373 case X86ISD::CMP: return "X86ISD::CMP";
20374 case X86ISD::COMI: return "X86ISD::COMI";
20375 case X86ISD::UCOMI: return "X86ISD::UCOMI";
20376 case X86ISD::CMPM: return "X86ISD::CMPM";
20377 case X86ISD::CMPMU: return "X86ISD::CMPMU";
20378 case X86ISD::SETCC: return "X86ISD::SETCC";
20379 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
20380 case X86ISD::FSETCC: return "X86ISD::FSETCC";
20381 case X86ISD::CMOV: return "X86ISD::CMOV";
20382 case X86ISD::BRCOND: return "X86ISD::BRCOND";
20383 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
20384 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
20385 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
20386 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
20387 case X86ISD::Wrapper: return "X86ISD::Wrapper";
20388 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
20389 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
20390 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
20391 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
20392 case X86ISD::PINSRB: return "X86ISD::PINSRB";
20393 case X86ISD::PINSRW: return "X86ISD::PINSRW";
20394 case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
20395 case X86ISD::ANDNP: return "X86ISD::ANDNP";
20396 case X86ISD::PSIGN: return "X86ISD::PSIGN";
20397 case X86ISD::BLENDI: return "X86ISD::BLENDI";
20398 case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
20399 case X86ISD::SUBUS: return "X86ISD::SUBUS";
20400 case X86ISD::HADD: return "X86ISD::HADD";
20401 case X86ISD::HSUB: return "X86ISD::HSUB";
20402 case X86ISD::FHADD: return "X86ISD::FHADD";
20403 case X86ISD::FHSUB: return "X86ISD::FHSUB";
20404 case X86ISD::UMAX: return "X86ISD::UMAX";
20405 case X86ISD::UMIN: return "X86ISD::UMIN";
20406 case X86ISD::SMAX: return "X86ISD::SMAX";
20407 case X86ISD::SMIN: return "X86ISD::SMIN";
20408 case X86ISD::FMAX: return "X86ISD::FMAX";
20409 case X86ISD::FMIN: return "X86ISD::FMIN";
20410 case X86ISD::FMAXC: return "X86ISD::FMAXC";
20411 case X86ISD::FMINC: return "X86ISD::FMINC";
20412 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
20413 case X86ISD::FRCP: return "X86ISD::FRCP";
20414 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
20415 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
20416 case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
20417 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
20418 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
20419 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
20420 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
20421 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
20422 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
20423 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
20424 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
20425 case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
20426 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
20427 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
20428 case X86ISD::VZEXT: return "X86ISD::VZEXT";
20429 case X86ISD::VSEXT: return "X86ISD::VSEXT";
20430 case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
20431 case X86ISD::VTRUNCM: return "X86ISD::VTRUNCM";
20432 case X86ISD::VINSERT: return "X86ISD::VINSERT";
20433 case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
20434 case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
20435 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
20436 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
20437 case X86ISD::VSHL: return "X86ISD::VSHL";
20438 case X86ISD::VSRL: return "X86ISD::VSRL";
20439 case X86ISD::VSRA: return "X86ISD::VSRA";
20440 case X86ISD::VSHLI: return "X86ISD::VSHLI";
20441 case X86ISD::VSRLI: return "X86ISD::VSRLI";
20442 case X86ISD::VSRAI: return "X86ISD::VSRAI";
20443 case X86ISD::CMPP: return "X86ISD::CMPP";
20444 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
20445 case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
20446 case X86ISD::PCMPEQM: return "X86ISD::PCMPEQM";
20447 case X86ISD::PCMPGTM: return "X86ISD::PCMPGTM";
20448 case X86ISD::ADD: return "X86ISD::ADD";
20449 case X86ISD::SUB: return "X86ISD::SUB";
20450 case X86ISD::ADC: return "X86ISD::ADC";
20451 case X86ISD::SBB: return "X86ISD::SBB";
20452 case X86ISD::SMUL: return "X86ISD::SMUL";
20453 case X86ISD::UMUL: return "X86ISD::UMUL";
20454 case X86ISD::SMUL8: return "X86ISD::SMUL8";
20455 case X86ISD::UMUL8: return "X86ISD::UMUL8";
20456 case X86ISD::SDIVREM8_SEXT_HREG: return "X86ISD::SDIVREM8_SEXT_HREG";
20457 case X86ISD::UDIVREM8_ZEXT_HREG: return "X86ISD::UDIVREM8_ZEXT_HREG";
20458 case X86ISD::INC: return "X86ISD::INC";
20459 case X86ISD::DEC: return "X86ISD::DEC";
20460 case X86ISD::OR: return "X86ISD::OR";
20461 case X86ISD::XOR: return "X86ISD::XOR";
20462 case X86ISD::AND: return "X86ISD::AND";
20463 case X86ISD::BEXTR: return "X86ISD::BEXTR";
20464 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
20465 case X86ISD::PTEST: return "X86ISD::PTEST";
20466 case X86ISD::TESTP: return "X86ISD::TESTP";
20467 case X86ISD::TESTM: return "X86ISD::TESTM";
20468 case X86ISD::TESTNM: return "X86ISD::TESTNM";
20469 case X86ISD::KORTEST: return "X86ISD::KORTEST";
20470 case X86ISD::PACKSS: return "X86ISD::PACKSS";
20471 case X86ISD::PACKUS: return "X86ISD::PACKUS";
20472 case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
20473 case X86ISD::VALIGN: return "X86ISD::VALIGN";
20474 case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
20475 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
20476 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
20477 case X86ISD::SHUFP: return "X86ISD::SHUFP";
20478 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
20479 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
20480 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
20481 case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
20482 case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
20483 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
20484 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
20485 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
20486 case X86ISD::MOVSD: return "X86ISD::MOVSD";
20487 case X86ISD::MOVSS: return "X86ISD::MOVSS";
20488 case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
20489 case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
20490 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
20491 case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
20492 case X86ISD::VEXTRACT: return "X86ISD::VEXTRACT";
20493 case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
20494 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
20495 case X86ISD::VPERMV: return "X86ISD::VPERMV";
20496 case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
20497 case X86ISD::VPERMIV3: return "X86ISD::VPERMIV3";
20498 case X86ISD::VPERMI: return "X86ISD::VPERMI";
20499 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
20500 case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
20501 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
20502 case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
20503 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
20504 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
20505 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
20506 case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL";
20507 case X86ISD::SAHF: return "X86ISD::SAHF";
20508 case X86ISD::RDRAND: return "X86ISD::RDRAND";
20509 case X86ISD::RDSEED: return "X86ISD::RDSEED";
20510 case X86ISD::FMADD: return "X86ISD::FMADD";
20511 case X86ISD::FMSUB: return "X86ISD::FMSUB";
20512 case X86ISD::FNMADD: return "X86ISD::FNMADD";
20513 case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
20514 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
20515 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
20516 case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI";
20517 case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI";
20518 case X86ISD::XTEST: return "X86ISD::XTEST";
20519 case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
20520 case X86ISD::EXPAND: return "X86ISD::EXPAND";
20521 case X86ISD::SELECT: return "X86ISD::SELECT";
20522 case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
20523 case X86ISD::RCP28: return "X86ISD::RCP28";
20524 case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
20528 // isLegalAddressingMode - Return true if the addressing mode represented
20529 // by AM is legal for this target, for a load/store of the specified type.
20530 bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
20532 // X86 supports extremely general addressing modes.
20533 CodeModel::Model M = getTargetMachine().getCodeModel();
20534 Reloc::Model R = getTargetMachine().getRelocationModel();
20536 // X86 allows a sign-extended 32-bit immediate field as a displacement.
20537 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
20542 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());
20544 // If a reference to this global requires an extra load, we can't fold it.
20545 if (isGlobalStubReference(GVFlags))
20548 // If BaseGV requires a register for the PIC base, we cannot also have a
20549 // BaseReg specified.
20550 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
20553 // If lower 4G is not available, then we must use rip-relative addressing.
20554 if ((M != CodeModel::Small || R != Reloc::Static) &&
20555 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
20559 switch (AM.Scale) {
20565 // These scales always work.
20570 // These scales are formed with basereg+scalereg. Only accept if there is
20575 default: // Other stuff never works.
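// For intuition (informal): the accepted forms map directly onto x86
// addressing, e.g.
//   movl 16(%rdi,%rsi,4), %eax   ; base + 4*index + disp32
//   leaq (%rax,%rax,2), %rcx     ; scale 3 == index + 2*index, so it consumes
//                                ; the base slot and is rejected with a BaseReg
// whereas a scale of 6, or two base registers, has no encoding at all.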
20582 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
20583 unsigned Bits = Ty->getScalarSizeInBits();
20585 // 8-bit shifts are always expensive, but versions with a scalar amount aren't
20586 // particularly cheaper than those without.
20590 // On AVX2 there are new vpsllv[dq] instructions (and other shifts), that make
20591 // variable shifts just as cheap as scalar ones.
20592 if (Subtarget->hasInt256() && (Bits == 32 || Bits == 64))
20595 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
20596 // fully general vector.
20600 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
20601 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20603 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
20604 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
20605 return NumBits1 > NumBits2;
20608 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
20609 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20612 if (!isTypeLegal(EVT::getEVT(Ty1)))
20615 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
20617 // Assuming the caller doesn't have a zeroext or signext return parameter,
20618 // truncation all the way down to i1 is valid.
20622 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
20623 return isInt<32>(Imm);
20626 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
20627 // Can also use sub to handle negated immediates.
20628 return isInt<32>(Imm);
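// Example of what the 32-bit limit above buys (illustrative only):
//   addq $0x12345678, %rax       ; imm32 is sign-extended in place
// but an immediate such as 0x112345678 does not fit a signed 32-bit field
// and would first need a movabsq into a scratch register.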
20631 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
20632 if (!VT1.isInteger() || !VT2.isInteger())
20634 unsigned NumBits1 = VT1.getSizeInBits();
20635 unsigned NumBits2 = VT2.getSizeInBits();
20636 return NumBits1 > NumBits2;
20639 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
20640 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20641 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
20644 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
20645 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20646 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
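// The underlying hardware rule: any write to a 32-bit GPR clears bits 63:32
// of the containing 64-bit register, so the zero-extension is a by-product
// of the 32-bit operation itself, e.g.
//   movl %ecx, %eax              ; RAX == zext_i64(ECX), no extra instruction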
20649 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
20650 EVT VT1 = Val.getValueType();
20651 if (isZExtFree(VT1, VT2))
20654 if (Val.getOpcode() != ISD::LOAD)
20657 if (!VT1.isSimple() || !VT1.isInteger() ||
20658 !VT2.isSimple() || !VT2.isInteger())
20661 switch (VT1.getSimpleVT().SimpleTy) {
20666 // X86 has 8, 16, and 32-bit zero-extending loads.
20673 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue) const { return true; }
20676 X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
20677 if (!(Subtarget->hasFMA() || Subtarget->hasFMA4()))
20680 VT = VT.getScalarType();
20682 if (!VT.isSimple())
20685 switch (VT.getSimpleVT().SimpleTy) {
20696 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
20697 // i16 instructions are longer (0x66 prefix) and potentially slower.
20698 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
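// For example, "addw %cx, %ax" encodes as 66 01 C8 (an extra 0x66 operand-
// size prefix) while the 32-bit "addl %ecx, %eax" is just 01 C8, and writing
// only the low 16 bits of a register can cause partial-register stalls on
// some cores.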
20701 /// isShuffleMaskLegal - Targets can use this to indicate that they only
20702 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
20703 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
20704 /// are assumed to be legal.
20706 X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
20708 if (!VT.isSimple())
20711 MVT SVT = VT.getSimpleVT();
20713 // Very little shuffling can be done for 64-bit vectors right now.
20714 if (VT.getSizeInBits() == 64)
20717 // This is an experimental legality test that is tailored to match the
20718 // legality test of the experimental lowering more closely. They are gated
20719 // separately to ease testing of performance differences.
20720 if (ExperimentalVectorShuffleLegality)
20721 // We only care that the types being shuffled are legal. The lowering can
20722 // handle any possible shuffle mask that results.
20723 return isTypeLegal(SVT);
20725 // If this is a single-input shuffle with no 128-bit lane crossings, we can
20726 // lower it into pshufb.
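// (For example, a v16i8 mask such as <0,0,1,1,2,2,...> becomes one PSHUFB
// whose per-byte control selects source bytes 0,0,1,1,...; the 256-bit
// VPSHUFB shuffles each 128-bit lane independently, hence the lane-crossing
// check below.)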
20727 if ((SVT.is128BitVector() && Subtarget->hasSSSE3()) ||
20728 (SVT.is256BitVector() && Subtarget->hasInt256())) {
20729 bool isLegal = true;
20730 for (unsigned I = 0, E = M.size(); I != E; ++I) {
20731 if (M[I] >= (int)SVT.getVectorNumElements() ||
20732 ShuffleCrosses128bitLane(SVT, I, M[I])) {
20741 // FIXME: blends, shifts.
20742 return (SVT.getVectorNumElements() == 2 ||
20743 ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
20744 isMOVLMask(M, SVT) ||
20745 isCommutedMOVLMask(M, SVT) ||
20746 isMOVHLPSMask(M, SVT) ||
20747 isSHUFPMask(M, SVT) ||
20748 isSHUFPMask(M, SVT, /* Commuted */ true) ||
20749 isPSHUFDMask(M, SVT) ||
20750 isPSHUFDMask(M, SVT, /* SecondOperand */ true) ||
20751 isPSHUFHWMask(M, SVT, Subtarget->hasInt256()) ||
20752 isPSHUFLWMask(M, SVT, Subtarget->hasInt256()) ||
20753 isPALIGNRMask(M, SVT, Subtarget) ||
20754 isUNPCKLMask(M, SVT, Subtarget->hasInt256()) ||
20755 isUNPCKHMask(M, SVT, Subtarget->hasInt256()) ||
20756 isUNPCKL_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20757 isUNPCKH_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20758 isBlendMask(M, SVT, Subtarget->hasSSE41(), Subtarget->hasInt256()) ||
20759 (Subtarget->hasSSE41() && isINSERTPSMask(M, SVT)));
20763 X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
20765 if (!VT.isSimple())
20768 MVT SVT = VT.getSimpleVT();
20770 // This is an experimental legality test that is tailored to match the
20771 // legality test of the experimental lowering more closely. They are gated
20772 // separately to ease testing of performance differences.
20773 if (ExperimentalVectorShuffleLegality)
20774 // The new vector shuffle lowering is very good at managing zero-inputs.
20775 return isShuffleMaskLegal(Mask, VT);
20777 unsigned NumElts = SVT.getVectorNumElements();
20778 // FIXME: This collection of masks seems suspect.
20781 if (NumElts == 4 && SVT.is128BitVector()) {
20782 return (isMOVLMask(Mask, SVT) ||
20783 isCommutedMOVLMask(Mask, SVT, true) ||
20784 isSHUFPMask(Mask, SVT) ||
20785 isSHUFPMask(Mask, SVT, /* Commuted */ true) ||
20786 isBlendMask(Mask, SVT, Subtarget->hasSSE41(),
20787 Subtarget->hasInt256()));
20792 //===----------------------------------------------------------------------===//
20793 // X86 Scheduler Hooks
20794 //===----------------------------------------------------------------------===//
20796 /// Utility function to emit xbegin specifying the start of an RTM region.
20797 static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
20798 const TargetInstrInfo *TII) {
20799 DebugLoc DL = MI->getDebugLoc();
20801 const BasicBlock *BB = MBB->getBasicBlock();
20802 MachineFunction::iterator I = MBB;
20805 // For the v = xbegin(), we generate
20816 MachineBasicBlock *thisMBB = MBB;
20817 MachineFunction *MF = MBB->getParent();
20818 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
20819 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
20820 MF->insert(I, mainMBB);
20821 MF->insert(I, sinkMBB);
20823 // Transfer the remainder of BB and its successor edges to sinkMBB.
20824 sinkMBB->splice(sinkMBB->begin(), MBB,
20825 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
20826 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
20830 // # fallthrough to mainMBB
20831 // # abort to sinkMBB
20832 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB);
20833 thisMBB->addSuccessor(mainMBB);
20834 thisMBB->addSuccessor(sinkMBB);
20838 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1);
20839 mainMBB->addSuccessor(sinkMBB);
20842 // EAX is live into the sinkMBB
20843 sinkMBB->addLiveIn(X86::EAX);
20844 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
20845 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20848 MI->eraseFromParent();
20852 // FIXME: When we get size-specific XMM0 registers, i.e. XMM0_V16I8
20853 // or XMM0_V32I8 in AVX, all of this code can be replaced with that
20854 // in the .td file.
20855 static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
20856 const TargetInstrInfo *TII) {
20858 switch (MI->getOpcode()) {
20859 default: llvm_unreachable("illegal opcode!");
20860 case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break;
20861 case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
20862 case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break;
20863 case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break;
20864 case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break;
20865 case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break;
20866 case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break;
20867 case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
20870 DebugLoc dl = MI->getDebugLoc();
20871 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20873 unsigned NumArgs = MI->getNumOperands();
20874 for (unsigned i = 1; i < NumArgs; ++i) {
20875 MachineOperand &Op = MI->getOperand(i);
20876 if (!(Op.isReg() && Op.isImplicit()))
20877 MIB.addOperand(Op);
20879 if (MI->hasOneMemOperand())
20880 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20882 BuildMI(*BB, MI, dl,
20883 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20884 .addReg(X86::XMM0);
20886 MI->eraseFromParent();
20890 // FIXME: Custom handling because TableGen doesn't support multiple implicit
20891 // defs in an instruction pattern
20892 static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
20893 const TargetInstrInfo *TII) {
20895 switch (MI->getOpcode()) {
20896 default: llvm_unreachable("illegal opcode!");
20897 case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break;
20898 case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
20899 case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break;
20900 case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break;
20901 case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break;
20902 case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break;
20903 case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break;
20904 case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
20907 DebugLoc dl = MI->getDebugLoc();
20908 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20910 unsigned NumArgs = MI->getNumOperands(); // remove the results
20911 for (unsigned i = 1; i < NumArgs; ++i) {
20912 MachineOperand &Op = MI->getOperand(i);
20913 if (!(Op.isReg() && Op.isImplicit()))
20914 MIB.addOperand(Op);
20916 if (MI->hasOneMemOperand())
20917 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20919 BuildMI(*BB, MI, dl,
20920 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20923 MI->eraseFromParent();
20927 static MachineBasicBlock *EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
20928 const X86Subtarget *Subtarget) {
20929 DebugLoc dl = MI->getDebugLoc();
20930 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20931 // Address into RAX/EAX, other two args into ECX, EDX.
20932 unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
20933 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
20934 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
20935 for (int i = 0; i < X86::AddrNumOperands; ++i)
20936 MIB.addOperand(MI->getOperand(i));
20938 unsigned ValOps = X86::AddrNumOperands;
20939 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
20940 .addReg(MI->getOperand(ValOps).getReg());
20941 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
20942 .addReg(MI->getOperand(ValOps+1).getReg());
20944 // The instruction doesn't actually take any operands though.
20945 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));
20947 MI->eraseFromParent(); // The pseudo is gone now.
20951 MachineBasicBlock *
20952 X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
20953 MachineBasicBlock *MBB) const {
20954 // Emit va_arg instruction on X86-64.
20956 // Operands to this pseudo-instruction:
20957 // 0 ) Output : destination address (reg)
20958 // 1-5) Input : va_list address (addr, i64mem)
20959 // 6 ) ArgSize : Size (in bytes) of vararg type
20960 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
20961 // 8 ) Align : Alignment of type
20962 // 9 ) EFLAGS (implicit-def)
20964 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
20965 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands");
20967 unsigned DestReg = MI->getOperand(0).getReg();
20968 MachineOperand &Base = MI->getOperand(1);
20969 MachineOperand &Scale = MI->getOperand(2);
20970 MachineOperand &Index = MI->getOperand(3);
20971 MachineOperand &Disp = MI->getOperand(4);
20972 MachineOperand &Segment = MI->getOperand(5);
20973 unsigned ArgSize = MI->getOperand(6).getImm();
20974 unsigned ArgMode = MI->getOperand(7).getImm();
20975 unsigned Align = MI->getOperand(8).getImm();
20977 // Memory Reference
20978 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
20979 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
20980 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
20982 // Machine Information
20983 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20984 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
20985 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
20986 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
20987 DebugLoc DL = MI->getDebugLoc();
20989 // struct va_list {
20990 //   i32   gp_offset
20991 //   i32   fp_offset
20992 //   i64   overflow_area (address)
20993 //   i64   reg_save_area (address)
20994 // }
20995 // sizeof(va_list) = 24
20996 // alignment(va_list) = 8
20998 unsigned TotalNumIntRegs = 6;
20999 unsigned TotalNumXMMRegs = 8;
21000 bool UseGPOffset = (ArgMode == 1);
21001 bool UseFPOffset = (ArgMode == 2);
21002 unsigned MaxOffset = TotalNumIntRegs * 8 +
21003 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
21005 /* Align ArgSize to a multiple of 8 */
21006 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
21007 bool NeedsAlign = (Align > 8);
21009 MachineBasicBlock *thisMBB = MBB;
21010 MachineBasicBlock *overflowMBB;
21011 MachineBasicBlock *offsetMBB;
21012 MachineBasicBlock *endMBB;
21014 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
21015 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
21016 unsigned OffsetReg = 0;
21018 if (!UseGPOffset && !UseFPOffset) {
21019 // If we only pull from the overflow region, we don't create a branch.
21020 // We don't need to alter control flow.
21021 OffsetDestReg = 0; // unused
21022 OverflowDestReg = DestReg;
21024 offsetMBB = nullptr;
21025 overflowMBB = thisMBB;
21028 // First emit code to check if gp_offset (or fp_offset) is below the bound.
21029 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
21030 // If not, pull from overflow_area. (branch to overflowMBB)
21035 // offsetMBB overflowMBB
21040 // Registers for the PHI in endMBB
21041 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
21042 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
21044 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
21045 MachineFunction *MF = MBB->getParent();
21046 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21047 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21048 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21050 MachineFunction::iterator MBBIter = MBB;
21053 // Insert the new basic blocks
21054 MF->insert(MBBIter, offsetMBB);
21055 MF->insert(MBBIter, overflowMBB);
21056 MF->insert(MBBIter, endMBB);
21058 // Transfer the remainder of MBB and its successor edges to endMBB.
21059 endMBB->splice(endMBB->begin(), thisMBB,
21060 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
21061 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
21063 // Make offsetMBB and overflowMBB successors of thisMBB
21064 thisMBB->addSuccessor(offsetMBB);
21065 thisMBB->addSuccessor(overflowMBB);
21067 // endMBB is a successor of both offsetMBB and overflowMBB
21068 offsetMBB->addSuccessor(endMBB);
21069 overflowMBB->addSuccessor(endMBB);
21071 // Load the offset value into a register
21072 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
21073 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
21077 .addDisp(Disp, UseFPOffset ? 4 : 0)
21078 .addOperand(Segment)
21079 .setMemRefs(MMOBegin, MMOEnd);
21081 // Check if there is enough room left to pull this argument.
21082 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
21084 .addImm(MaxOffset + 8 - ArgSizeA8);
21086 // Branch to "overflowMBB" if offset >= max
21087 // Fall through to "offsetMBB" otherwise
21088 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
21089 .addMBB(overflowMBB);
21092 // In offsetMBB, emit code to use the reg_save_area.
21094 assert(OffsetReg != 0);
21096 // Read the reg_save_area address.
21097 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
21098 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
21103 .addOperand(Segment)
21104 .setMemRefs(MMOBegin, MMOEnd);
21106 // Zero-extend the offset
21107 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
21108 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
21111 .addImm(X86::sub_32bit);
21113 // Add the offset to the reg_save_area to get the final address.
21114 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
21115 .addReg(OffsetReg64)
21116 .addReg(RegSaveReg);
21118 // Compute the offset for the next argument
21119 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
21120 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
21122 .addImm(UseFPOffset ? 16 : 8);
21124 // Store it back into the va_list.
21125 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
21129 .addDisp(Disp, UseFPOffset ? 4 : 0)
21130 .addOperand(Segment)
21131 .addReg(NextOffsetReg)
21132 .setMemRefs(MMOBegin, MMOEnd);
21135 BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
21140 // Emit code to use overflow area
21143 // Load the overflow_area address into a register.
21144 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
21145 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
21150 .addOperand(Segment)
21151 .setMemRefs(MMOBegin, MMOEnd);
21153 // If we need to align it, do so. Otherwise, just copy the address
21154 // to OverflowDestReg.
21156 // Align the overflow address
21157 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
21158 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
21160 // aligned_addr = (addr + (align-1)) & ~(align-1)
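// e.g. with Align == 16:  addr 0x1009 -> (0x1009 + 15) & ~15 == 0x1010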
21161 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
21162 .addReg(OverflowAddrReg)
21165 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
21167 .addImm(~(uint64_t)(Align-1));
21169 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
21170 .addReg(OverflowAddrReg);
21173 // Compute the next overflow address after this argument.
21174 // (the overflow address should be kept 8-byte aligned)
21175 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
21176 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
21177 .addReg(OverflowDestReg)
21178 .addImm(ArgSizeA8);
21180 // Store the new overflow address.
21181 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
21186 .addOperand(Segment)
21187 .addReg(NextAddrReg)
21188 .setMemRefs(MMOBegin, MMOEnd);
21190 // If we branched, emit the PHI to the front of endMBB.
21192 BuildMI(*endMBB, endMBB->begin(), DL,
21193 TII->get(X86::PHI), DestReg)
21194 .addReg(OffsetDestReg).addMBB(offsetMBB)
21195 .addReg(OverflowDestReg).addMBB(overflowMBB);
21198 // Erase the pseudo instruction
21199 MI->eraseFromParent();
21204 MachineBasicBlock *
21205 X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
21207 MachineBasicBlock *MBB) const {
21208 // Emit code to save XMM registers to the stack. The ABI says that the
21209 // number of registers to save is given in %al, so it's theoretically
21210 // possible to do an indirect jump trick to avoid saving all of them.
21211 // However, this code takes a simpler approach and just executes all
21212 // of the stores if %al is non-zero. It's less code, it's probably
21213 // easier on the hardware branch predictor, and stores aren't all that
21214 // expensive anyway.
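// Roughly (illustrative only, non-Win64 shown), the emitted code is:
//   testb %al, %al
//   je    EndMBB                      ; no vector args were passed in registers
//   movaps %xmm0, FI + VarArgsFPOffset + 0     ; (v)movaps, 16-byte strides
//   movaps %xmm1, FI + VarArgsFPOffset + 16
//   ...
// EndMBB:
// where FI is the RegSaveFrameIndex slot taken from the pseudo's operands.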
21216 // Create the new basic blocks. One block contains all the XMM stores,
21217 // and one block is the final destination regardless of whether any
21218 // stores were performed.
21219 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
21220 MachineFunction *F = MBB->getParent();
21221 MachineFunction::iterator MBBIter = MBB;
21223 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
21224 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
21225 F->insert(MBBIter, XMMSaveMBB);
21226 F->insert(MBBIter, EndMBB);
21228 // Transfer the remainder of MBB and its successor edges to EndMBB.
21229 EndMBB->splice(EndMBB->begin(), MBB,
21230 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21231 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
21233 // The original block will now fall through to the XMM save block.
21234 MBB->addSuccessor(XMMSaveMBB);
21235 // The XMMSaveMBB will fall through to the end block.
21236 XMMSaveMBB->addSuccessor(EndMBB);
21238 // Now add the instructions.
21239 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21240 DebugLoc DL = MI->getDebugLoc();
21242 unsigned CountReg = MI->getOperand(0).getReg();
21243 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
21244 int64_t VarArgsFPOffset = MI->getOperand(2).getImm();
21246 if (!Subtarget->isTargetWin64()) {
21247 // If %al is 0, branch around the XMM save block.
21248 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
21249 BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
21250 MBB->addSuccessor(EndMBB);
21253 // Make sure the last operand is EFLAGS, which gets clobbered by the branch
21254 // that was just emitted, but clearly shouldn't be "saved".
21255 assert((MI->getNumOperands() <= 3 ||
21256 !MI->getOperand(MI->getNumOperands() - 1).isReg() ||
21257 MI->getOperand(MI->getNumOperands() - 1).getReg() == X86::EFLAGS)
21258 && "Expected last argument to be EFLAGS");
21259 unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
21260 // In the XMM save block, save all the XMM argument registers.
21261 for (int i = 3, e = MI->getNumOperands() - 1; i != e; ++i) {
21262 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
21263 MachineMemOperand *MMO =
21264 F->getMachineMemOperand(
21265 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset),
21266 MachineMemOperand::MOStore,
21267 /*Size=*/16, /*Align=*/16);
21268 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
21269 .addFrameIndex(RegSaveFrameIndex)
21270 .addImm(/*Scale=*/1)
21271 .addReg(/*IndexReg=*/0)
21272 .addImm(/*Disp=*/Offset)
21273 .addReg(/*Segment=*/0)
21274 .addReg(MI->getOperand(i).getReg())
21275 .addMemOperand(MMO);
21278 MI->eraseFromParent(); // The pseudo instruction is gone now.
21283 // The EFLAGS operand of SelectItr might be missing a kill marker
21284 // because there were multiple uses of EFLAGS, and ISel didn't know
21285 // which to mark. Figure out whether SelectItr should have had a
21286 // kill marker, and set it if it should. Returns the correct kill
21287 // marker value.
21288 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
21289 MachineBasicBlock* BB,
21290 const TargetRegisterInfo* TRI) {
21291 // Scan forward through BB for a use/def of EFLAGS.
21292 MachineBasicBlock::iterator miI(std::next(SelectItr));
21293 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
21294 const MachineInstr& mi = *miI;
21295 if (mi.readsRegister(X86::EFLAGS))
21297 if (mi.definesRegister(X86::EFLAGS))
21298 break; // Should have kill-flag - update below.
21301 // If we hit the end of the block, check whether EFLAGS is live into a
21303 if (miI == BB->end()) {
21304 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
21305 sEnd = BB->succ_end();
21306 sItr != sEnd; ++sItr) {
21307 MachineBasicBlock* succ = *sItr;
21308 if (succ->isLiveIn(X86::EFLAGS))
21313 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
21314 // out. SelectMI should have a kill flag on EFLAGS.
21315 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
21319 MachineBasicBlock *
21320 X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
21321 MachineBasicBlock *BB) const {
21322 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21323 DebugLoc DL = MI->getDebugLoc();
21325 // To "insert" a SELECT_CC instruction, we actually have to insert the
21326 // diamond control-flow pattern. The incoming instruction knows the
21327 // destination vreg to set, the condition code register to branch on, the
21328 // true/false values to select between, and a branch opcode to use.
21329 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21330 MachineFunction::iterator It = BB;
21336 // cmpTY ccX, r1, r2
21338 // fallthrough --> copy0MBB
21339 MachineBasicBlock *thisMBB = BB;
21340 MachineFunction *F = BB->getParent();
21341 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
21342 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
21343 F->insert(It, copy0MBB);
21344 F->insert(It, sinkMBB);
21346 // If the EFLAGS register isn't dead in the terminator, then claim that it's
21347 // live into the sink and copy blocks.
21348 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
21349 if (!MI->killsRegister(X86::EFLAGS) &&
21350 !checkAndUpdateEFLAGSKill(MI, BB, TRI)) {
21351 copy0MBB->addLiveIn(X86::EFLAGS);
21352 sinkMBB->addLiveIn(X86::EFLAGS);
21355 // Transfer the remainder of BB and its successor edges to sinkMBB.
21356 sinkMBB->splice(sinkMBB->begin(), BB,
21357 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21358 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
21360 // Add the true and fallthrough blocks as its successors.
21361 BB->addSuccessor(copy0MBB);
21362 BB->addSuccessor(sinkMBB);
21364 // Create the conditional branch instruction.
21366 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
21367 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
21370 // %FalseValue = ...
21371 // # fallthrough to sinkMBB
21372 copy0MBB->addSuccessor(sinkMBB);
21375 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
21377 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21378 TII->get(X86::PHI), MI->getOperand(0).getReg())
21379 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
21380 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
21382 MI->eraseFromParent(); // The pseudo instruction is gone now.
21386 MachineBasicBlock *
21387 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
21388 MachineBasicBlock *BB) const {
21389 MachineFunction *MF = BB->getParent();
21390 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21391 DebugLoc DL = MI->getDebugLoc();
21392 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21394 assert(MF->shouldSplitStack());
21396 const bool Is64Bit = Subtarget->is64Bit();
21397 const bool IsLP64 = Subtarget->isTarget64BitLP64();
21399 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
21400 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
21403 // ... [Till the alloca]
21404 // If stacklet is not large enough, jump to mallocMBB
21407 // Allocate by subtracting from RSP
21408 // Jump to continueMBB
21411 // Allocate by call to runtime
21415 // [rest of original BB]
21418 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21419 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21420 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21422 MachineRegisterInfo &MRI = MF->getRegInfo();
21423 const TargetRegisterClass *AddrRegClass =
21424 getRegClassFor(getPointerTy());
21426 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21427 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21428 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
21429 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
21430 sizeVReg = MI->getOperand(1).getReg(),
21431 physSPReg = IsLP64 || Subtarget->isTargetNaCl64() ? X86::RSP : X86::ESP;
21433 MachineFunction::iterator MBBIter = BB;
21436 MF->insert(MBBIter, bumpMBB);
21437 MF->insert(MBBIter, mallocMBB);
21438 MF->insert(MBBIter, continueMBB);
21440 continueMBB->splice(continueMBB->begin(), BB,
21441 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21442 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
21444 // Add code to the main basic block to check if the stack limit has been hit,
21445 // and if so, jump to mallocMBB otherwise to bumpMBB.
21446 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
21447 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
21448 .addReg(tmpSPVReg).addReg(sizeVReg);
21449 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
21450 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
21451 .addReg(SPLimitVReg);
21452 BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);
21454 // bumpMBB simply decreases the stack pointer, since we know the current
21455 // stacklet has enough space.
21456 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
21457 .addReg(SPLimitVReg);
21458 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
21459 .addReg(SPLimitVReg);
21460 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21462 // Calls into a routine in libgcc to allocate more space from the heap.
21463 const uint32_t *RegMask =
21464 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
21466 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
21468 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21469 .addExternalSymbol("__morestack_allocate_stack_space")
21470 .addRegMask(RegMask)
21471 .addReg(X86::RDI, RegState::Implicit)
21472 .addReg(X86::RAX, RegState::ImplicitDefine);
21473 } else if (Is64Bit) {
21474 BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
21476 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21477 .addExternalSymbol("__morestack_allocate_stack_space")
21478 .addRegMask(RegMask)
21479 .addReg(X86::EDI, RegState::Implicit)
21480 .addReg(X86::EAX, RegState::ImplicitDefine);
21482 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
21484 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
21485 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
21486 .addExternalSymbol("__morestack_allocate_stack_space")
21487 .addRegMask(RegMask)
21488 .addReg(X86::EAX, RegState::ImplicitDefine);
21492 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
21495 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
21496 .addReg(IsLP64 ? X86::RAX : X86::EAX);
21497 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21499 // Set up the CFG correctly.
21500 BB->addSuccessor(bumpMBB);
21501 BB->addSuccessor(mallocMBB);
21502 mallocMBB->addSuccessor(continueMBB);
21503 bumpMBB->addSuccessor(continueMBB);
21505 // Take care of the PHI nodes.
21506 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
21507 MI->getOperand(0).getReg())
21508 .addReg(mallocPtrVReg).addMBB(mallocMBB)
21509 .addReg(bumpSPPtrVReg).addMBB(bumpMBB);
21511 // Delete the original pseudo instruction.
21512 MI->eraseFromParent();
21515 return continueMBB;
21518 MachineBasicBlock *
21519 X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
21520 MachineBasicBlock *BB) const {
21521 DebugLoc DL = MI->getDebugLoc();
21523 assert(!Subtarget->isTargetMachO());
21525 X86FrameLowering::emitStackProbeCall(*BB->getParent(), *BB, MI, DL);
21527 MI->eraseFromParent(); // The pseudo instruction is gone now.
21531 MachineBasicBlock *
21532 X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
21533 MachineBasicBlock *BB) const {
21534 // This is pretty easy. We're taking the value that we received from
21535 // our load from the relocation, sticking it in either RDI (x86-64)
21536 // or EAX and doing an indirect call. The return value will then
21537 // be in the normal return register.
21538 MachineFunction *F = BB->getParent();
21539 const X86InstrInfo *TII = Subtarget->getInstrInfo();
21540 DebugLoc DL = MI->getDebugLoc();
21542 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
21543 assert(MI->getOperand(3).isGlobal() && "This should be a global");
21545 // Get a register mask for the lowered call.
21546 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
21547 // proper register mask.
21548 const uint32_t *RegMask =
21549 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
21550 if (Subtarget->is64Bit()) {
21551 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21552 TII->get(X86::MOV64rm), X86::RDI)
21554 .addImm(0).addReg(0)
21555 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21556 MI->getOperand(3).getTargetFlags())
21558 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
21559 addDirectMem(MIB, X86::RDI);
21560 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
21561 } else if (F->getTarget().getRelocationModel() != Reloc::PIC_) {
21562 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21563 TII->get(X86::MOV32rm), X86::EAX)
21565 .addImm(0).addReg(0)
21566 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21567 MI->getOperand(3).getTargetFlags())
21569 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21570 addDirectMem(MIB, X86::EAX);
21571 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21573 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21574 TII->get(X86::MOV32rm), X86::EAX)
21575 .addReg(TII->getGlobalBaseReg(F))
21576 .addImm(0).addReg(0)
21577 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21578 MI->getOperand(3).getTargetFlags())
21580 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21581 addDirectMem(MIB, X86::EAX);
21582 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21585 MI->eraseFromParent(); // The pseudo instruction is gone now.
21589 MachineBasicBlock *
21590 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
21591 MachineBasicBlock *MBB) const {
21592 DebugLoc DL = MI->getDebugLoc();
21593 MachineFunction *MF = MBB->getParent();
21594 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21595 MachineRegisterInfo &MRI = MF->getRegInfo();
21597 const BasicBlock *BB = MBB->getBasicBlock();
21598 MachineFunction::iterator I = MBB;
21601 // Memory Reference
21602 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21603 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21606 unsigned MemOpndSlot = 0;
21608 unsigned CurOp = 0;
21610 DstReg = MI->getOperand(CurOp++).getReg();
21611 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
21612 assert(RC->hasType(MVT::i32) && "Invalid destination!");
21613 unsigned mainDstReg = MRI.createVirtualRegister(RC);
21614 unsigned restoreDstReg = MRI.createVirtualRegister(RC);
21616 MemOpndSlot = CurOp;
21618 MVT PVT = getPointerTy();
21619 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21620 "Invalid Pointer Size!");
21622 // For v = setjmp(buf), we generate
21625 // buf[LabelOffset] = restoreMBB
21626 // SjLjSetup restoreMBB
21632 // v = phi(main, restore)
21635 // if base pointer being used, load it from frame
21638 MachineBasicBlock *thisMBB = MBB;
21639 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
21640 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
21641 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
21642 MF->insert(I, mainMBB);
21643 MF->insert(I, sinkMBB);
21644 MF->push_back(restoreMBB);
21646 MachineInstrBuilder MIB;
21648 // Transfer the remainder of BB and its successor edges to sinkMBB.
21649 sinkMBB->splice(sinkMBB->begin(), MBB,
21650 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21651 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
21654 unsigned PtrStoreOpc = 0;
21655 unsigned LabelReg = 0;
21656 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21657 Reloc::Model RM = MF->getTarget().getRelocationModel();
21658 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
21659 (RM == Reloc::Static || RM == Reloc::DynamicNoPIC);
21661 // Prepare IP either in reg or imm.
21662 if (!UseImmLabel) {
21663 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
21664 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
21665 LabelReg = MRI.createVirtualRegister(PtrRC);
21666 if (Subtarget->is64Bit()) {
21667 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
21671 .addMBB(restoreMBB)
21674 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
21675 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
21676 .addReg(XII->getGlobalBaseReg(MF))
21679 .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference())
21683 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
21685 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
21686 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21687 if (i == X86::AddrDisp)
21688 MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset);
21690 MIB.addOperand(MI->getOperand(MemOpndSlot + i));
21693 MIB.addReg(LabelReg);
21695 MIB.addMBB(restoreMBB);
21696 MIB.setMemRefs(MMOBegin, MMOEnd);
21698 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
21699 .addMBB(restoreMBB);
21701 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
21702 MIB.addRegMask(RegInfo->getNoPreservedMask());
21703 thisMBB->addSuccessor(mainMBB);
21704 thisMBB->addSuccessor(restoreMBB);
21708 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
21709 mainMBB->addSuccessor(sinkMBB);
21712 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21713 TII->get(X86::PHI), DstReg)
21714 .addReg(mainDstReg).addMBB(mainMBB)
21715 .addReg(restoreDstReg).addMBB(restoreMBB);
21718 if (RegInfo->hasBasePointer(*MF)) {
21719 const bool Uses64BitFramePtr =
21720 Subtarget->isTarget64BitLP64() || Subtarget->isTargetNaCl64();
21721 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
21722 X86FI->setRestoreBasePointer(MF);
21723 unsigned FramePtr = RegInfo->getFrameRegister(*MF);
21724 unsigned BasePtr = RegInfo->getBaseRegister();
21725 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
21726 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
21727 FramePtr, true, X86FI->getRestoreBasePointerOffset())
21728 .setMIFlag(MachineInstr::FrameSetup);
21730 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
21731 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
21732 restoreMBB->addSuccessor(sinkMBB);
21734 MI->eraseFromParent();
21738 MachineBasicBlock *
21739 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
21740 MachineBasicBlock *MBB) const {
21741 DebugLoc DL = MI->getDebugLoc();
21742 MachineFunction *MF = MBB->getParent();
21743 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21744 MachineRegisterInfo &MRI = MF->getRegInfo();
21746 // Memory Reference
21747 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21748 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21750 MVT PVT = getPointerTy();
21751 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21752 "Invalid Pointer Size!");
21754 const TargetRegisterClass *RC =
21755 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
21756 unsigned Tmp = MRI.createVirtualRegister(RC);
21757 // Since FP is only updated here but NOT referenced, it's treated as GPR.
21758 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
21759 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
21760 unsigned SP = RegInfo->getStackRegister();
21762 MachineInstrBuilder MIB;
21764 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21765 const int64_t SPOffset = 2 * PVT.getStoreSize();
21767 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
21768 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
21771 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
21772 for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
21773 MIB.addOperand(MI->getOperand(i));
21774 MIB.setMemRefs(MMOBegin, MMOEnd);
21776 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
21777 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21778 if (i == X86::AddrDisp)
21779 MIB.addDisp(MI->getOperand(i), LabelOffset);
21781 MIB.addOperand(MI->getOperand(i));
21783 MIB.setMemRefs(MMOBegin, MMOEnd);
21785 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
21786 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21787 if (i == X86::AddrDisp)
21788 MIB.addDisp(MI->getOperand(i), SPOffset);
21790 MIB.addOperand(MI->getOperand(i));
21792 MIB.setMemRefs(MMOBegin, MMOEnd);
21794 BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
21796 MI->eraseFromParent();
21800 // Replace 213-type (isel default) FMA3 instructions with 231-type for
21801 // accumulator loops. Writing back to the accumulator allows the coalescer
21802 // to remove extra copies in the loop.
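// As a source-level motivation (not tied to any particular test case), in an
// accumulator loop such as
//   for (int i = 0; i < n; ++i)
//     acc = a[i] * b[i] + acc;
// the 213 form computes dst = src2 * dst + src3, tying the destination to a
// multiplicand, so 'acc' has to be copied into a fresh register on every
// iteration; the 231 form computes dst = src2 * src3 + dst, letting 'acc'
// stay tied to the destination for the whole loop.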
21803 MachineBasicBlock *
21804 X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
21805 MachineBasicBlock *MBB) const {
21806 MachineOperand &AddendOp = MI->getOperand(3);
21808 // Bail out early if the addend isn't a register - we can't switch these.
21809 if (!AddendOp.isReg())
21812 MachineFunction &MF = *MBB->getParent();
21813 MachineRegisterInfo &MRI = MF.getRegInfo();
21815 // Check whether the addend is defined by a PHI:
21816 assert(MRI.hasOneDef(AddendOp.getReg()) && "Multiple defs in SSA?");
21817 MachineInstr &AddendDef = *MRI.def_instr_begin(AddendOp.getReg());
  if (!AddendDef.isPHI())
    return MBB;
  // Look for the following pattern:
  //   loop:
  //     %addend = phi [%entry, 0], [%loop, %result]
  //     ...
  //     %result<tied1> = FMA213 %m2<tied0>, %m1, %addend
  //
  // Replace with:
  //   loop:
  //     %addend = phi [%entry, 0], [%loop, %result]
  //     ...
  //     %result<tied1> = FMA231 %addend<tied0>, %m1, %m2
21833 for (unsigned i = 1, e = AddendDef.getNumOperands(); i < e; i += 2) {
21834 assert(AddendDef.getOperand(i).isReg());
21835 MachineOperand PHISrcOp = AddendDef.getOperand(i);
21836 MachineInstr &PHISrcInst = *MRI.def_instr_begin(PHISrcOp.getReg());
21837 if (&PHISrcInst == MI) {
21838 // Found a matching instruction.
21839 unsigned NewFMAOpc = 0;
21840 switch (MI->getOpcode()) {
21841 case X86::VFMADDPDr213r: NewFMAOpc = X86::VFMADDPDr231r; break;
21842 case X86::VFMADDPSr213r: NewFMAOpc = X86::VFMADDPSr231r; break;
21843 case X86::VFMADDSDr213r: NewFMAOpc = X86::VFMADDSDr231r; break;
21844 case X86::VFMADDSSr213r: NewFMAOpc = X86::VFMADDSSr231r; break;
21845 case X86::VFMSUBPDr213r: NewFMAOpc = X86::VFMSUBPDr231r; break;
21846 case X86::VFMSUBPSr213r: NewFMAOpc = X86::VFMSUBPSr231r; break;
21847 case X86::VFMSUBSDr213r: NewFMAOpc = X86::VFMSUBSDr231r; break;
21848 case X86::VFMSUBSSr213r: NewFMAOpc = X86::VFMSUBSSr231r; break;
21849 case X86::VFNMADDPDr213r: NewFMAOpc = X86::VFNMADDPDr231r; break;
21850 case X86::VFNMADDPSr213r: NewFMAOpc = X86::VFNMADDPSr231r; break;
21851 case X86::VFNMADDSDr213r: NewFMAOpc = X86::VFNMADDSDr231r; break;
21852 case X86::VFNMADDSSr213r: NewFMAOpc = X86::VFNMADDSSr231r; break;
21853 case X86::VFNMSUBPDr213r: NewFMAOpc = X86::VFNMSUBPDr231r; break;
21854 case X86::VFNMSUBPSr213r: NewFMAOpc = X86::VFNMSUBPSr231r; break;
21855 case X86::VFNMSUBSDr213r: NewFMAOpc = X86::VFNMSUBSDr231r; break;
21856 case X86::VFNMSUBSSr213r: NewFMAOpc = X86::VFNMSUBSSr231r; break;
21857 case X86::VFMADDSUBPDr213r: NewFMAOpc = X86::VFMADDSUBPDr231r; break;
21858 case X86::VFMADDSUBPSr213r: NewFMAOpc = X86::VFMADDSUBPSr231r; break;
21859 case X86::VFMSUBADDPDr213r: NewFMAOpc = X86::VFMSUBADDPDr231r; break;
21860 case X86::VFMSUBADDPSr213r: NewFMAOpc = X86::VFMSUBADDPSr231r; break;
21862 case X86::VFMADDPDr213rY: NewFMAOpc = X86::VFMADDPDr231rY; break;
21863 case X86::VFMADDPSr213rY: NewFMAOpc = X86::VFMADDPSr231rY; break;
21864 case X86::VFMSUBPDr213rY: NewFMAOpc = X86::VFMSUBPDr231rY; break;
21865 case X86::VFMSUBPSr213rY: NewFMAOpc = X86::VFMSUBPSr231rY; break;
21866 case X86::VFNMADDPDr213rY: NewFMAOpc = X86::VFNMADDPDr231rY; break;
21867 case X86::VFNMADDPSr213rY: NewFMAOpc = X86::VFNMADDPSr231rY; break;
21868 case X86::VFNMSUBPDr213rY: NewFMAOpc = X86::VFNMSUBPDr231rY; break;
21869 case X86::VFNMSUBPSr213rY: NewFMAOpc = X86::VFNMSUBPSr231rY; break;
21870 case X86::VFMADDSUBPDr213rY: NewFMAOpc = X86::VFMADDSUBPDr231rY; break;
21871 case X86::VFMADDSUBPSr213rY: NewFMAOpc = X86::VFMADDSUBPSr231rY; break;
21872 case X86::VFMSUBADDPDr213rY: NewFMAOpc = X86::VFMSUBADDPDr231rY; break;
21873 case X86::VFMSUBADDPSr213rY: NewFMAOpc = X86::VFMSUBADDPSr231rY; break;
      default: llvm_unreachable("Unrecognized FMA variant.");
      }
21877 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
21878 MachineInstrBuilder MIB =
21879 BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
21880 .addOperand(MI->getOperand(0))
21881 .addOperand(MI->getOperand(3))
21882 .addOperand(MI->getOperand(2))
21883 .addOperand(MI->getOperand(1));
21884 MBB->insert(MachineBasicBlock::iterator(MI), MIB);
      MI->eraseFromParent();
      break;
    }
  }

  return MBB;
}
21892 MachineBasicBlock *
21893 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
21894 MachineBasicBlock *BB) const {
21895 switch (MI->getOpcode()) {
21896 default: llvm_unreachable("Unexpected instr type to insert");
21897 case X86::TAILJMPd64:
21898 case X86::TAILJMPr64:
21899 case X86::TAILJMPm64:
21900 case X86::TAILJMPd64_REX:
21901 case X86::TAILJMPr64_REX:
21902 case X86::TAILJMPm64_REX:
21903 llvm_unreachable("TAILJMP64 would not be touched here.");
21904 case X86::TCRETURNdi64:
21905 case X86::TCRETURNri64:
  case X86::TCRETURNmi64:
    return BB;
21908 case X86::WIN_ALLOCA:
21909 return EmitLoweredWinAlloca(MI, BB);
21910 case X86::SEG_ALLOCA_32:
21911 case X86::SEG_ALLOCA_64:
21912 return EmitLoweredSegAlloca(MI, BB);
21913 case X86::TLSCall_32:
21914 case X86::TLSCall_64:
21915 return EmitLoweredTLSCall(MI, BB);
21916 case X86::CMOV_GR8:
21917 case X86::CMOV_FR32:
21918 case X86::CMOV_FR64:
21919 case X86::CMOV_V4F32:
21920 case X86::CMOV_V2F64:
21921 case X86::CMOV_V2I64:
21922 case X86::CMOV_V8F32:
21923 case X86::CMOV_V4F64:
21924 case X86::CMOV_V4I64:
21925 case X86::CMOV_V16F32:
21926 case X86::CMOV_V8F64:
21927 case X86::CMOV_V8I64:
21928 case X86::CMOV_GR16:
21929 case X86::CMOV_GR32:
21930 case X86::CMOV_RFP32:
21931 case X86::CMOV_RFP64:
21932 case X86::CMOV_RFP80:
21933 return EmitLoweredSelect(MI, BB);
21935 case X86::FP32_TO_INT16_IN_MEM:
21936 case X86::FP32_TO_INT32_IN_MEM:
21937 case X86::FP32_TO_INT64_IN_MEM:
21938 case X86::FP64_TO_INT16_IN_MEM:
21939 case X86::FP64_TO_INT32_IN_MEM:
21940 case X86::FP64_TO_INT64_IN_MEM:
21941 case X86::FP80_TO_INT16_IN_MEM:
21942 case X86::FP80_TO_INT32_IN_MEM:
21943 case X86::FP80_TO_INT64_IN_MEM: {
21944 MachineFunction *F = BB->getParent();
21945 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21946 DebugLoc DL = MI->getDebugLoc();
21948 // Change the floating point control register to use "round towards zero"
21949 // mode when truncating to an integer value.
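    // (Background note: the x87 FIST/FISTP family honours the rounding mode in
    // the FPU control word, which defaults to round-to-nearest, while C-style
    // float-to-int conversion requires truncation toward zero. Hence the
    // save/modify/restore sequence around the store below.)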
21950 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
21951 addFrameReference(BuildMI(*BB, MI, DL,
21952 TII->get(X86::FNSTCW16m)), CWFrameIdx);
21954 // Load the old value of the high byte of the control word...
    unsigned OldCW =
        F->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
                      CWFrameIdx);
21960 // Set the high part to be round to zero...
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
      .addImm(0xC7F);
21964 // Reload the modified control word now...
21965 addFrameReference(BuildMI(*BB, MI, DL,
21966 TII->get(X86::FLDCW16m)), CWFrameIdx);
21968 // Restore the memory image of control word to original value
    addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
      .addReg(OldCW);
    // Get the X86 opcode to use.
    unsigned Opc;
21974 switch (MI->getOpcode()) {
21975 default: llvm_unreachable("illegal opcode!");
21976 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
21977 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
21978 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
21979 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
21980 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
21981 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
21982 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
21983 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
    case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
    }

    X86AddressMode AM;
21988 MachineOperand &Op = MI->getOperand(0);
    if (Op.isReg()) {
      AM.BaseType = X86AddressMode::RegBase;
      AM.Base.Reg = Op.getReg();
    } else {
      AM.BaseType = X86AddressMode::FrameIndexBase;
      AM.Base.FrameIndex = Op.getIndex();
    }
    Op = MI->getOperand(1);
    if (Op.isImm())
      AM.Scale = Op.getImm();
    Op = MI->getOperand(2);
    if (Op.isImm())
      AM.IndexReg = Op.getImm();
    Op = MI->getOperand(3);
    if (Op.isGlobal()) {
      AM.GV = Op.getGlobal();
    } else {
      AM.Disp = Op.getImm();
    }
22008 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
22009 .addReg(MI->getOperand(X86::AddrNumOperands).getReg());
22011 // Reload the original control word now.
22012 addFrameReference(BuildMI(*BB, MI, DL,
22013 TII->get(X86::FLDCW16m)), CWFrameIdx);
    MI->eraseFromParent(); // The pseudo instruction is gone now.
    return BB;
  }
22018 // String/text processing lowering.
22019 case X86::PCMPISTRM128REG:
22020 case X86::VPCMPISTRM128REG:
22021 case X86::PCMPISTRM128MEM:
22022 case X86::VPCMPISTRM128MEM:
22023 case X86::PCMPESTRM128REG:
22024 case X86::VPCMPESTRM128REG:
22025 case X86::PCMPESTRM128MEM:
22026 case X86::VPCMPESTRM128MEM:
22027 assert(Subtarget->hasSSE42() &&
22028 "Target must have SSE4.2 or AVX features enabled");
22029 return EmitPCMPSTRM(MI, BB, Subtarget->getInstrInfo());
22031 // String/text processing lowering.
22032 case X86::PCMPISTRIREG:
22033 case X86::VPCMPISTRIREG:
22034 case X86::PCMPISTRIMEM:
22035 case X86::VPCMPISTRIMEM:
22036 case X86::PCMPESTRIREG:
22037 case X86::VPCMPESTRIREG:
22038 case X86::PCMPESTRIMEM:
22039 case X86::VPCMPESTRIMEM:
22040 assert(Subtarget->hasSSE42() &&
22041 "Target must have SSE4.2 or AVX features enabled");
22042 return EmitPCMPSTRI(MI, BB, Subtarget->getInstrInfo());
    // Thread synchronization.
  case X86::MONITOR:
    return EmitMonitor(MI, BB, Subtarget);
    // xbegin
  case X86::XBEGIN:
    return EmitXBegin(MI, BB, Subtarget->getInstrInfo());
22052 case X86::VASTART_SAVE_XMM_REGS:
22053 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
22055 case X86::VAARG_64:
22056 return EmitVAARG64WithCustomInserter(MI, BB);
22058 case X86::EH_SjLj_SetJmp32:
22059 case X86::EH_SjLj_SetJmp64:
22060 return emitEHSjLjSetJmp(MI, BB);
22062 case X86::EH_SjLj_LongJmp32:
22063 case X86::EH_SjLj_LongJmp64:
22064 return emitEHSjLjLongJmp(MI, BB);
22066 case TargetOpcode::STATEPOINT:
22067 // As an implementation detail, STATEPOINT shares the STACKMAP format at
22068 // this point in the process. We diverge later.
22069 return emitPatchPoint(MI, BB);
22071 case TargetOpcode::STACKMAP:
22072 case TargetOpcode::PATCHPOINT:
22073 return emitPatchPoint(MI, BB);
22075 case X86::VFMADDPDr213r:
22076 case X86::VFMADDPSr213r:
22077 case X86::VFMADDSDr213r:
22078 case X86::VFMADDSSr213r:
22079 case X86::VFMSUBPDr213r:
22080 case X86::VFMSUBPSr213r:
22081 case X86::VFMSUBSDr213r:
22082 case X86::VFMSUBSSr213r:
22083 case X86::VFNMADDPDr213r:
22084 case X86::VFNMADDPSr213r:
22085 case X86::VFNMADDSDr213r:
22086 case X86::VFNMADDSSr213r:
22087 case X86::VFNMSUBPDr213r:
22088 case X86::VFNMSUBPSr213r:
22089 case X86::VFNMSUBSDr213r:
22090 case X86::VFNMSUBSSr213r:
22091 case X86::VFMADDSUBPDr213r:
22092 case X86::VFMADDSUBPSr213r:
22093 case X86::VFMSUBADDPDr213r:
22094 case X86::VFMSUBADDPSr213r:
22095 case X86::VFMADDPDr213rY:
22096 case X86::VFMADDPSr213rY:
22097 case X86::VFMSUBPDr213rY:
22098 case X86::VFMSUBPSr213rY:
22099 case X86::VFNMADDPDr213rY:
22100 case X86::VFNMADDPSr213rY:
22101 case X86::VFNMSUBPDr213rY:
22102 case X86::VFNMSUBPSr213rY:
22103 case X86::VFMADDSUBPDr213rY:
22104 case X86::VFMADDSUBPSr213rY:
22105 case X86::VFMSUBADDPDr213rY:
22106 case X86::VFMSUBADDPSr213rY:
    return emitFMA3Instr(MI, BB);
  }
}
22111 //===----------------------------------------------------------------------===//
22112 // X86 Optimization Hooks
22113 //===----------------------------------------------------------------------===//
void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
                                                      APInt &KnownZero,
                                                      APInt &KnownOne,
22118 const SelectionDAG &DAG,
22119 unsigned Depth) const {
22120 unsigned BitWidth = KnownZero.getBitWidth();
22121 unsigned Opc = Op.getOpcode();
22122 assert((Opc >= ISD::BUILTIN_OP_END ||
22123 Opc == ISD::INTRINSIC_WO_CHAIN ||
22124 Opc == ISD::INTRINSIC_W_CHAIN ||
22125 Opc == ISD::INTRINSIC_VOID) &&
22126 "Should use MaskedValueIsZero if you don't know whether Op"
22127 " is a target node!");
  KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
  switch (Opc) {
  default: break;
  case X86ISD::ADD:
  case X86ISD::SUB:
  case X86ISD::ADC:
  case X86ISD::SBB:
  case X86ISD::SMUL:
  case X86ISD::UMUL:
  case X86ISD::INC:
  case X86ISD::DEC:
  case X86ISD::OR:
  case X86ISD::XOR:
  case X86ISD::AND:
    // These nodes' second result is a boolean.
    if (Op.getResNo() == 0)
      break;
    // Fallthrough
  case X86ISD::SETCC:
    KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
    break;
22150 case ISD::INTRINSIC_WO_CHAIN: {
22151 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    unsigned NumLoBits = 0;
    switch (IntId) {
    default: break;
22155 case Intrinsic::x86_sse_movmsk_ps:
22156 case Intrinsic::x86_avx_movmsk_ps_256:
22157 case Intrinsic::x86_sse2_movmsk_pd:
22158 case Intrinsic::x86_avx_movmsk_pd_256:
22159 case Intrinsic::x86_mmx_pmovmskb:
22160 case Intrinsic::x86_sse2_pmovmskb_128:
22161 case Intrinsic::x86_avx2_pmovmskb: {
      // High bits of movmskp{s|d}, pmovmskb are known zero.
      switch (IntId) {
22164 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
22165 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break;
22166 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break;
22167 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break;
22168 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break;
22169 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break;
22170 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
      case Intrinsic::x86_avx2_pmovmskb:     NumLoBits = 32; break;
      }
      KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
      break;
    }
    }
    break;
  }
  }
}
unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
    SDValue Op,
22184 const SelectionDAG &,
22185 unsigned Depth) const {
22186 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
22187 if (Op.getOpcode() == X86ISD::SETCC_CARRY)
    return Op.getValueType().getScalarType().getSizeInBits();

  // Fallback case.
  return 1;
}
22194 /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
22195 /// node is a GlobalAddress + offset.
22196 bool X86TargetLowering::isGAPlusOffset(SDNode *N,
22197 const GlobalValue* &GA,
22198 int64_t &Offset) const {
22199 if (N->getOpcode() == X86ISD::Wrapper) {
22200 if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
22201 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
      Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
      return true;
    }
  }
  return TargetLowering::isGAPlusOffset(N, GA, Offset);
}
22209 /// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the
22210 /// same as extracting the high 128-bit part of 256-bit vector and then
22211 /// inserting the result into the low part of a new 256-bit vector
22212 static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) {
22213 EVT VT = SVOp->getValueType(0);
22214 unsigned NumElems = VT.getVectorNumElements();
22216 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
22217 for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j)
22218 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
        SVOp->getMaskElt(j) >= 0)
      return false;

  return true;
}
22225 /// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the
22226 /// same as extracting the low 128-bit part of 256-bit vector and then
22227 /// inserting the result into the high part of a new 256-bit vector
22228 static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) {
22229 EVT VT = SVOp->getValueType(0);
22230 unsigned NumElems = VT.getVectorNumElements();
22232 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
22233 for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j)
22234 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
        SVOp->getMaskElt(j) >= 0)
      return false;

  return true;
}
22241 /// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
22242 static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
22243 TargetLowering::DAGCombinerInfo &DCI,
                                         const X86Subtarget* Subtarget) {
  SDLoc dl(N);
22246 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
22247 SDValue V1 = SVOp->getOperand(0);
22248 SDValue V2 = SVOp->getOperand(1);
22249 EVT VT = SVOp->getValueType(0);
22250 unsigned NumElems = VT.getVectorNumElements();
22252 if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
22253 V2.getOpcode() == ISD::CONCAT_VECTORS) {
    //
    //    V      UNDEF    BUILD_VECTOR(0,...)    UNDEF
    //     \      /              \               /
    //   CONCAT_VECTORS          CONCAT_VECTORS
    //              \               /
    //           RESULT: V + zero extended
    //
22264 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
22265 V2.getOperand(1).getOpcode() != ISD::UNDEF ||
        V1.getOperand(1).getOpcode() != ISD::UNDEF)
      return SDValue();

    if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
      return SDValue();
22272 // To match the shuffle mask, the first half of the mask should
22273 // be exactly the first vector, and all the rest a splat with the
22274 // first element of the second one.
22275 for (unsigned i = 0; i != NumElems/2; ++i)
22276 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) ||
          !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
        return SDValue();
22280 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
22281 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) {
22282 if (Ld->hasNUsesOfValue(1, 0)) {
22283 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
22284 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
        SDValue ResNode =
          DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
                                  MVT::i64,
                                  Ld->getPointerInfo(),
22289 Ld->getAlignment(),
22290 false/*isVolatile*/, true/*ReadMem*/,
22291 false/*WriteMem*/);
22293 // Make sure the newly-created LOAD is in the same position as Ld in
22294 // terms of dependency. We create a TokenFactor for Ld and ResNode,
22295 // and update uses of Ld's output chain to use the TokenFactor.
22296 if (Ld->hasAnyUseOfValue(1)) {
22297 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
22298 SDValue(Ld, 1), SDValue(ResNode.getNode(), 1));
22299 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
22300 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1),
                                 SDValue(ResNode.getNode(), 1));
        }

        return DAG.getNode(ISD::BITCAST, dl, VT, ResNode);
      }
    }
    // Emit a zeroed vector and insert the desired subvector on its
    // first half.
22310 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
22311 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
    return DCI.CombineTo(N, InsV);
  }
22315 //===--------------------------------------------------------------------===//
22316 // Combine some shuffles into subvector extracts and inserts:
22319 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
22320 if (isShuffleHigh128VectorInsertLow(SVOp)) {
22321 SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl);
22322 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl);
    return DCI.CombineTo(N, InsV);
  }
22326 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
22327 if (isShuffleLow128VectorInsertHigh(SVOp)) {
22328 SDValue V = Extract128BitVector(V1, 0, DAG, dl);
22329 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl);
    return DCI.CombineTo(N, InsV);
  }

  return SDValue();
}
/// \brief Combine an arbitrary chain of shuffles into a single instruction if
/// possible.
///
/// This is the leaf of the recursive combining below. When we have found some
22340 /// chain of single-use x86 shuffle instructions and accumulated the combined
22341 /// shuffle mask represented by them, this will try to pattern match that mask
22342 /// into either a single instruction if there is a special purpose instruction
22343 /// for this operation, or into a PSHUFB instruction which is a fully general
22344 /// instruction but should only be used to replace chains over a certain depth.
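/// (In the code below, "a certain depth" concretely means: only form PSHUFB
/// once the accumulated chain is at least three instructions deep, or once a
/// PSHUFB is already part of the chain.)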
22345 static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
22346 int Depth, bool HasPSHUFB, SelectionDAG &DAG,
22347 TargetLowering::DAGCombinerInfo &DCI,
22348 const X86Subtarget *Subtarget) {
22349 assert(!Mask.empty() && "Cannot combine an empty shuffle mask!");
22351 // Find the operand that enters the chain. Note that multiple uses are OK
22352 // here, we're not going to remove the operand we find.
22353 SDValue Input = Op.getOperand(0);
22354 while (Input.getOpcode() == ISD::BITCAST)
22355 Input = Input.getOperand(0);
22357 MVT VT = Input.getSimpleValueType();
  MVT RootVT = Root.getSimpleValueType();
  SDLoc DL(Root);
22361 // Just remove no-op shuffle masks.
22362 if (Mask.size() == 1) {
    DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Input),
                  /*AddTo*/ true);
    return true;
  }
22368 // Use the float domain if the operand type is a floating point type.
22369 bool FloatDomain = VT.isFloatingPoint();
22371 // For floating point shuffles, we don't have free copies in the shuffle
22372 // instructions or the ability to load as part of the instruction, so
22373 // canonicalize their shuffles to UNPCK or MOV variants.
22375 // Note that even with AVX we prefer the PSHUFD form of shuffle for integer
22376 // vectors because it can have a load folded into it that UNPCK cannot. This
22377 // doesn't preclude something switching to the shorter encoding post-RA.
  if (FloatDomain) {
    if (Mask.equals(0, 0) || Mask.equals(1, 1)) {
      bool Lo = Mask.equals(0, 0);
      unsigned Shuffle;
      MVT ShuffleVT;
22383 // Check if we have SSE3 which will let us use MOVDDUP. That instruction
22384 // is no slower than UNPCKLPD but has the option to fold the input operand
22385 // into even an unaligned memory load.
22386 if (Lo && Subtarget->hasSSE3()) {
22387 Shuffle = X86ISD::MOVDDUP;
        ShuffleVT = MVT::v2f64;
      } else {
        // We have MOVLHPS and MOVHLPS throughout SSE and they encode smaller
        // than the UNPCK variants.
        Shuffle = Lo ? X86ISD::MOVLHPS : X86ISD::MOVHLPS;
        ShuffleVT = MVT::v4f32;
      }
22395 if (Depth == 1 && Root->getOpcode() == Shuffle)
22396 return false; // Nothing to do!
22397 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22398 DCI.AddToWorklist(Op.getNode());
22399 if (Shuffle == X86ISD::MOVDDUP)
        Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
      else
        Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22403 DCI.AddToWorklist(Op.getNode());
      DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
                    /*AddTo*/ true);
      return true;
    }
22408 if (Subtarget->hasSSE3() &&
22409 (Mask.equals(0, 0, 2, 2) || Mask.equals(1, 1, 3, 3))) {
22410 bool Lo = Mask.equals(0, 0, 2, 2);
22411 unsigned Shuffle = Lo ? X86ISD::MOVSLDUP : X86ISD::MOVSHDUP;
22412 MVT ShuffleVT = MVT::v4f32;
22413 if (Depth == 1 && Root->getOpcode() == Shuffle)
22414 return false; // Nothing to do!
22415 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22416 DCI.AddToWorklist(Op.getNode());
22417 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22418 DCI.AddToWorklist(Op.getNode());
      DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
                    /*AddTo*/ true);
      return true;
    }
22423 if (Mask.equals(0, 0, 1, 1) || Mask.equals(2, 2, 3, 3)) {
22424 bool Lo = Mask.equals(0, 0, 1, 1);
22425 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22426 MVT ShuffleVT = MVT::v4f32;
22427 if (Depth == 1 && Root->getOpcode() == Shuffle)
22428 return false; // Nothing to do!
22429 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22430 DCI.AddToWorklist(Op.getNode());
22431 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22432 DCI.AddToWorklist(Op.getNode());
      DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
                    /*AddTo*/ true);
      return true;
    }
  }
22439 // We always canonicalize the 8 x i16 and 16 x i8 shuffles into their UNPCK
22440 // variants as none of these have single-instruction variants that are
22441 // superior to the UNPCK formulation.
22442 if (!FloatDomain &&
22443 (Mask.equals(0, 0, 1, 1, 2, 2, 3, 3) ||
22444 Mask.equals(4, 4, 5, 5, 6, 6, 7, 7) ||
22445 Mask.equals(0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7) ||
       Mask.equals(8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
                   15))) {
22448 bool Lo = Mask[0] == 0;
22449 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22450 if (Depth == 1 && Root->getOpcode() == Shuffle)
22451 return false; // Nothing to do!
    MVT ShuffleVT;
    switch (Mask.size()) {
    case 8:
      ShuffleVT = MVT::v8i16;
      break;
    case 16:
      ShuffleVT = MVT::v16i8;
      break;
    default:
      llvm_unreachable("Impossible mask size!");
    }
22463 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22464 DCI.AddToWorklist(Op.getNode());
22465 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22466 DCI.AddToWorklist(Op.getNode());
    DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
                  /*AddTo*/ true);
    return true;
  }
22472 // Don't try to re-form single instruction chains under any circumstances now
  // that we've done encoding canonicalization for them.
  if (Depth < 2)
    return false;
22477 // If we have 3 or more shuffle instructions or a chain involving PSHUFB, we
22478 // can replace them with a single PSHUFB instruction profitably. Intel's
22479 // manuals suggest only using PSHUFB if doing so replacing 5 instructions, but
22480 // in practice PSHUFB tends to be *very* fast so we're more aggressive.
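  // Reminder of the PSHUFB semantics relied on below: each mask byte selects a
  // source byte by index, and a mask byte with its high bit set (the 255 used
  // below for zeroable lanes) forces the corresponding result byte to zero.
  // Wider mask elements are therefore expanded by Ratio into runs of
  // consecutive byte selectors.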
22481 if ((Depth >= 3 || HasPSHUFB) && Subtarget->hasSSSE3()) {
22482 SmallVector<SDValue, 16> PSHUFBMask;
22483 assert(Mask.size() <= 16 && "Can't shuffle elements smaller than bytes!");
22484 int Ratio = 16 / Mask.size();
22485 for (unsigned i = 0; i < 16; ++i) {
22486 if (Mask[i / Ratio] == SM_SentinelUndef) {
        PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
        continue;
      }
      int M = Mask[i / Ratio] != SM_SentinelZero
                  ? Ratio * Mask[i / Ratio] + i % Ratio
                  : 255;
      PSHUFBMask.push_back(DAG.getConstant(M, MVT::i8));
    }
22495 Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Input);
22496 DCI.AddToWorklist(Op.getNode());
22497 SDValue PSHUFBMaskOp =
22498 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, PSHUFBMask);
22499 DCI.AddToWorklist(PSHUFBMaskOp.getNode());
22500 Op = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, Op, PSHUFBMaskOp);
22501 DCI.AddToWorklist(Op.getNode());
    DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
                  /*AddTo*/ true);
    return true;
  }
  // Failed to find any combines.
  return false;
}
22511 /// \brief Fully generic combining of x86 shuffle instructions.
22513 /// This should be the last combine run over the x86 shuffle instructions. Once
22514 /// they have been fully optimized, this will recursively consider all chains
22515 /// of single-use shuffle instructions, build a generic model of the cumulative
22516 /// shuffle operation, and check for simpler instructions which implement this
22517 /// operation. We use this primarily for two purposes:
22519 /// 1) Collapse generic shuffles to specialized single instructions when
22520 /// equivalent. In most cases, this is just an encoding size win, but
22521 /// sometimes we will collapse multiple generic shuffles into a single
22522 /// special-purpose shuffle.
22523 /// 2) Look for sequences of shuffle instructions with 3 or more total
22524 /// instructions, and replace them with the slightly more expensive SSSE3
22525 /// PSHUFB instruction if available. We do this as the last combining step
22526 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
/// a suitable short sequence of other instructions. The PSHUFB will either
22528 /// use a register or have to read from memory and so is slightly (but only
22529 /// slightly) more expensive than the other shuffle instructions.
22531 /// Because this is inherently a quadratic operation (for each shuffle in
22532 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
22533 /// This should never be an issue in practice as the shuffle lowering doesn't
22534 /// produce sequences of more than 8 instructions.
22536 /// FIXME: We will currently miss some cases where the redundant shuffling
22537 /// would simplify under the threshold for PSHUFB formation because of
22538 /// combine-ordering. To fix this, we should do the redundant instruction
22539 /// combining in this recursive walk.
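/// (The recursion bound is enforced by the Depth > 8 bail-out at the top of
/// the function body below.)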
22540 static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
22541 ArrayRef<int> RootMask,
                                          int Depth, bool HasPSHUFB,
                                          SelectionDAG &DAG,
22544 TargetLowering::DAGCombinerInfo &DCI,
22545 const X86Subtarget *Subtarget) {
22546 // Bound the depth of our recursive combine because this is ultimately
  // quadratic in nature.
  if (Depth > 8)
    return false;
22551 // Directly rip through bitcasts to find the underlying operand.
22552 while (Op.getOpcode() == ISD::BITCAST && Op.getOperand(0).hasOneUse())
22553 Op = Op.getOperand(0);
22555 MVT VT = Op.getSimpleValueType();
22556 if (!VT.isVector())
22557 return false; // Bail if we hit a non-vector.
22558 // FIXME: This routine should be taught about 256-bit shuffles, or a 256-bit
22559 // version should be added.
  if (VT.getSizeInBits() != 128)
    return false;
22563 assert(Root.getSimpleValueType().isVector() &&
22564 "Shuffles operate on vector types!");
22565 assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
22566 "Can only combine shuffles of the same vector register size.");
  if (!isTargetShuffle(Op.getOpcode()))
    return false;
  SmallVector<int, 16> OpMask;
  bool IsUnary;
22572 bool HaveMask = getTargetShuffleMask(Op.getNode(), VT, OpMask, IsUnary);
22573 // We only can combine unary shuffles which we can decode the mask for.
  if (!HaveMask || !IsUnary)
    return false;
22577 assert(VT.getVectorNumElements() == OpMask.size() &&
22578 "Different mask size from vector size!");
22579 assert(((RootMask.size() > OpMask.size() &&
22580 RootMask.size() % OpMask.size() == 0) ||
22581 (OpMask.size() > RootMask.size() &&
22582 OpMask.size() % RootMask.size() == 0) ||
22583 OpMask.size() == RootMask.size()) &&
22584 "The smaller number of elements must divide the larger.");
22585 int RootRatio = std::max<int>(1, OpMask.size() / RootMask.size());
22586 int OpRatio = std::max<int>(1, RootMask.size() / OpMask.size());
22587 assert(((RootRatio == 1 && OpRatio == 1) ||
22588 (RootRatio == 1) != (OpRatio == 1)) &&
22589 "Must not have a ratio for both incoming and op masks!");
22591 SmallVector<int, 16> Mask;
22592 Mask.reserve(std::max(OpMask.size(), RootMask.size()));
22594 // Merge this shuffle operation's mask into our accumulated mask. Note that
22595 // this shuffle's mask will be the first applied to the input, followed by the
22596 // root mask to get us all the way to the root value arrangement. The reason
22597 // for this order is that we are recursing up the operation chain.
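  // Worked example (illustrative only): with RootMask = <0,0,2,2> over a
  // 4-element op whose OpMask = <1,0,3,2>, lane i of the result takes
  // OpMask[RootMask[i]], giving the merged mask <1,1,3,3>. The Ratio terms
  // below generalize this composition when the two masks have different
  // widths.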
22598 for (int i = 0, e = std::max(OpMask.size(), RootMask.size()); i < e; ++i) {
22599 int RootIdx = i / RootRatio;
22600 if (RootMask[RootIdx] < 0) {
22601 // This is a zero or undef lane, we're done.
      Mask.push_back(RootMask[RootIdx]);
      continue;
    }
22606 int RootMaskedIdx = RootMask[RootIdx] * RootRatio + i % RootRatio;
22607 int OpIdx = RootMaskedIdx / OpRatio;
22608 if (OpMask[OpIdx] < 0) {
      // The incoming lanes are zero or undef, it doesn't matter which ones we
      // are actually going to pick up.
      Mask.push_back(OpMask[OpIdx]);
      continue;
    }
22615 // Ok, we have non-zero lanes, map them through.
22616 Mask.push_back(OpMask[OpIdx] * OpRatio +
                   RootMaskedIdx % OpRatio);
  }
22620 // See if we can recurse into the operand to combine more things.
22621 switch (Op.getOpcode()) {
  case X86ISD::PSHUFB:
    HasPSHUFB = true;
22624 case X86ISD::PSHUFD:
22625 case X86ISD::PSHUFHW:
22626 case X86ISD::PSHUFLW:
22627 if (Op.getOperand(0).hasOneUse() &&
22628 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
                                      HasPSHUFB, DAG, DCI, Subtarget))
      return true;
    break;
22633 case X86ISD::UNPCKL:
22634 case X86ISD::UNPCKH:
22635 assert(Op.getOperand(0) == Op.getOperand(1) && "We only combine unary shuffles!");
22636 // We can't check for single use, we have to check that this shuffle is the only user.
22637 if (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
22638 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
                                      HasPSHUFB, DAG, DCI, Subtarget))
      return true;
    break;
  }
22644 // Minor canonicalization of the accumulated shuffle mask to make it easier
  // to match below. All this does is detect masks with sequential pairs of
22646 // elements, and shrink them to the half-width mask. It does this in a loop
22647 // so it will reduce the size of the mask to the minimal width mask which
22648 // performs an equivalent shuffle.
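  // For example (illustrative only), a v4 mask <2, 3, 0, 1> made of the
  // sequential pairs (2,3) and (0,1) shrinks to the v2 mask <1, 0>.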
22649 SmallVector<int, 16> WidenedMask;
22650 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
22651 Mask = std::move(WidenedMask);
    WidenedMask.clear();
  }
  return combineX86ShuffleChain(Op, Root, Mask, Depth, HasPSHUFB, DAG, DCI,
                                Subtarget);
}
22659 /// \brief Get the PSHUF-style mask from PSHUF node.
///
/// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
22662 /// PSHUF-style masks that can be reused with such instructions.
22663 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
  SmallVector<int, 4> Mask;
  bool IsUnary;
  bool HaveMask = getTargetShuffleMask(N.getNode(), N.getSimpleValueType(), Mask, IsUnary);
  (void)HaveMask;
  assert(HaveMask);
22670 switch (N.getOpcode()) {
  case X86ISD::PSHUFD:
    return Mask;
  case X86ISD::PSHUFLW:
    Mask.resize(4);
    return Mask;
  case X86ISD::PSHUFHW:
    Mask.erase(Mask.begin(), Mask.begin() + 4);
    for (int &M : Mask)
      M -= 4;
    return Mask;
  default:
    llvm_unreachable("No valid shuffle instruction found!");
  }
}
22686 /// \brief Search for a combinable shuffle across a chain ending in pshufd.
22688 /// We walk up the chain and look for a combinable shuffle, skipping over
22689 /// shuffles that we could hoist this shuffle's transformation past without
22690 /// altering anything.
static SDValue
combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
                             SelectionDAG &DAG,
22694 TargetLowering::DAGCombinerInfo &DCI) {
22695 assert(N.getOpcode() == X86ISD::PSHUFD &&
         "Called with something other than an x86 128-bit half shuffle!");
  SDLoc DL(N);
22699 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
  // of the shuffles in the chain so that we can form a fresh chain to replace
  // this one.
22702 SmallVector<SDValue, 8> Chain;
22703 SDValue V = N.getOperand(0);
22704 for (; V.hasOneUse(); V = V.getOperand(0)) {
    switch (V.getOpcode()) {
    default:
      return SDValue(); // Nothing combined!

    case ISD::BITCAST:
      // Skip bitcasts as we always know the type for the target specific
      // instructions.
      continue;

    case X86ISD::PSHUFD:
      // Found another dword shuffle.
      break;
22718 case X86ISD::PSHUFLW:
22719 // Check that the low words (being shuffled) are the identity in the
22720 // dword shuffle, and the high words are self-contained.
22721 if (Mask[0] != 0 || Mask[1] != 1 ||
          !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
        return SDValue();
      Chain.push_back(V);
      continue;
22728 case X86ISD::PSHUFHW:
22729 // Check that the high words (being shuffled) are the identity in the
22730 // dword shuffle, and the low words are self-contained.
22731 if (Mask[2] != 2 || Mask[3] != 3 ||
          !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
        return SDValue();
      Chain.push_back(V);
      continue;
22738 case X86ISD::UNPCKL:
22739 case X86ISD::UNPCKH:
22740 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
22741 // shuffle into a preceding word shuffle.
      if (V.getValueType() != MVT::v16i8 && V.getValueType() != MVT::v8i16)
        return SDValue();
22745 // Search for a half-shuffle which we can combine with.
22746 unsigned CombineOp =
22747 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
22748 if (V.getOperand(0) != V.getOperand(1) ||
          !V->isOnlyUserOf(V.getOperand(0).getNode()))
        return SDValue();
      Chain.push_back(V);
      V = V.getOperand(0);
      do {
        switch (V.getOpcode()) {
        default:
          return SDValue(); // Nothing to combine.

        case X86ISD::PSHUFLW:
        case X86ISD::PSHUFHW:
          if (V.getOpcode() == CombineOp)
            break;

          Chain.push_back(V);

          // Fallthrough!
        case ISD::BITCAST:
          V = V.getOperand(0);
          continue;
        }
        break;
      } while (V.hasOneUse());
    }
    // Break out of the loop if we break out of the switch.
    break;
  }
22778 if (!V.hasOneUse())
    // We fell out of the loop without finding a viable combining instruction.
    return SDValue();
22782 // Merge this node's mask and our incoming mask.
22783 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
  for (int &M : Mask)
    M = VMask[M];
22786 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
22787 getV4X86ShuffleImm8ForMask(Mask, DAG));
22789 // Rebuild the chain around this new shuffle.
22790 while (!Chain.empty()) {
22791 SDValue W = Chain.pop_back_val();
22793 if (V.getValueType() != W.getOperand(0).getValueType())
22794 V = DAG.getNode(ISD::BITCAST, DL, W.getOperand(0).getValueType(), V);
22796 switch (W.getOpcode()) {
22798 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
22800 case X86ISD::UNPCKL:
22801 case X86ISD::UNPCKH:
      V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
      break;
22805 case X86ISD::PSHUFD:
22806 case X86ISD::PSHUFLW:
22807 case X86ISD::PSHUFHW:
      V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
      break;
    }
  }
22812 if (V.getValueType() != N.getValueType())
22813 V = DAG.getNode(ISD::BITCAST, DL, N.getValueType(), V);
  // Return the new chain to replace N.
  return V;
}
22819 /// \brief Search for a combinable shuffle across a chain ending in pshuflw or pshufhw.
22821 /// We walk up the chain, skipping shuffles of the other half and looking
22822 /// through shuffles which switch halves trying to find a shuffle of the same
22823 /// pair of dwords.
static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
                                        SelectionDAG &DAG,
22826 TargetLowering::DAGCombinerInfo &DCI) {
  assert(
      (N.getOpcode() == X86ISD::PSHUFLW || N.getOpcode() == X86ISD::PSHUFHW) &&
      "Called with something other than an x86 128-bit half shuffle!");
  SDLoc DL(N);
22831 unsigned CombineOpcode = N.getOpcode();
22833 // Walk up a single-use chain looking for a combinable shuffle.
22834 SDValue V = N.getOperand(0);
22835 for (; V.hasOneUse(); V = V.getOperand(0)) {
    switch (V.getOpcode()) {
    default:
      return false; // Nothing combined!

    case ISD::BITCAST:
      // Skip bitcasts as we always know the type for the target specific
      // instructions.
      continue;

    case X86ISD::PSHUFLW:
    case X86ISD::PSHUFHW:
      if (V.getOpcode() == CombineOpcode)
        break;

      // Other-half shuffles are no-ops.
      continue;
    }
    // Break out of the loop if we break out of the switch.
    break;
  }
22857 if (!V.hasOneUse())
    // We fell out of the loop without finding a viable combining instruction.
    return false;
22861 // Combine away the bottom node as its shuffle will be accumulated into
22862 // a preceding shuffle.
22863 DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
  // Record the old value.
  SDValue Old = V;
22868 // Merge this node's mask and our incoming mask (adjusted to account for all
22869 // the pshufd instructions encountered).
22870 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
  for (int &M : Mask)
    M = VMask[M];
22873 V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
22874 getV4X86ShuffleImm8ForMask(Mask, DAG));
22876 // Check that the shuffles didn't cancel each other out. If not, we need to
  // combine to the new one.
  if (Old != V)
22879 // Replace the combinable shuffle with the combined one, updating all users
22880 // so that we re-evaluate the chain here.
    DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);

  return true;
}
22886 /// \brief Try to combine x86 target specific shuffles.
22887 static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
22888 TargetLowering::DAGCombinerInfo &DCI,
                                           const X86Subtarget *Subtarget) {
  SDLoc DL(N);
22891 MVT VT = N.getSimpleValueType();
22892 SmallVector<int, 4> Mask;
22894 switch (N.getOpcode()) {
22895 case X86ISD::PSHUFD:
22896 case X86ISD::PSHUFLW:
22897 case X86ISD::PSHUFHW:
22898 Mask = getPSHUFShuffleMask(N);
    assert(Mask.size() == 4);
    break;
  default:
    return SDValue();
  }
22905 // Nuke no-op shuffles that show up after combining.
22906 if (isNoopShuffleMask(Mask))
22907 return DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22909 // Look for simplifications involving one or two shuffle instructions.
22910 SDValue V = N.getOperand(0);
  switch (N.getOpcode()) {
  default:
    break;
22914 case X86ISD::PSHUFLW:
22915 case X86ISD::PSHUFHW:
22916 assert(VT == MVT::v8i16);
22919 if (combineRedundantHalfShuffle(N, Mask, DAG, DCI))
22920 return SDValue(); // We combined away this shuffle, so we're done.
22922 // See if this reduces to a PSHUFD which is no more expensive and can
22923 // combine with more operations. Note that it has to at least flip the
22924 // dwords as otherwise it would have been removed as a no-op.
22925 if (Mask[0] == 2 && Mask[1] == 3 && Mask[2] == 0 && Mask[3] == 1) {
22926 int DMask[] = {0, 1, 2, 3};
22927 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
22928 DMask[DOffset + 0] = DOffset + 1;
22929 DMask[DOffset + 1] = DOffset + 0;
22930 V = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V);
22931 DCI.AddToWorklist(V.getNode());
22932 V = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V,
22933 getV4X86ShuffleImm8ForMask(DMask, DAG));
22934 DCI.AddToWorklist(V.getNode());
22935 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
22938 // Look for shuffle patterns which can be implemented as a single unpack.
22939 // FIXME: This doesn't handle the location of the PSHUFD generically, and
22940 // only works when we have a PSHUFD followed by two half-shuffles.
22941 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
22942 (V.getOpcode() == X86ISD::PSHUFLW ||
22943 V.getOpcode() == X86ISD::PSHUFHW) &&
        V.getOpcode() != N.getOpcode() &&
        V.hasOneUse()) {
22946 SDValue D = V.getOperand(0);
22947 while (D.getOpcode() == ISD::BITCAST && D.hasOneUse())
22948 D = D.getOperand(0);
22949 if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
22950 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22951 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
22952 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
      int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
      int WordMask[8];
22955 for (int i = 0; i < 4; ++i) {
22956 WordMask[i + NOffset] = Mask[i] + NOffset;
        WordMask[i + VOffset] = VMask[i] + VOffset;
      }
      // Map the word mask through the DWord mask.
      int MappedMask[8];
22961 for (int i = 0; i < 8; ++i)
22962 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
22963 const int UnpackLoMask[] = {0, 0, 1, 1, 2, 2, 3, 3};
22964 const int UnpackHiMask[] = {4, 4, 5, 5, 6, 6, 7, 7};
22965 if (std::equal(std::begin(MappedMask), std::end(MappedMask),
22966 std::begin(UnpackLoMask)) ||
22967 std::equal(std::begin(MappedMask), std::end(MappedMask),
22968 std::begin(UnpackHiMask))) {
22969 // We can replace all three shuffles with an unpack.
22970 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, D.getOperand(0));
22971 DCI.AddToWorklist(V.getNode());
        return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
                                              : X86ISD::UNPCKH,
                           DL, MVT::v8i16, V, V);
        }
      }
    }

    break;
22981 case X86ISD::PSHUFD:
    if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG, DCI))
      return NewN;

    break;
  }

  return SDValue();
}
22991 /// \brief Try to combine a shuffle into a target-specific add-sub node.
22993 /// We combine this directly on the abstract vector shuffle nodes so it is
22994 /// easier to generically match. We also insert dummy vector shuffle nodes for
22995 /// the operands which explicitly discard the lanes which are unused by this
22996 /// operation to try to flow through the rest of the combiner the fact that
22997 /// they're unused.
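///
/// As a concrete illustration (not exhaustive): for a v4f32 node, the blend
/// mask <0, 5, 2, 7> takes lanes 0 and 2 from the FSUB operand and lanes 1 and
/// 3 from the FADD operand, which is exactly the even-subtract / odd-add
/// pattern of ADDSUBPS.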
static SDValue combineShuffleToAddSub(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
23000 EVT VT = N->getValueType(0);
23002 // We only handle target-independent shuffles.
23003 // FIXME: It would be easy and harmless to use the target shuffle mask
23004 // extraction tool to support more.
  if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
    return SDValue();
23008 auto *SVN = cast<ShuffleVectorSDNode>(N);
23009 ArrayRef<int> Mask = SVN->getMask();
23010 SDValue V1 = N->getOperand(0);
23011 SDValue V2 = N->getOperand(1);
23013 // We require the first shuffle operand to be the SUB node, and the second to
23014 // be the ADD node.
23015 // FIXME: We should support the commuted patterns.
  if (V1->getOpcode() != ISD::FSUB || V2->getOpcode() != ISD::FADD)
    return SDValue();
23019 // If there are other uses of these operations we can't fold them.
  if (!V1->hasOneUse() || !V2->hasOneUse())
    return SDValue();
23023 // Ensure that both operations have the same operands. Note that we can
23024 // commute the FADD operands.
23025 SDValue LHS = V1->getOperand(0), RHS = V1->getOperand(1);
23026 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
      (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
    return SDValue();
23030 // We're looking for blends between FADD and FSUB nodes. We insist on these
23031 // nodes being lined up in a specific expected pattern.
23032 if (!(isShuffleEquivalent(V1, V2, Mask, 0, 3) ||
23033 isShuffleEquivalent(V1, V2, Mask, 0, 5, 2, 7) ||
        isShuffleEquivalent(V1, V2, Mask, 0, 9, 2, 11, 4, 13, 6, 15)))
    return SDValue();
23037 // Only specific types are legal at this point, assert so we notice if and
23038 // when these change.
23039 assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v8f32 ||
23040 VT == MVT::v4f64) &&
23041 "Unknown vector type encountered!");
  return DAG.getNode(X86ISD::ADDSUB, DL, VT, LHS, RHS);
}
23046 /// PerformShuffleCombine - Performs several different shuffle combines.
23047 static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
23048 TargetLowering::DAGCombinerInfo &DCI,
                                     const X86Subtarget *Subtarget) {
  SDLoc dl(N);
23051 SDValue N0 = N->getOperand(0);
23052 SDValue N1 = N->getOperand(1);
23053 EVT VT = N->getValueType(0);
23055 // Don't create instructions with illegal types after legalize types has run.
23056 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
    return SDValue();
23060 // If we have legalized the vector types, look for blends of FADD and FSUB
23061 // nodes that we can fuse into an ADDSUB node.
23062 if (TLI.isTypeLegal(VT) && Subtarget->hasSSE3())
    if (SDValue AddSub = combineShuffleToAddSub(N, DAG))
      return AddSub;
23066 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode
23067 if (Subtarget->hasFp256() && VT.is256BitVector() &&
23068 N->getOpcode() == ISD::VECTOR_SHUFFLE)
23069 return PerformShuffleCombine256(N, DAG, DCI, Subtarget);
23071 // During Type Legalization, when promoting illegal vector types,
23072 // the backend might introduce new shuffle dag nodes and bitcasts.
23074 // This code performs the following transformation:
23075 // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
23076 // (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
23078 // We do this only if both the bitcast and the BINOP dag nodes have
23079 // one use. Also, perform this transformation only if the new binary
23080 // operation is legal. This is to avoid introducing dag nodes that
23081 // potentially need to be further expanded (or custom lowered) into a
23082 // less optimal sequence of dag nodes.
23083 if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
23084 N1.getOpcode() == ISD::UNDEF && N0.hasOneUse() &&
23085 N0.getOpcode() == ISD::BITCAST) {
23086 SDValue BC0 = N0.getOperand(0);
23087 EVT SVT = BC0.getValueType();
23088 unsigned Opcode = BC0.getOpcode();
23089 unsigned NumElts = VT.getVectorNumElements();
23091 if (BC0.hasOneUse() && SVT.isVector() &&
23092 SVT.getVectorNumElements() * 2 == NumElts &&
23093 TLI.isOperationLegal(Opcode, VT)) {
      bool CanFold = false;
      switch (Opcode) {
      default : break;
      case ISD::ADD :
      case ISD::FADD :
      case ISD::SUB :
      case ISD::FSUB :
      case ISD::MUL :
      case ISD::FMUL :
        CanFold = true;
      }
23106 unsigned SVTNumElts = SVT.getVectorNumElements();
23107 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
23108 for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
23109 CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
23110 for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
        CanFold = SVOp->getMaskElt(i) < 0;

      if (CanFold) {
23114 SDValue BC00 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(0));
23115 SDValue BC01 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(1));
23116 SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
        return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, &SVOp->getMask()[0]);
      }
    }
  }
  // Only handle 128-bit wide vectors from here on.
  if (!VT.is128BitVector())
    return SDValue();
23126 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
23127 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
23128 // consecutive, non-overlapping, and in the right order.
23129 SmallVector<SDValue, 16> Elts;
23130 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
23131 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));
  SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true);
  if (LD.getNode())
    return LD;
23137 if (isTargetShuffle(N->getOpcode())) {
    SDValue Shuffle =
        PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget);
    if (Shuffle.getNode())
      return Shuffle;
23143 // Try recursively combining arbitrary sequences of x86 shuffle
23144 // instructions into higher-order shuffles. We do this after combining
23145 // specific PSHUF instruction sequences into their minimal form so that we
23146 // can evaluate how many specialized shuffle instructions are involved in
23147 // a particular chain.
23148 SmallVector<int, 1> NonceMask; // Just a placeholder.
23149 NonceMask.push_back(0);
23150 if (combineX86ShufflesRecursively(SDValue(N, 0), SDValue(N, 0), NonceMask,
                                      /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
                                      DCI, Subtarget))
      return SDValue(); // This routine will use CombineTo to replace N.
  }

  return SDValue();
}
23159 /// PerformTruncateCombine - Converts truncate operation to
23160 /// a sequence of vector shuffle operations.
23161 /// It is possible when we truncate 256-bit vector to 128-bit vector
23162 static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
23163 TargetLowering::DAGCombinerInfo &DCI,
                                      const X86Subtarget *Subtarget) {
  return SDValue();
}
23168 /// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
23169 /// specific shuffle of a load can be folded into a single element load.
23170 /// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
23171 /// shuffles have been custom lowered so we need to handle those here.
23172 static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
23173 TargetLowering::DAGCombinerInfo &DCI) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();
23177 SDValue InVec = N->getOperand(0);
23178 SDValue EltNo = N->getOperand(1);
  if (!isa<ConstantSDNode>(EltNo))
    return SDValue();
23183 EVT OriginalVT = InVec.getValueType();
23185 if (InVec.getOpcode() == ISD::BITCAST) {
23186 // Don't duplicate a load with other uses.
    if (!InVec.hasOneUse())
      return SDValue();
23189 EVT BCVT = InVec.getOperand(0).getValueType();
    if (BCVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
      return SDValue();
    InVec = InVec.getOperand(0);
  }
23195 EVT CurrentVT = InVec.getValueType();
  if (!isTargetShuffle(InVec.getOpcode()))
    return SDValue();
23200 // Don't duplicate a load with other uses.
  if (!InVec.hasOneUse())
    return SDValue();
  SmallVector<int, 16> ShuffleMask;
  bool UnaryShuffle;
23206 if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(),
                            ShuffleMask, UnaryShuffle))
    return SDValue();
23210 // Select the input vector, guarding against out of range extract vector.
23211 unsigned NumElems = CurrentVT.getVectorNumElements();
23212 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
23213 int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt];
23214 SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
23215 : InVec.getOperand(1);
23217 // If inputs to shuffle are the same for both ops, then allow 2 uses
23218 unsigned AllowedUses = InVec.getNumOperands() > 1 &&
23219 InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;
23221 if (LdNode.getOpcode() == ISD::BITCAST) {
23222 // Don't duplicate a load with other uses.
    if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
      return SDValue();
23226 AllowedUses = 1; // only allow 1 load use if we have a bitcast
23227 LdNode = LdNode.getOperand(0);
  if (!ISD::isNormalLoad(LdNode.getNode()))
    return SDValue();
23233 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
  if (!LN0 || !LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
    return SDValue();
23238 EVT EltVT = N->getValueType(0);
23239 // If there's a bitcast before the shuffle, check if the load type and
23240 // alignment is valid.
23241 unsigned Align = LN0->getAlignment();
23242 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23243 unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
23244 EltVT.getTypeForEVT(*DAG.getContext()));
  if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
    return SDValue();
23249 // All checks match so transform back to vector_shuffle so that DAG combiner
  // can finish the job.
  SDLoc dl(N);

  // Create shuffle node taking into account the case that it's a unary shuffle
23254 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT)
23255 : InVec.getOperand(1);
23256 Shuffle = DAG.getVectorShuffle(CurrentVT, dl,
                                 InVec.getOperand(0), Shuffle,
                                 &ShuffleMask[0]);
23259 Shuffle = DAG.getNode(ISD::BITCAST, dl, OriginalVT, Shuffle);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
                     EltNo);
}
23264 /// \brief Detect bitcasts between i32 to x86mmx low word. Since MMX types are
23265 /// special and don't usually play with other vector types, it's better to
23266 /// handle them early to be sure we emit efficient code by avoiding
23267 /// store-load conversions.
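/// Concretely, the pattern handled below is (illustrative):
///   (x86mmx (bitcast (v2i32 build_vector X, 0)))
/// which can be emitted as a single MMX_MOVW2D of the i32 value X.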
23268 static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG) {
23269 if (N->getValueType(0) != MVT::x86mmx ||
23270 N->getOperand(0)->getOpcode() != ISD::BUILD_VECTOR ||
      N->getOperand(0)->getValueType(0) != MVT::v2i32)
    return SDValue();
23274 SDValue V = N->getOperand(0);
23275 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V.getOperand(1));
23276 if (C && C->getZExtValue() == 0 && V.getOperand(0).getValueType() == MVT::i32)
23277 return DAG.getNode(X86ISD::MMX_MOVW2D, SDLoc(V.getOperand(0)),
                       N->getValueType(0), V.getOperand(0));

  return SDValue();
}
23283 /// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
23284 /// generation and convert it from being a bunch of shuffles and extracts
23285 /// into a somewhat faster sequence. For i686, the best sequence is apparently
23286 /// storing the value and loading scalars back, while for x64 we should
23287 /// use 64-bit extracts and shifts.
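/// (On x86-64 the code below bitcasts the v4i32 source to v2i64, extracts the
/// two 64-bit halves, and recovers the four i32 lanes by truncating each half
/// and each half shifted right by 32.)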
23288 static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
23289 TargetLowering::DAGCombinerInfo &DCI) {
23290 SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI);
  if (NewOp.getNode())
    return NewOp;
23294 SDValue InputVector = N->getOperand(0);
23296 // Detect mmx to i32 conversion through a v2i32 elt extract.
23297 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
23298 N->getValueType(0) == MVT::i32 &&
23299 InputVector.getValueType() == MVT::v2i32) {
23301 // The bitcast source is a direct mmx result.
23302 SDValue MMXSrc = InputVector.getNode()->getOperand(0);
23303 if (MMXSrc.getValueType() == MVT::x86mmx)
23304 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23305 N->getValueType(0),
23306 InputVector.getNode()->getOperand(0));
23308 // The mmx is indirect: (i64 extract_elt (v1i64 bitcast (x86mmx ...))).
23309 SDValue MMXSrcOp = MMXSrc.getOperand(0);
23310 if (MMXSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT && MMXSrc.hasOneUse() &&
23311 MMXSrc.getValueType() == MVT::i64 && MMXSrcOp.hasOneUse() &&
23312 MMXSrcOp.getOpcode() == ISD::BITCAST &&
23313 MMXSrcOp.getValueType() == MVT::v1i64 &&
23314 MMXSrcOp.getOperand(0).getValueType() == MVT::x86mmx)
23315 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23316 N->getValueType(0),
                         MMXSrcOp.getOperand(0));
  }
23320 // Only operate on vectors of 4 elements, where the alternative shuffling
23321 // gets to be more expensive.
  if (InputVector.getValueType() != MVT::v4i32)
    return SDValue();
23325 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
  // single use which is a sign-extend or zero-extend, and all elements are
  // used.
23328 SmallVector<SDNode *, 4> Uses;
23329 unsigned ExtractedElements = 0;
23330 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
23331 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
    if (UI.getUse().getResNo() != InputVector.getResNo())
      continue;
23335 SDNode *Extract = *UI;
    if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();
    if (Extract->getValueType(0) != MVT::i32)
      return SDValue();
    if (!Extract->hasOneUse())
      return SDValue();
23343 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
        Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
      return SDValue();
    if (!isa<ConstantSDNode>(Extract->getOperand(1)))
      return SDValue();
23349 // Record which element was extracted.
23350 ExtractedElements |=
23351 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();
    Uses.push_back(Extract);
  }
23356 // If not all the elements were used, this may not be worthwhile.
  if (ExtractedElements != 15)
    return SDValue();
23360 // Ok, we've now decided to do the transformation.
23361 // If 64-bit shifts are legal, use the extract-shift sequence,
23362 // otherwise bounce the vector off the cache.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  SDValue Vals[4];
23365 SDLoc dl(InputVector);
23367 if (TLI.isOperationLegal(ISD::SRA, MVT::i64)) {
23368 SDValue Cst = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, InputVector);
23369 EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy();
23370 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23371 DAG.getConstant(0, VecIdxTy));
23372 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23373 DAG.getConstant(1, VecIdxTy));
23375 SDValue ShAmt = DAG.getConstant(32,
23376 DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64));
23377 Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
23378 Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
23379 DAG.getNode(ISD::SRA, dl, MVT::i64, BottomHalf, ShAmt));
23380 Vals[2] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, TopHalf);
23381 Vals[3] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
                          DAG.getNode(ISD::SRA, dl, MVT::i64, TopHalf, ShAmt));
  } else {
23384 // Store the value to a temporary stack slot.
23385 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
23386 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
23387 MachinePointerInfo(), false, false, 0);
23389 EVT ElementType = InputVector.getValueType().getVectorElementType();
23390 unsigned EltSize = ElementType.getSizeInBits() / 8;
23392 // Replace each use (extract) with a load of the appropriate element.
23393 for (unsigned i = 0; i < 4; ++i) {
23394 uint64_t Offset = EltSize * i;
23395 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
23397 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
23398 StackPtr, OffsetVal);
23400 // Load the scalar.
23401 Vals[i] = DAG.getLoad(ElementType, dl, Ch,
23402 ScalarAddr, MachinePointerInfo(),
23403 false, false, false, 0);
23408 // Replace the extracts
23409 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
23410 UE = Uses.end(); UI != UE; ++UI) {
23411 SDNode *Extract = *UI;
23413 SDValue Idx = Extract->getOperand(1);
23414 uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
23415 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), Vals[IdxVal]);
23418 // The replacement was made in place; don't return anything.
23422 /// \brief Matches a VSELECT onto min/max or returns 0 if the node doesn't match.
23423 static std::pair<unsigned, bool>
23424 matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, SDValue RHS,
23425 SelectionDAG &DAG, const X86Subtarget *Subtarget) {
23426 if (!VT.isVector())
23427 return std::make_pair(0, false);
23429 bool NeedSplit = false;
23430 switch (VT.getSimpleVT().SimpleTy) {
23431 default: return std::make_pair(0, false);
23434 if (!Subtarget->hasVLX())
23435 return std::make_pair(0, false);
23439 if (!Subtarget->hasBWI())
23440 return std::make_pair(0, false);
23444 if (!Subtarget->hasAVX512())
23445 return std::make_pair(0, false);
23450 if (!Subtarget->hasAVX2())
23452 if (!Subtarget->hasAVX())
23453 return std::make_pair(0, false);
23458 if (!Subtarget->hasSSE2())
23459 return std::make_pair(0, false);
23462 // SSE2 has only a small subset of the operations.
23463 bool hasUnsigned = Subtarget->hasSSE41() ||
23464 (Subtarget->hasSSE2() && VT == MVT::v16i8);
23465 bool hasSigned = Subtarget->hasSSE41() ||
23466 (Subtarget->hasSSE2() && VT == MVT::v8i16);
23468 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23471 // Check for x CC y ? x : y.
23472 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23473 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23478 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23481 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23484 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23487 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23489 // Check for x CC y ? y : x -- a min/max with reversed arms.
23490 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23491 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23496 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23499 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23502 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23505 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23509 return std::make_pair(Opc, NeedSplit);
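// Informal note on the contract: a zero opcode means "no match". When
// NeedSplit is true, the caller is expected to perform the matched min/max on
// two 128-bit halves; the VSELECT combine below does exactly that by
// extracting the halves, applying Opc to each, and concatenating the results.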
23513 static SDValue transformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
23514 const X86Subtarget *Subtarget) {
23516 SDValue Cond = N->getOperand(0);
23517 SDValue LHS = N->getOperand(1);
23518 SDValue RHS = N->getOperand(2);
23520 if (Cond.getOpcode() == ISD::SIGN_EXTEND) {
23521 SDValue CondSrc = Cond->getOperand(0);
23522 if (CondSrc->getOpcode() == ISD::SIGN_EXTEND_INREG)
23523 Cond = CondSrc->getOperand(0);
23526 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
23529 // A vselect where all conditions and data are constants can be optimized into
23530 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
23531 if (ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
23532 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
23535 unsigned MaskValue = 0;
23536 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
23539 MVT VT = N->getSimpleValueType(0);
23540 unsigned NumElems = VT.getVectorNumElements();
23541 SmallVector<int, 8> ShuffleMask(NumElems, -1);
23542 for (unsigned i = 0; i < NumElems; ++i) {
23543 // Be sure we emit undef where we can.
23544 if (Cond.getOperand(i)->getOpcode() == ISD::UNDEF)
23545 ShuffleMask[i] = -1;
23547 ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
23550 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23551 if (!TLI.isShuffleMaskLegal(ShuffleMask, VT))
23553 return DAG.getVectorShuffle(VT, dl, LHS, RHS, &ShuffleMask[0]);
23556 /// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT nodes.
23558 static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
23559 TargetLowering::DAGCombinerInfo &DCI,
23560 const X86Subtarget *Subtarget) {
23562 SDValue Cond = N->getOperand(0);
23563 // Get the LHS/RHS of the select.
23564 SDValue LHS = N->getOperand(1);
23565 SDValue RHS = N->getOperand(2);
23566 EVT VT = LHS.getValueType();
23567 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23569 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
23570 // instructions match the semantics of the common C idiom x<y?x:y but not
23571 // x<=y?x:y, because of how they handle negative zero (which can be
23572 // ignored in unsafe-math mode).
23573 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
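// A minimal IR-level instance of the idiom being matched (illustrative; the
// value names are made up):
//   %c = fcmp olt float %x, %y
//   %r = select i1 %c, float %x, float %y
// Subject to the NaN and signed-zero caveats handled case by case below, this
// becomes (X86ISD::FMIN %x, %y), i.e. a single minss.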
23574 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
23575 VT != MVT::f80 && (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
23576 (Subtarget->hasSSE2() ||
23577 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
23578 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23580 unsigned Opcode = 0;
23581 // Check for x CC y ? x : y.
23582 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23583 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23587 // Converting this to a min would handle NaNs incorrectly, and swapping
23588 // the operands would cause it to handle comparisons between positive
23589 // and negative zero incorrectly.
23590 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23591 if (!DAG.getTarget().Options.UnsafeFPMath &&
23592 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23594 std::swap(LHS, RHS);
23596 Opcode = X86ISD::FMIN;
23599 // Converting this to a min would handle comparisons between positive
23600 // and negative zero incorrectly.
23601 if (!DAG.getTarget().Options.UnsafeFPMath &&
23602 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23604 Opcode = X86ISD::FMIN;
23607 // Converting this to a min would handle both negative zeros and NaNs
23608 // incorrectly, but we can swap the operands to fix both.
23609 std::swap(LHS, RHS);
23613 Opcode = X86ISD::FMIN;
23617 // Converting this to a max would handle comparisons between positive
23618 // and negative zero incorrectly.
23619 if (!DAG.getTarget().Options.UnsafeFPMath &&
23620 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23622 Opcode = X86ISD::FMAX;
23625 // Converting this to a max would handle NaNs incorrectly, and swapping
23626 // the operands would cause it to handle comparisons between positive
23627 // and negative zero incorrectly.
23628 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23629 if (!DAG.getTarget().Options.UnsafeFPMath &&
23630 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23632 std::swap(LHS, RHS);
23634 Opcode = X86ISD::FMAX;
23637 // Converting this to a max would handle both negative zeros and NaNs
23638 // incorrectly, but we can swap the operands to fix both.
23639 std::swap(LHS, RHS);
23643 Opcode = X86ISD::FMAX;
23646 // Check for x CC y ? y : x -- a min/max with reversed arms.
23647 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23648 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23652 // Converting this to a min would handle comparisons between positive
23653 // and negative zero incorrectly, and swapping the operands would
23654 // cause it to handle NaNs incorrectly.
23655 if (!DAG.getTarget().Options.UnsafeFPMath &&
23656 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
23657 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23659 std::swap(LHS, RHS);
23661 Opcode = X86ISD::FMIN;
23664 // Converting this to a min would handle NaNs incorrectly.
23665 if (!DAG.getTarget().Options.UnsafeFPMath &&
23666 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
23668 Opcode = X86ISD::FMIN;
23671 // Converting this to a min would handle both negative zeros and NaNs
23672 // incorrectly, but we can swap the operands to fix both.
23673 std::swap(LHS, RHS);
23677 Opcode = X86ISD::FMIN;
23681 // Converting this to a max would handle NaNs incorrectly.
23682 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23684 Opcode = X86ISD::FMAX;
23687 // Converting this to a max would handle comparisons between positive
23688 // and negative zero incorrectly, and swapping the operands would
23689 // cause it to handle NaNs incorrectly.
23690 if (!DAG.getTarget().Options.UnsafeFPMath &&
23691 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
23692 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23694 std::swap(LHS, RHS);
23696 Opcode = X86ISD::FMAX;
23699 // Converting this to a max would handle both negative zeros and NaNs
23700 // incorrectly, but we can swap the operands to fix both.
23701 std::swap(LHS, RHS);
23705 Opcode = X86ISD::FMAX;
23711 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
23714 EVT CondVT = Cond.getValueType();
23715 if (Subtarget->hasAVX512() && VT.isVector() && CondVT.isVector() &&
23716 CondVT.getVectorElementType() == MVT::i1) {
23717 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
23718 // lowering on KNL. In this case we convert it to
23719 // v16i8 (select v16i8, v16i8, v16i8) and use the AVX instruction.
23720 // The same applies to all 128-bit and 256-bit vectors of i8 and i16.
23721 // On SKX and later these selects have a proper lowering.
23722 EVT OpVT = LHS.getValueType();
23723 if ((OpVT.is128BitVector() || OpVT.is256BitVector()) &&
23724 (OpVT.getVectorElementType() == MVT::i8 ||
23725 OpVT.getVectorElementType() == MVT::i16) &&
23726 !(Subtarget->hasBWI() && Subtarget->hasVLX())) {
23727 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond);
23728 DCI.AddToWorklist(Cond.getNode());
23729 return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS);
23732 // If this is a select between two integer constants, try to do some optimizations.
23734 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
23735 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
23736 // Don't do this for crazy integer types.
23737 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
23738 // If this is efficiently invertible, canonicalize the LHSC/RHSC values
23739 // so that TrueC (the true value) is larger than FalseC.
23740 bool NeedsCondInvert = false;
23742 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
23743 // Efficiently invertible.
23744 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible.
23745 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible.
23746 isa<ConstantSDNode>(Cond.getOperand(1))))) {
23747 NeedsCondInvert = true;
23748 std::swap(TrueC, FalseC);
23751 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0.
23752 if (FalseC->getAPIntValue() == 0 &&
23753 TrueC->getAPIntValue().isPowerOf2()) {
23754 if (NeedsCondInvert) // Invert the condition if needed.
23755 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23756 DAG.getConstant(1, Cond.getValueType()));
23758 // Zero extend the condition if needed.
23759 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
23761 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
23762 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
23763 DAG.getConstant(ShAmt, MVT::i8));
23766 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst.
23767 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
23768 if (NeedsCondInvert) // Invert the condition if needed.
23769 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23770 DAG.getConstant(1, Cond.getValueType()));
23772 // Zero extend the condition if needed.
23773 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
23774 FalseC->getValueType(0), Cond);
23775 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23776 SDValue(FalseC, 0));
23779 // Optimize cases that will turn into an LEA instruction. This requires
23780 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
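// Worked example (illustrative; register choice is arbitrary): for
// (select C, 13, 8) the difference is 5, so the result can be formed as
// zext(C) * 5 + 8, which fits in a single LEA such as
//   leal 8(%rax,%rax,4), %eax
// once C has been materialized as 0 or 1.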
23781 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
23782 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
23783 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
23785 bool isFastMultiplier = false;
23787 switch ((unsigned char)Diff) {
23789 case 1: // result = add base, cond
23790 case 2: // result = lea base( , cond*2)
23791 case 3: // result = lea base(cond, cond*2)
23792 case 4: // result = lea base( , cond*4)
23793 case 5: // result = lea base(cond, cond*4)
23794 case 8: // result = lea base( , cond*8)
23795 case 9: // result = lea base(cond, cond*8)
23796 isFastMultiplier = true;
23801 if (isFastMultiplier) {
23802 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
23803 if (NeedsCondInvert) // Invert the condition if needed.
23804 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23805 DAG.getConstant(1, Cond.getValueType()));
23807 // Zero extend the condition if needed.
23808 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
23810 // Scale the condition by the difference.
23812 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
23813 DAG.getConstant(Diff, Cond.getValueType()));
23815 // Add the base if non-zero.
23816 if (FalseC->getAPIntValue() != 0)
23817 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23818 SDValue(FalseC, 0));
23825 // Canonicalize max and min:
23826 // (x > y) ? x : y -> (x >= y) ? x : y
23827 // (x < y) ? x : y -> (x <= y) ? x : y
23828 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
23829 // the need for an extra compare
23830 // against zero. e.g.
23831 // (x - y) > 0 ? (x - y) : 0 -> (x - y) >= 0 ? (x - y) : 0
23833 //   subl %esi, %edi ; testl %edi, %edi ; movl $0, %eax ; cmovgl %edi, %eax
23835 // =>
23839 //   xorl %eax, %eax ; subl %esi, %edi ; cmovsl %eax, %edi
23840 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
23841 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23842 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23843 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23848 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
23849 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
23850 Cond.getOperand(0), Cond.getOperand(1), NewCC);
23851 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
23856 // Early exit check
23857 if (!TLI.isTypeLegal(VT))
23860 // Match VSELECTs into subs with unsigned saturation.
23861 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
23862 // psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
23863 ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
23864 (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
23865 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23867 // Check if one of the arms of the VSELECT is a zero vector. If it's on the
23868 // left side invert the predicate to simplify logic below.
23870 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
23872 CC = ISD::getSetCCInverse(CC, true);
23873 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
23877 if (Other.getNode() && Other->getNumOperands() == 2 &&
23878 DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
23879 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
23880 SDValue CondRHS = Cond->getOperand(1);
23882 // Look for a general sub with unsigned saturation first.
23883 // x >= y ? x-y : 0 --> subus x, y
23884 // x > y ? x-y : 0 --> subus x, y
23885 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
23886 Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
23887 return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);
23889 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS))
23890 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
23891 if (auto *CondRHSBV = dyn_cast<BuildVectorSDNode>(CondRHS))
23892 if (auto *CondRHSConst = CondRHSBV->getConstantSplatNode())
23893 // If the RHS is a constant we have to reverse the const
23894 // canonicalization.
23895 // x > C-1 ? x+-C : 0 --> subus x, C
23896 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
23897 CondRHSConst->getAPIntValue() ==
23898 (-OpRHSConst->getAPIntValue() - 1))
23899 return DAG.getNode(
23900 X86ISD::SUBUS, DL, VT, OpLHS,
23901 DAG.getConstant(-OpRHSConst->getAPIntValue(), VT));
23903 // Another special case: If C was a sign bit, the sub has been
23904 // canonicalized into a xor.
23905 // FIXME: Would it be better to use computeKnownBits to determine
23906 // whether it's safe to decanonicalize the xor?
23907 // x s< 0 ? x^C : 0 --> subus x, C
23908 if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
23909 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
23910 OpRHSConst->getAPIntValue().isSignBit())
23911 // Note that we have to rebuild the RHS constant here to ensure we
23912 // don't rely on particular values of undef lanes.
23913 return DAG.getNode(
23914 X86ISD::SUBUS, DL, VT, OpLHS,
23915 DAG.getConstant(OpRHSConst->getAPIntValue(), VT));
23920 // Try to match a min/max vector operation.
23921 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC) {
23922 std::pair<unsigned, bool> ret = matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget);
23923 unsigned Opc = ret.first;
23924 bool NeedSplit = ret.second;
23926 if (Opc && NeedSplit) {
23927 unsigned NumElems = VT.getVectorNumElements();
23928 // Extract the LHS vectors
23929 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, DL);
23930 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, DL);
23932 // Extract the RHS vectors
23933 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, DL);
23934 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, DL);
23936 // Create min/max for each subvector
23937 LHS = DAG.getNode(Opc, DL, LHS1.getValueType(), LHS1, RHS1);
23938 RHS = DAG.getNode(Opc, DL, LHS2.getValueType(), LHS2, RHS2);
23940 // Merge the result
23941 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS, RHS);
23943 return DAG.getNode(Opc, DL, VT, LHS, RHS);
23946 // Simplify vector selection if the condition value type matches the vselect operand type.
23948 if (N->getOpcode() == ISD::VSELECT && CondVT == VT) {
23949 assert(Cond.getValueType().isVector() &&
23950 "vector select expects a vector selector!");
23952 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
23953 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
23955 // Try to invert the condition if the true value is not all 1s and the false value is not all 0s.
23957 if (!TValIsAllOnes && !FValIsAllZeros &&
23958 // Check if the selector will be produced by CMPP*/PCMP*
23959 Cond.getOpcode() == ISD::SETCC &&
23960 // Check if SETCC has already been promoted
23961 TLI.getSetCCResultType(*DAG.getContext(), VT) == CondVT) {
23962 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
23963 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
23965 if (TValIsAllZeros || FValIsAllOnes) {
23966 SDValue CC = Cond.getOperand(2);
23967 ISD::CondCode NewCC =
23968 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
23969 Cond.getOperand(0).getValueType().isInteger());
23970 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1), NewCC);
23971 std::swap(LHS, RHS);
23972 TValIsAllOnes = FValIsAllOnes;
23973 FValIsAllZeros = TValIsAllZeros;
23977 if (TValIsAllOnes || FValIsAllZeros) {
23980 if (TValIsAllOnes && FValIsAllZeros)
23982 else if (TValIsAllOnes)
23983 Ret = DAG.getNode(ISD::OR, DL, CondVT, Cond,
23984 DAG.getNode(ISD::BITCAST, DL, CondVT, RHS));
23985 else if (FValIsAllZeros)
23986 Ret = DAG.getNode(ISD::AND, DL, CondVT, Cond,
23987 DAG.getNode(ISD::BITCAST, DL, CondVT, LHS));
23989 return DAG.getNode(ISD::BITCAST, DL, VT, Ret);
23993 // If we know that this node is legal then we know that it is going to be
23994 // matched by one of the SSE/AVX BLEND instructions. These instructions only
23995 // depend on the highest bit in each word. Try to use SimplifyDemandedBits
23996 // to simplify previous instructions.
23997 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
23998 !DCI.isBeforeLegalize() &&
23999 // We explicitly check against v8i16 and v16i16 because, although
24000 // they're marked as Custom, they might only be legal when Cond is a
24001 // build_vector of constants. This will be taken care of in a later phase.
24003 (TLI.isOperationLegalOrCustom(ISD::VSELECT, VT) && VT != MVT::v16i16 &&
24004 VT != MVT::v8i16) &&
24005 // Don't optimize vector of constants. Those are handled by
24006 // the generic code and all the bits must be properly set for
24007 // the generic optimizer.
24008 !ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
24009 unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();
24011 // Don't optimize vector selects that map to mask-registers.
24015 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
24016 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);
24018 APInt KnownZero, KnownOne;
24019 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
24020 DCI.isBeforeLegalizeOps());
24021 if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) ||
24022 TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne,
24024 // If we changed the computation somewhere in the DAG, this change
24025 // will affect all users of Cond.
24026 // Make sure it is fine and update all the nodes so that we do not
24027 // use the generic VSELECT anymore. Otherwise, we may perform
24028 // wrong optimizations as we messed up with the actual expectation
24029 // for the vector boolean values.
24030 if (Cond != TLO.Old) {
24031 // Check all uses of the condition operand to see whether it will be
24032 // consumed by non-BLEND instructions, which may depend on all bits being set.
24034 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
24036 if (I->getOpcode() != ISD::VSELECT)
24037 // TODO: Add other opcodes eventually lowered into BLEND.
24040 // Update all the users of the condition, before committing the change,
24041 // so that the VSELECT optimizations that expect the correct vector
24042 // boolean value will not be triggered.
24043 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
24045 DAG.ReplaceAllUsesOfValueWith(
24047 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(*I), I->getValueType(0),
24048 Cond, I->getOperand(1), I->getOperand(2)));
24049 DCI.CommitTargetLoweringOpt(TLO);
24052 // At this point, only Cond is changed. Change the condition
24053 // just for N to preserve the opportunity to optimize all other
24054 // users in their own way.
24055 DAG.ReplaceAllUsesOfValueWith(
24057 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(N), N->getValueType(0),
24058 TLO.New, N->getOperand(1), N->getOperand(2)));
24063 // We should generate an X86ISD::BLENDI from a vselect if its argument
24064 // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
24065 // constants. This specific pattern gets generated when we split a
24066 // selector for a 512-bit vector on a machine without AVX512 (but with
24067 // 256-bit vectors), during legalization:
24069 // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
24071 // Iff we find this pattern and the build_vectors are built from
24072 // constants, we translate the vselect into a shuffle_vector that we
24073 // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
24074 if ((N->getOpcode() == ISD::VSELECT ||
24075 N->getOpcode() == X86ISD::SHRUNKBLEND) &&
24076 !DCI.isBeforeLegalize()) {
24077 SDValue Shuffle = transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
24078 if (Shuffle.getNode())
24085 // Check whether a boolean test is testing a boolean value generated by
24086 // X86ISD::SETCC. If so, return the operand of that SETCC and the proper condition flag.
24089 // Simplify the following patterns:
24090 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
24091 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
24092 // to (Op EFLAGS Cond)
24094 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
24095 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
24096 // to (Op EFLAGS !Cond)
24098 // where Op could be BRCOND or CMOV.
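// A concrete instance (illustrative): for
//   (brcond (cmp (setcc COND_E, EFLAGS), 0), COND_NE)
// the compare against 0 adds nothing, so the caller can branch on the original
// EFLAGS with COND_E directly; comparing against 1, or using the opposite
// outer condition, simply flips the returned condition code.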
24100 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
24101 // Quit unless this is a CMP, or a SUB whose value result is unused.
24102 if (Cmp.getOpcode() != X86ISD::CMP &&
24103 (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
24106 // Quit if not used as a boolean value.
24107 if (CC != X86::COND_E && CC != X86::COND_NE)
24110 // Check CMP operands. One of them should be 0 or 1 and the other should be
24111 // a SetCC or extended from it.
24112 SDValue Op1 = Cmp.getOperand(0);
24113 SDValue Op2 = Cmp.getOperand(1);
24116 const ConstantSDNode* C = nullptr;
24117 bool needOppositeCond = (CC == X86::COND_E);
24118 bool checkAgainstTrue = false; // Is it a comparison against 1?
24120 if ((C = dyn_cast<ConstantSDNode>(Op1)))
24122 else if ((C = dyn_cast<ConstantSDNode>(Op2)))
24124 else // Quit if all operands are not constants.
24127 if (C->getZExtValue() == 1) {
24128 needOppositeCond = !needOppositeCond;
24129 checkAgainstTrue = true;
24130 } else if (C->getZExtValue() != 0)
24131 // Quit if the constant is neither 0 nor 1.
24134 bool truncatedToBoolWithAnd = false;
24135 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
24136 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
24137 SetCC.getOpcode() == ISD::TRUNCATE ||
24138 SetCC.getOpcode() == ISD::AND) {
24139 if (SetCC.getOpcode() == ISD::AND) {
24141 ConstantSDNode *CS;
24142 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(0))) &&
24143 CS->getZExtValue() == 1)
24145 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(1))) &&
24146 CS->getZExtValue() == 1)
24150 SetCC = SetCC.getOperand(OpIdx);
24151 truncatedToBoolWithAnd = true;
24153 SetCC = SetCC.getOperand(0);
24156 switch (SetCC.getOpcode()) {
24157 case X86ISD::SETCC_CARRY:
24158 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
24159 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
24160 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
24161 // truncated to i1 using 'and'.
24162 if (checkAgainstTrue && !truncatedToBoolWithAnd)
24164 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
24165 "Invalid use of SETCC_CARRY!");
24167 case X86ISD::SETCC:
24168 // Set the condition code or opposite one if necessary.
24169 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
24170 if (needOppositeCond)
24171 CC = X86::GetOppositeBranchCondition(CC);
24172 return SetCC.getOperand(1);
24173 case X86ISD::CMOV: {
24174 // Check whether the false/true values are canonical, i.e. 0 or 1.
24175 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
24176 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
24177 // Quit if true value is not a constant.
24180 // Quit if false value is not a constant.
24182 SDValue Op = SetCC.getOperand(0);
24183 // Skip 'zext' or 'trunc' node.
24184 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
24185 Op.getOpcode() == ISD::TRUNCATE)
24186 Op = Op.getOperand(0);
24187 // A special case for rdrand/rdseed, where 0 is set if the false cond is found.
24189 if ((Op.getOpcode() != X86ISD::RDRAND &&
24190 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
24193 // Quit if false value is not the constant 0 or 1.
24194 bool FValIsFalse = true;
24195 if (FVal && FVal->getZExtValue() != 0) {
24196 if (FVal->getZExtValue() != 1)
24198 // If FVal is 1, opposite cond is needed.
24199 needOppositeCond = !needOppositeCond;
24200 FValIsFalse = false;
24202 // Quit if TVal is not the constant opposite of FVal.
24203 if (FValIsFalse && TVal->getZExtValue() != 1)
24205 if (!FValIsFalse && TVal->getZExtValue() != 0)
24207 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
24208 if (needOppositeCond)
24209 CC = X86::GetOppositeBranchCondition(CC);
24210 return SetCC.getOperand(3);
24217 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
24218 static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
24219 TargetLowering::DAGCombinerInfo &DCI,
24220 const X86Subtarget *Subtarget) {
24223 // If the flag operand isn't dead, don't touch this CMOV.
24224 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
24227 SDValue FalseOp = N->getOperand(0);
24228 SDValue TrueOp = N->getOperand(1);
24229 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
24230 SDValue Cond = N->getOperand(3);
24232 if (CC == X86::COND_E || CC == X86::COND_NE) {
24233 switch (Cond.getOpcode()) {
24237 // If the operand of BSR / BSF is proven never zero, then ZF cannot be set.
24238 if (DAG.isKnownNeverZero(Cond.getOperand(0)))
24239 return (CC == X86::COND_E) ? FalseOp : TrueOp;
24245 Flags = checkBoolTestSetCCCombine(Cond, CC);
24246 if (Flags.getNode() &&
24247 // Extra check as FCMOV only supports a subset of X86 cond.
24248 (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
24249 SDValue Ops[] = { FalseOp, TrueOp,
24250 DAG.getConstant(CC, MVT::i8), Flags };
24251 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
24254 // If this is a select between two integer constants, try to do some
24255 // optimizations. Note that the operands are ordered the opposite of SELECT operands.
24257 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
24258 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
24259 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
24260 // larger than FalseC (the false value).
24261 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
24262 CC = X86::GetOppositeBranchCondition(CC);
24263 std::swap(TrueC, FalseC);
24264 std::swap(TrueOp, FalseOp);
24267 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
24268 // This is efficient for any integer data type (including i8/i16) and
24270 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
24271 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24272 DAG.getConstant(CC, MVT::i8), Cond);
24274 // Zero extend the condition if needed.
24275 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
24277 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
24278 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
24279 DAG.getConstant(ShAmt, MVT::i8));
24280 if (N->getNumValues() == 2) // Dead flag value?
24281 return DCI.CombineTo(N, Cond, SDValue());
24285 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst. This is efficient
24286 // for any integer data type, including i8/i16.
24287 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
24288 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24289 DAG.getConstant(CC, MVT::i8), Cond);
24291 // Zero extend the condition if needed.
24292 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
24293 FalseC->getValueType(0), Cond);
24294 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24295 SDValue(FalseC, 0));
24297 if (N->getNumValues() == 2) // Dead flag value?
24298 return DCI.CombineTo(N, Cond, SDValue());
24302 // Optimize cases that will turn into an LEA instruction. This requires
24303 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
24304 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
24305 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
24306 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
24308 bool isFastMultiplier = false;
24310 switch ((unsigned char)Diff) {
24312 case 1: // result = add base, cond
24313 case 2: // result = lea base( , cond*2)
24314 case 3: // result = lea base(cond, cond*2)
24315 case 4: // result = lea base( , cond*4)
24316 case 5: // result = lea base(cond, cond*4)
24317 case 8: // result = lea base( , cond*8)
24318 case 9: // result = lea base(cond, cond*8)
24319 isFastMultiplier = true;
24324 if (isFastMultiplier) {
24325 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
24326 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24327 DAG.getConstant(CC, MVT::i8), Cond);
24328 // Zero extend the condition if needed.
24329 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
24331 // Scale the condition by the difference.
24333 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
24334 DAG.getConstant(Diff, Cond.getValueType()));
24336 // Add the base if non-zero.
24337 if (FalseC->getAPIntValue() != 0)
24338 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24339 SDValue(FalseC, 0));
24340 if (N->getNumValues() == 2) // Dead flag value?
24341 return DCI.CombineTo(N, Cond, SDValue());
24348 // Handle these cases:
24349 // (select (x != c), e, c) -> (select (x != c), e, x),
24350 // (select (x == c), c, e) -> (select (x == c), x, e)
24351 // where c is an integer constant, and the "select" is the combination
24352 // of CMOV and CMP.
24354 // The rationale for this change is that a conditional-move from a constant
24355 // needs two instructions, whereas a conditional-move from a register needs
24356 // only one instruction.
24358 // CAVEAT: By replacing a constant with a symbolic value, it may obscure
24359 // some instruction-combining opportunities. This opt needs to be
24360 // postponed as late as possible.
24362 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
24363 // the DCI.xxxx conditions are provided to postpone the optimization as
24364 // late as possible.
24366 ConstantSDNode *CmpAgainst = nullptr;
24367 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
24368 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
24369 !isa<ConstantSDNode>(Cond.getOperand(0))) {
24371 if (CC == X86::COND_NE &&
24372 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
24373 CC = X86::GetOppositeBranchCondition(CC);
24374 std::swap(TrueOp, FalseOp);
24377 if (CC == X86::COND_E &&
24378 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
24379 SDValue Ops[] = { FalseOp, Cond.getOperand(0),
24380 DAG.getConstant(CC, MVT::i8), Cond };
24381 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList (), Ops);
24389 static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
24390 const X86Subtarget *Subtarget) {
24391 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
24393 default: return SDValue();
24394 // SSE/AVX/AVX2 blend intrinsics.
24395 case Intrinsic::x86_avx2_pblendvb:
24396 case Intrinsic::x86_avx2_pblendw:
24397 case Intrinsic::x86_avx2_pblendd_128:
24398 case Intrinsic::x86_avx2_pblendd_256:
24399 // Don't try to simplify this intrinsic if we don't have AVX2.
24400 if (!Subtarget->hasAVX2())
24403 case Intrinsic::x86_avx_blend_pd_256:
24404 case Intrinsic::x86_avx_blend_ps_256:
24405 case Intrinsic::x86_avx_blendv_pd_256:
24406 case Intrinsic::x86_avx_blendv_ps_256:
24407 // Don't try to simplify this intrinsic if we don't have AVX.
24408 if (!Subtarget->hasAVX())
24411 case Intrinsic::x86_sse41_pblendw:
24412 case Intrinsic::x86_sse41_blendpd:
24413 case Intrinsic::x86_sse41_blendps:
24414 case Intrinsic::x86_sse41_blendvps:
24415 case Intrinsic::x86_sse41_blendvpd:
24416 case Intrinsic::x86_sse41_pblendvb: {
24417 SDValue Op0 = N->getOperand(1);
24418 SDValue Op1 = N->getOperand(2);
24419 SDValue Mask = N->getOperand(3);
24421 // Don't try to simplify this intrinsic if we don't have SSE4.1.
24422 if (!Subtarget->hasSSE41())
24425 // fold (blend A, A, Mask) -> A
24428 // fold (blend A, B, allZeros) -> A
24429 if (ISD::isBuildVectorAllZeros(Mask.getNode()))
24431 // fold (blend A, B, allOnes) -> B
24432 if (ISD::isBuildVectorAllOnes(Mask.getNode()))
24435 // Simplify the case where the mask is a constant i32 value.
24436 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mask)) {
24437 if (C->isNullValue())
24439 if (C->isAllOnesValue())
24446 // Packed SSE2/AVX2 arithmetic shift immediate intrinsics.
24447 case Intrinsic::x86_sse2_psrai_w:
24448 case Intrinsic::x86_sse2_psrai_d:
24449 case Intrinsic::x86_avx2_psrai_w:
24450 case Intrinsic::x86_avx2_psrai_d:
24451 case Intrinsic::x86_sse2_psra_w:
24452 case Intrinsic::x86_sse2_psra_d:
24453 case Intrinsic::x86_avx2_psra_w:
24454 case Intrinsic::x86_avx2_psra_d: {
24455 SDValue Op0 = N->getOperand(1);
24456 SDValue Op1 = N->getOperand(2);
24457 EVT VT = Op0.getValueType();
24458 assert(VT.isVector() && "Expected a vector type!");
24460 if (isa<BuildVectorSDNode>(Op1))
24461 Op1 = Op1.getOperand(0);
24463 if (!isa<ConstantSDNode>(Op1))
24466 EVT SVT = VT.getVectorElementType();
24467 unsigned SVTBits = SVT.getSizeInBits();
24469 ConstantSDNode *CND = cast<ConstantSDNode>(Op1);
24470 const APInt &C = APInt(SVTBits, CND->getAPIntValue().getZExtValue());
24471 uint64_t ShAmt = C.getZExtValue();
24473 // Don't try to convert this shift into an ISD::SRA if the shift
24474 // count is bigger than or equal to the element size.
24475 if (ShAmt >= SVTBits)
24478 // Trivial case: if the shift count is zero, then fold this
24479 // into the first operand.
24483 // Replace this packed shift intrinsic with a target-independent node.
24485 SDValue Splat = DAG.getConstant(C, VT);
24486 return DAG.getNode(ISD::SRA, SDLoc(N), VT, Op0, Splat);
24491 /// PerformMulCombine - Optimize a single multiply with a constant into two multiplies
24492 /// in order to implement it with two cheaper instructions, e.g.
24493 /// LEA + SHL, LEA + LEA.
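// A couple of illustrative decompositions (the exact instruction mix depends
// on how the result is used):
//   x * 45  ->  (x * 9) * 5    : two LEAs
//   x * 40  ->  (x * 5) << 3   : one LEA plus one shift
// The code below factors the constant into MulAmt1 * MulAmt2, with MulAmt1
// drawn from {3, 5, 9} and MulAmt2 either a power of two or from the same set.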
24494 static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
24495 TargetLowering::DAGCombinerInfo &DCI) {
24496 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
24499 EVT VT = N->getValueType(0);
24500 if (VT != MVT::i64 && VT != MVT::i32)
24503 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
24506 uint64_t MulAmt = C->getZExtValue();
24507 if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
24510 uint64_t MulAmt1 = 0;
24511 uint64_t MulAmt2 = 0;
24512 if ((MulAmt % 9) == 0) {
24514 MulAmt2 = MulAmt / 9;
24515 } else if ((MulAmt % 5) == 0) {
24517 MulAmt2 = MulAmt / 5;
24518 } else if ((MulAmt % 3) == 0) {
24520 MulAmt2 = MulAmt / 3;
24523 (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
24526 if (isPowerOf2_64(MulAmt2) &&
24527 !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
24528 // If the second multiplier is pow2, issue it first. We want the multiply by
24529 // 3, 5, or 9 to be folded into the addressing mode unless the lone use
24531 std::swap(MulAmt1, MulAmt2);
24534 if (isPowerOf2_64(MulAmt1))
24535 NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
24536 DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
24538 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
24539 DAG.getConstant(MulAmt1, VT));
24541 if (isPowerOf2_64(MulAmt2))
24542 NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
24543 DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
24545 NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
24546 DAG.getConstant(MulAmt2, VT));
24548 // Do not add new nodes to DAG combiner worklist.
24549 DCI.CombineTo(N, NewMul, false);
24554 static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
24555 SDValue N0 = N->getOperand(0);
24556 SDValue N1 = N->getOperand(1);
24557 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
24558 EVT VT = N0.getValueType();
24560 // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
24561 // since the result of setcc_c is all zeros or all ones.
24562 if (VT.isInteger() && !VT.isVector() &&
24563 N1C && N0.getOpcode() == ISD::AND &&
24564 N0.getOperand(1).getOpcode() == ISD::Constant) {
24565 SDValue N00 = N0.getOperand(0);
24566 if (N00.getOpcode() == X86ISD::SETCC_CARRY ||
24567 ((N00.getOpcode() == ISD::ANY_EXTEND ||
24568 N00.getOpcode() == ISD::ZERO_EXTEND) &&
24569 N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) {
24570 APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
24571 APInt ShAmt = N1C->getAPIntValue();
24572 Mask = Mask.shl(ShAmt);
24574 return DAG.getNode(ISD::AND, SDLoc(N), VT,
24575 N00, DAG.getConstant(Mask, VT));
24579 // Hardware support for vector shifts is sparse, which makes us scalarize the
24580 // vector operations in many cases. Also, on Sandy Bridge ADD is faster than SHL:
24582 // (shl V, 1) -> add V,V
24583 if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
24584 if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
24585 assert(N0.getValueType().isVector() && "Invalid vector shift type");
24586 // We shift all of the values by one. In many cases we do not have
24587 // hardware support for this operation. This is better expressed as an ADD of two values.
24589 if (N1SplatC->getZExtValue() == 1)
24590 return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
24596 /// \brief Returns a vector of 0s if the input node is a vector logical
24597 /// shift by a constant amount which is known to be bigger than or equal
24598 /// to the vector element size in bits.
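// For example (illustrative): a v4i32 logical shift by a splatted constant of
// 32 or more produces zero in every lane, because the SSE2/AVX2 packed logical
// shifts yield 0 once the amount reaches the element width, so the whole node
// can be replaced by a zero vector. The same applies to the 256-bit types when
// AVX2 is available.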
24599 static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
24600 const X86Subtarget *Subtarget) {
24601 EVT VT = N->getValueType(0);
24603 if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
24604 (!Subtarget->hasInt256() ||
24605 (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
24608 SDValue Amt = N->getOperand(1);
24610 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
24611 if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
24612 APInt ShiftAmt = AmtSplat->getAPIntValue();
24613 unsigned MaxAmount = VT.getVectorElementType().getSizeInBits();
24615 // SSE2/AVX2 logical shifts always return a vector of 0s
24616 // if the shift amount is bigger than or equal to
24617 // the element size. The constant shift amount will be
24618 // encoded as an 8-bit immediate.
24619 if (ShiftAmt.trunc(8).uge(MaxAmount))
24620 return getZeroVector(VT, Subtarget, DAG, DL);
24626 /// PerformShiftCombine - Combine shifts.
24627 static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
24628 TargetLowering::DAGCombinerInfo &DCI,
24629 const X86Subtarget *Subtarget) {
24630 if (N->getOpcode() == ISD::SHL) {
24631 SDValue V = PerformSHLCombine(N, DAG);
24632 if (V.getNode()) return V;
24635 if (N->getOpcode() != ISD::SRA) {
24636 // Try to fold this logical shift into a zero vector.
24637 SDValue V = performShiftToAllZeros(N, DAG, Subtarget);
24638 if (V.getNode()) return V;
24644 // CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..))
24645 // where both setccs reference the same FP CMP, and rewrite for CMPEQSS
24646 // and friends. Likewise for OR -> CMPNEQSS.
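// Shape of the pattern, informally (condition codes illustrative):
//   (and (setcc COND_E, (cmp a, b)), (setcc COND_NP, (cmp a, b)))
// Both setccs read the same FP compare, and together they test "ordered and
// equal", which is exactly what cmpeqss/cmpeqsd compute; the code below emits
// an FSETCC and then extracts its low bit.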
24647 static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
24648 TargetLowering::DAGCombinerInfo &DCI,
24649 const X86Subtarget *Subtarget) {
24652 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
24653 // we're requiring SSE2 for both.
24654 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
24655 SDValue N0 = N->getOperand(0);
24656 SDValue N1 = N->getOperand(1);
24657 SDValue CMP0 = N0->getOperand(1);
24658 SDValue CMP1 = N1->getOperand(1);
24661 // The SETCCs should both refer to the same CMP.
24662 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
24665 SDValue CMP00 = CMP0->getOperand(0);
24666 SDValue CMP01 = CMP0->getOperand(1);
24667 EVT VT = CMP00.getValueType();
24669 if (VT == MVT::f32 || VT == MVT::f64) {
24670 bool ExpectingFlags = false;
24671 // Check for any users that want flags:
24672 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
24673 !ExpectingFlags && UI != UE; ++UI)
24674 switch (UI->getOpcode()) {
24679 ExpectingFlags = true;
24681 case ISD::CopyToReg:
24682 case ISD::SIGN_EXTEND:
24683 case ISD::ZERO_EXTEND:
24684 case ISD::ANY_EXTEND:
24688 if (!ExpectingFlags) {
24689 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
24690 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
24692 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
24693 X86::CondCode tmp = cc0;
24698 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
24699 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
24700 // FIXME: need symbolic constants for these magic numbers.
24701 // See X86ATTInstPrinter.cpp:printSSECC().
24702 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
24703 if (Subtarget->hasAVX512()) {
24704 SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
24705 CMP01, DAG.getConstant(x86cc, MVT::i8));
24706 if (N->getValueType(0) != MVT::i1)
24707 return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
24711 SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
24712 CMP00.getValueType(), CMP00, CMP01,
24713 DAG.getConstant(x86cc, MVT::i8));
24715 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
24716 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
24718 if (is64BitFP && !Subtarget->is64Bit()) {
24719 // On a 32-bit target, we cannot bitcast the 64-bit float to a
24720 // 64-bit integer, since that's not a legal type. Since
24721 // OnesOrZeroesF is all ones or all zeroes, we don't need all the
24722 // bits, but can do this little dance to extract the lowest 32 bits
24723 // and work with those going forward.
24724 SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
24726 SDValue Vector32 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32,
24728 OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
24729 Vector32, DAG.getIntPtrConstant(0));
24733 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, IntVT, OnesOrZeroesF);
24734 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
24735 DAG.getConstant(1, IntVT));
24736 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed);
24737 return OneBitOfTruth;
24745 /// CanFoldXORWithAllOnes - Test whether the XOR operand is an AllOnes vector
24746 /// so it can be folded inside ANDNP.
24747 static bool CanFoldXORWithAllOnes(const SDNode *N) {
24748 EVT VT = N->getValueType(0);
24750 // Match direct AllOnes for 128 and 256-bit vectors
24751 if (ISD::isBuildVectorAllOnes(N))
24754 // Look through a bit convert.
24755 if (N->getOpcode() == ISD::BITCAST)
24756 N = N->getOperand(0).getNode();
24758 // Sometimes the operand may come from an insert_subvector building a 256-bit
24760 if (VT.is256BitVector() &&
24761 N->getOpcode() == ISD::INSERT_SUBVECTOR) {
24762 SDValue V1 = N->getOperand(0);
24763 SDValue V2 = N->getOperand(1);
24765 if (V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
24766 V1.getOperand(0).getOpcode() == ISD::UNDEF &&
24767 ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
24768 ISD::isBuildVectorAllOnes(V2.getNode()))
24775 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM-sized
24776 // register. In most cases we actually compare or select YMM-sized registers,
24777 // and mixing the two types creates horrible code. This method optimizes
24778 // some of the transition sequences.
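// A typical sequence this cleans up (illustrative types): the legalizer can
// produce
//   (sign_extend (and (trunc A to v8i16), (trunc B to v8i16)) to v8i32)
// which bounces 256-bit values through an XMM-sized type; performing the AND
// directly on the wide v8i32 operands removes the trunc/extend round trip.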
24779 static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
24780 TargetLowering::DAGCombinerInfo &DCI,
24781 const X86Subtarget *Subtarget) {
24782 EVT VT = N->getValueType(0);
24783 if (!VT.is256BitVector())
24786 assert((N->getOpcode() == ISD::ANY_EXTEND ||
24787 N->getOpcode() == ISD::ZERO_EXTEND ||
24788 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
24790 SDValue Narrow = N->getOperand(0);
24791 EVT NarrowVT = Narrow->getValueType(0);
24792 if (!NarrowVT.is128BitVector())
24795 if (Narrow->getOpcode() != ISD::XOR &&
24796 Narrow->getOpcode() != ISD::AND &&
24797 Narrow->getOpcode() != ISD::OR)
24800 SDValue N0 = Narrow->getOperand(0);
24801 SDValue N1 = Narrow->getOperand(1);
24804 // The Left side has to be a trunc.
24805 if (N0.getOpcode() != ISD::TRUNCATE)
24808 // The type of the truncated inputs.
24809 EVT WideVT = N0->getOperand(0)->getValueType(0);
24813 // The right side has to be a 'trunc' or a constant vector.
24814 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE;
24815 ConstantSDNode *RHSConstSplat = nullptr;
24816 if (auto *RHSBV = dyn_cast<BuildVectorSDNode>(N1))
24817 RHSConstSplat = RHSBV->getConstantSplatNode();
24818 if (!RHSTrunc && !RHSConstSplat)
24821 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24823 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT))
24826 // Set N0 and N1 to hold the inputs to the new wide operation.
24827 N0 = N0->getOperand(0);
24828 if (RHSConstSplat) {
24829 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getScalarType(),
24830 SDValue(RHSConstSplat, 0));
24831 SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1);
24832 N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C);
24833 } else if (RHSTrunc) {
24834 N1 = N1->getOperand(0);
24837 // Generate the wide operation.
24838 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1);
24839 unsigned Opcode = N->getOpcode();
24841 case ISD::ANY_EXTEND:
24843 case ISD::ZERO_EXTEND: {
24844 unsigned InBits = NarrowVT.getScalarType().getSizeInBits();
24845 APInt Mask = APInt::getAllOnesValue(InBits);
24846 Mask = Mask.zext(VT.getScalarType().getSizeInBits());
24847 return DAG.getNode(ISD::AND, DL, VT,
24848 Op, DAG.getConstant(Mask, VT));
24850 case ISD::SIGN_EXTEND:
24851 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
24852 Op, DAG.getValueType(NarrowVT));
24854 llvm_unreachable("Unexpected opcode");
24858 static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
24859 TargetLowering::DAGCombinerInfo &DCI,
24860 const X86Subtarget *Subtarget) {
24861 EVT VT = N->getValueType(0);
24862 if (DCI.isBeforeLegalizeOps())
24865 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
24869 // Create BEXTR instructions
24870 // BEXTR is ((X >> imm) & (2**size-1))
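// Worked example (illustrative): (and (srl x, 4), 0xFFF) extracts a 12-bit
// field starting at bit 4, so with BMI or TBM it becomes a single BEXTR whose
// control value is (length << 8) | start = (12 << 8) | 4 = 0xC04.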
24871 if (VT == MVT::i32 || VT == MVT::i64) {
24872 SDValue N0 = N->getOperand(0);
24873 SDValue N1 = N->getOperand(1);
24876 // Check for BEXTR.
24877 if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
24878 (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
24879 ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
24880 ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
24881 if (MaskNode && ShiftNode) {
24882 uint64_t Mask = MaskNode->getZExtValue();
24883 uint64_t Shift = ShiftNode->getZExtValue();
24884 if (isMask_64(Mask)) {
24885 uint64_t MaskSize = countPopulation(Mask);
24886 if (Shift + MaskSize <= VT.getSizeInBits())
24887 return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
24888 DAG.getConstant(Shift | (MaskSize << 8), VT));
24896 // Want to form ANDNP nodes:
24897 // 1) In the hopes of then easily combining them with OR and AND nodes
24898 // to form PBLEND/PSIGN.
24899 // 2) To match ANDN packed intrinsics
24900 if (VT != MVT::v2i64 && VT != MVT::v4i64)
24903 SDValue N0 = N->getOperand(0);
24904 SDValue N1 = N->getOperand(1);
24907 // Check LHS for vnot
24908 if (N0.getOpcode() == ISD::XOR &&
24909 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
24910 CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
24911 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);
24913 // Check RHS for vnot
24914 if (N1.getOpcode() == ISD::XOR &&
24915 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
24916 CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
24917 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);
24922 static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
24923 TargetLowering::DAGCombinerInfo &DCI,
24924 const X86Subtarget *Subtarget) {
24925 if (DCI.isBeforeLegalizeOps())
24928 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
24932 SDValue N0 = N->getOperand(0);
24933 SDValue N1 = N->getOperand(1);
24934 EVT VT = N->getValueType(0);
24936 // look for psign/blend
24937 if (VT == MVT::v2i64 || VT == MVT::v4i64) {
24938 if (!Subtarget->hasSSSE3() ||
24939 (VT == MVT::v4i64 && !Subtarget->hasInt256()))
24942 // Canonicalize pandn to RHS
24943 if (N0.getOpcode() == X86ISD::ANDNP)
24945 // or (and (m, y), (pandn m, x))
24946 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
24947 SDValue Mask = N1.getOperand(0);
24948 SDValue X = N1.getOperand(1);
24950 if (N0.getOperand(0) == Mask)
24951 Y = N0.getOperand(1);
24952 if (N0.getOperand(1) == Mask)
24953 Y = N0.getOperand(0);
24955 // Check to see if the mask appeared in both the AND and the ANDNP.
24959 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
24960 // Look through mask bitcast.
24961 if (Mask.getOpcode() == ISD::BITCAST)
24962 Mask = Mask.getOperand(0);
24963 if (X.getOpcode() == ISD::BITCAST)
24964 X = X.getOperand(0);
24965 if (Y.getOpcode() == ISD::BITCAST)
24966 Y = Y.getOperand(0);
24968 EVT MaskVT = Mask.getValueType();
24970 // Validate that the Mask operand is a vector sra node.
24971 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
24972 // there is no psrai.b
24973 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
24974 unsigned SraAmt = ~0;
24975 if (Mask.getOpcode() == ISD::SRA) {
24976 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
24977 if (auto *AmtConst = AmtBV->getConstantSplatNode())
24978 SraAmt = AmtConst->getZExtValue();
24979 } else if (Mask.getOpcode() == X86ISD::VSRAI) {
24980 SDValue SraC = Mask.getOperand(1);
24981 SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
24983 if ((SraAmt + 1) != EltBits)
24988 // Now we know we at least have a pblendvb with the mask val. See if
24989 // we can form a psignb/w/d.
24990 // psign = x.type == y.type == mask.type && y = sub(0, x);
24991 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
24992 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
24993 X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
24994 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
24995 "Unsupported VT for PSIGN");
24996 Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0));
24997 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
24999 // PBLENDVB is only available on SSE 4.1.
25000 if (!Subtarget->hasSSE41())
25003 EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;
25005 X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X);
25006 Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y);
25007 Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask);
25008 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
25009 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
25013 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
25016 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
25017 MachineFunction &MF = DAG.getMachineFunction();
25019 MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
25021 // SHLD/SHRD instructions have lower register pressure, but on some
25022 // platforms they have higher latency than the equivalent
25023 // series of shifts/or that would otherwise be generated.
25024 // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
25025 // have higher latencies and we are not optimizing for size.
25026 if (!OptForSize && Subtarget->isSHLDSlow())
25029 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
25031 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
25033 if (!N0.hasOneUse() || !N1.hasOneUse())
25036 SDValue ShAmt0 = N0.getOperand(1);
25037 if (ShAmt0.getValueType() != MVT::i8)
25039 SDValue ShAmt1 = N1.getOperand(1);
25040 if (ShAmt1.getValueType() != MVT::i8)
25042 if (ShAmt0.getOpcode() == ISD::TRUNCATE)
25043 ShAmt0 = ShAmt0.getOperand(0);
25044 if (ShAmt1.getOpcode() == ISD::TRUNCATE)
25045 ShAmt1 = ShAmt1.getOperand(0);
25048 unsigned Opc = X86ISD::SHLD;
25049 SDValue Op0 = N0.getOperand(0);
25050 SDValue Op1 = N1.getOperand(0);
25051 if (ShAmt0.getOpcode() == ISD::SUB) {
25052 Opc = X86ISD::SHRD;
25053 std::swap(Op0, Op1);
25054 std::swap(ShAmt0, ShAmt1);
25057 unsigned Bits = VT.getSizeInBits();
25058 if (ShAmt1.getOpcode() == ISD::SUB) {
25059 SDValue Sum = ShAmt1.getOperand(0);
25060 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
25061 SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
25062 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
25063 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
25064 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
25065 return DAG.getNode(Opc, DL, VT,
25067 DAG.getNode(ISD::TRUNCATE, DL,
25070 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
25071 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
25073 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
25074 return DAG.getNode(Opc, DL, VT,
25075 N0.getOperand(0), N1.getOperand(0),
25076 DAG.getNode(ISD::TRUNCATE, DL,
25083 // Generate NEG and CMOV for integer abs.
25084 static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
25085 EVT VT = N->getValueType(0);
25087 // Since X86 does not have CMOV for 8-bit integer, we don't convert
25088 // 8-bit integer abs to NEG and CMOV.
25089 if (VT.isInteger() && VT.getSizeInBits() == 8)
25092 SDValue N0 = N->getOperand(0);
25093 SDValue N1 = N->getOperand(1);
25096 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
25097 // and change it to SUB and CMOV.
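// Background, informally: with Y = X >> 31 (arithmetic shift, for i32),
// (X + Y) ^ Y is the classic branchless abs(X). On targets with CMOV it is
// usually cheaper to compute 0 - X and conditionally select it, which is the
// SUB + CMOV(COND_GE) sequence emitted below.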
25098 if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
25099 N0.getOpcode() == ISD::ADD &&
25100 N0.getOperand(1) == N1 &&
25101 N1.getOpcode() == ISD::SRA &&
25102 N1.getOperand(0) == N0.getOperand(0))
25103 if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
25104 if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) {
25105 // Generate SUB & CMOV.
25106 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
25107 DAG.getConstant(0, VT), N0.getOperand(0));
25109 SDValue Ops[] = { N0.getOperand(0), Neg,
25110 DAG.getConstant(X86::COND_GE, MVT::i8),
25111 SDValue(Neg.getNode(), 1) };
25112 return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
25117 // PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes
25118 static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
25119 TargetLowering::DAGCombinerInfo &DCI,
25120 const X86Subtarget *Subtarget) {
25121 if (DCI.isBeforeLegalizeOps())
25124 if (Subtarget->hasCMov()) {
25125 SDValue RV = performIntegerAbsCombine(N, DAG);
25133 /// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
25134 static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
25135 TargetLowering::DAGCombinerInfo &DCI,
25136 const X86Subtarget *Subtarget) {
25137 LoadSDNode *Ld = cast<LoadSDNode>(N);
25138 EVT RegVT = Ld->getValueType(0);
25139 EVT MemVT = Ld->getMemoryVT();
25141 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25143 // For chips with slow 32-byte unaligned loads, break the 32-byte operation
25144 // into two 16-byte operations.
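// For instance, an unaligned 32-byte load of a v8f32 becomes two 16-byte
// v4f32 loads (the second from Ptr+16) that are inserted into the low and
// high 128-bit halves of the result, with their chains joined by a
// TokenFactor.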
25145 ISD::LoadExtType Ext = Ld->getExtensionType();
25146 unsigned Alignment = Ld->getAlignment();
25147 bool IsAligned = Alignment == 0 || Alignment >= MemVT.getSizeInBits()/8;
25148 if (RegVT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
25149 !DCI.isBeforeLegalizeOps() && !IsAligned && Ext == ISD::NON_EXTLOAD) {
25150 unsigned NumElems = RegVT.getVectorNumElements();
25154 SDValue Ptr = Ld->getBasePtr();
25155 SDValue Increment = DAG.getConstant(16, TLI.getPointerTy());
25157 EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
25159 SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
25160 Ld->getPointerInfo(), Ld->isVolatile(),
25161 Ld->isNonTemporal(), Ld->isInvariant(),
25163 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
25164 SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
25165 Ld->getPointerInfo(), Ld->isVolatile(),
25166 Ld->isNonTemporal(), Ld->isInvariant(),
25167 std::min(16U, Alignment));
25168 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
25170 Load2.getValue(1));
25172 SDValue NewVec = DAG.getUNDEF(RegVT);
25173 NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl);
25174 NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl);
25175 return DCI.CombineTo(N, NewVec, TF, true);
25181 /// PerformMLOADCombine - Resolve extending masked loads.
25182 static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
25183 TargetLowering::DAGCombinerInfo &DCI,
25184 const X86Subtarget *Subtarget) {
25185 MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
25186 if (Mld->getExtensionType() != ISD::SEXTLOAD)
25189 EVT VT = Mld->getValueType(0);
25190 unsigned NumElems = VT.getVectorNumElements();
25191 EVT LdVT = Mld->getMemoryVT();
25194 assert(LdVT != VT && "Cannot extend to the same type");
25195 unsigned ToSz = VT.getVectorElementType().getSizeInBits();
25196 unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
25197 // From, To sizes and ElemCount must be pow of two
25198 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
25199 "Unexpected size for extending masked load");
25201 unsigned SizeRatio = ToSz / FromSz;
25202 assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
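// For example, a sign-extending masked load from v8i16 in memory to v8i32
// has FromSz == 16, ToSz == 32 and SizeRatio == 2, so WideVecVT below is
// v16i16: the load is performed in the narrow element type and the result is
// then sign-extended with X86ISD::VSEXT.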
25204 // Create a type on which we perform the shuffle
25205 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25206 LdVT.getScalarType(), NumElems*SizeRatio);
25207 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25209 // Convert Src0 value
25210 SDValue WideSrc0 = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mld->getSrc0());
25211 if (Mld->getSrc0().getOpcode() != ISD::UNDEF) {
25212 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25213 for (unsigned i = 0; i != NumElems; ++i)
25214 ShuffleVec[i] = i * SizeRatio;
25216 // Can't shuffle using an illegal type.
25217 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
25218 && "WideVecVT should be legal");
25219 WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
25220 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
25222 // Prepare the new mask
25224 SDValue Mask = Mld->getMask();
25225 if (Mask.getValueType() == VT) {
25226 // Mask and original value have the same type
25227 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
25228 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25229 for (unsigned i = 0; i != NumElems; ++i)
25230 ShuffleVec[i] = i * SizeRatio;
25231 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
25232 ShuffleVec[i] = NumElems*SizeRatio;
25233 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
25234 DAG.getConstant(0, WideVecVT),
25238 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
25239 unsigned WidenNumElts = NumElems*SizeRatio;
25240 unsigned MaskNumElts = VT.getVectorNumElements();
25241 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
25244 unsigned NumConcat = WidenNumElts / MaskNumElts;
25245 SmallVector<SDValue, 16> Ops(NumConcat);
25246 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
25248 for (unsigned i = 1; i != NumConcat; ++i)
25251 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
25254 SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
25255 Mld->getBasePtr(), NewMask, WideSrc0,
25256 Mld->getMemoryVT(), Mld->getMemOperand(),
25258 SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd);
25259 return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
25262 /// PerformMSTORECombine - Resolve truncating masked stores.
25263 static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
25264 const X86Subtarget *Subtarget) {
25265 MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
25266 if (!Mst->isTruncatingStore())
25269 EVT VT = Mst->getValue().getValueType();
25270 unsigned NumElems = VT.getVectorNumElements();
25271 EVT StVT = Mst->getMemoryVT();
25274 assert(StVT != VT && "Cannot truncate to the same type");
25275 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25276 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25278 // From, To sizes and ElemCount must be pow of two
25279 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
25280 "Unexpected size for truncating masked store");
25281 // We are going to use the original vector elt for storing.
25282 // Accumulated smaller vector elements must be a multiple of the store size.
25283 assert (((NumElems * FromSz) % ToSz) == 0 &&
25284 "Unexpected ratio for truncating masked store");
25286 unsigned SizeRatio = FromSz / ToSz;
25287 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
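// For example, a truncating masked store of v8i32 to v8i16 memory has
// FromSz == 32, ToSz == 16 and SizeRatio == 2; WideVecVT below is v16i16 and
// the shuffle gathers the low i16 half of each i32 element into the first
// eight lanes before the narrow masked store is emitted.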
25289 // Create a type on which we perform the shuffle
25290 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25291 StVT.getScalarType(), NumElems*SizeRatio);
25293 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25295 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mst->getValue());
25296 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25297 for (unsigned i = 0; i != NumElems; ++i)
25298 ShuffleVec[i] = i * SizeRatio;
25300 // Can't shuffle using an illegal type.
25301 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
25302 && "WideVecVT should be legal");
25304 SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25305 DAG.getUNDEF(WideVecVT),
25309 SDValue Mask = Mst->getMask();
25310 if (Mask.getValueType() == VT) {
25311 // Mask and original value have the same type
25312 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
25313 for (unsigned i = 0; i != NumElems; ++i)
25314 ShuffleVec[i] = i * SizeRatio;
25315 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
25316 ShuffleVec[i] = NumElems*SizeRatio;
25317 NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
25318 DAG.getConstant(0, WideVecVT),
25322 assert(Mask.getValueType().getVectorElementType() == MVT::i1);
25323 unsigned WidenNumElts = NumElems*SizeRatio;
25324 unsigned MaskNumElts = VT.getVectorNumElements();
25325 EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
25328 unsigned NumConcat = WidenNumElts / MaskNumElts;
25329 SmallVector<SDValue, 16> Ops(NumConcat);
25330 SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
25332 for (unsigned i = 1; i != NumConcat; ++i)
25335 NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
25338 return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal, Mst->getBasePtr(),
25339 NewMask, StVT, Mst->getMemOperand(), false);
25341 /// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
25342 static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
25343 const X86Subtarget *Subtarget) {
25344 StoreSDNode *St = cast<StoreSDNode>(N);
25345 EVT VT = St->getValue().getValueType();
25346 EVT StVT = St->getMemoryVT();
25348 SDValue StoredVal = St->getOperand(1);
25349 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25351 // If we are saving a concatenation of two XMM registers and 32-byte stores
25352 // are slow, such as on Sandy Bridge, perform two 16-byte stores.
25353 unsigned Alignment = St->getAlignment();
25354 bool IsAligned = Alignment == 0 || Alignment >= VT.getSizeInBits()/8;
25355 if (VT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
25356 StVT == VT && !IsAligned) {
25357 unsigned NumElems = VT.getVectorNumElements();
25361 SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl);
25362 SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl);
25364 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy());
25365 SDValue Ptr0 = St->getBasePtr();
25366 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride);
25368 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0,
25369 St->getPointerInfo(), St->isVolatile(),
25370 St->isNonTemporal(), Alignment);
25371 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1,
25372 St->getPointerInfo(), St->isVolatile(),
25373 St->isNonTemporal(),
25374 std::min(16U, Alignment));
25375 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
25378 // Optimize trunc store (of multiple scalars) to shuffle and store.
25379 // First, pack all of the elements in one place. Next, store to memory
25380 // in fewer chunks.
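// For example, a truncating store of v8i32 to v8i8 shuffles the eight bytes
// into the low 64 bits of the register and then, when a 64-bit store unit is
// legal, emits a single 64-bit store instead of eight scalar stores.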
25381 if (St->isTruncatingStore() && VT.isVector()) {
25382 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25383 unsigned NumElems = VT.getVectorNumElements();
25384 assert(StVT != VT && "Cannot truncate to the same type");
25385 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25386 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25388 // From, To sizes and ElemCount must be pow of two
25389 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
25390 // We are going to use the original vector elt for storing.
25391 // Accumulated smaller vector elements must be a multiple of the store size.
25392 if (0 != (NumElems * FromSz) % ToSz) return SDValue();
25394 unsigned SizeRatio = FromSz / ToSz;
25396 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
25398 // Create a type on which we perform the shuffle
25399 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25400 StVT.getScalarType(), NumElems*SizeRatio);
25402 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25404 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue());
25405 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
25406 for (unsigned i = 0; i != NumElems; ++i)
25407 ShuffleVec[i] = i * SizeRatio;
25409 // Can't shuffle using an illegal type.
25410 if (!TLI.isTypeLegal(WideVecVT))
25413 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
25414 DAG.getUNDEF(WideVecVT),
25416     // At this point all of the data is stored at the bottom of the
25417     // register. We now need to store it to memory.
25419 // Find the largest store unit
25420 MVT StoreType = MVT::i8;
25421 for (MVT Tp : MVT::integer_valuetypes()) {
25422 if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
25427     // On 32-bit systems, we can't store 64-bit integers. Try bitcasting to f64.
25427 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
25428 (64 <= NumElems * ToSz))
25429 StoreType = MVT::f64;
25431 // Bitcast the original vector into a vector of store-size units
25432 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
25433 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
25434 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
25435 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff);
25436 SmallVector<SDValue, 8> Chains;
25437 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
25438 TLI.getPointerTy());
25439 SDValue Ptr = St->getBasePtr();
25441 // Perform one or more big stores into memory.
25442 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
25443 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
25444 StoreType, ShuffWide,
25445 DAG.getIntPtrConstant(i));
25446 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr,
25447 St->getPointerInfo(), St->isVolatile(),
25448 St->isNonTemporal(), St->getAlignment());
25449 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
25450 Chains.push_back(Ch);
25453 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
25456 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
25457 // the FP state in cases where an emms may be missing.
25458 // A preferable solution to the general problem is to figure out the right
25459 // places to insert EMMS. This qualifies as a quick hack.
25461 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
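// For example, copying an i64 from one memory location to another on a
// 32-bit target with SSE2 becomes a single f64 load/store pair instead of two
// 32-bit load/store pairs.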
25462 if (VT.getSizeInBits() != 64)
25465 const Function *F = DAG.getMachineFunction().getFunction();
25466 bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
25467 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
25468 && Subtarget->hasSSE2();
25469 if ((VT.isVector() ||
25470 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
25471 isa<LoadSDNode>(St->getValue()) &&
25472 !cast<LoadSDNode>(St->getValue())->isVolatile() &&
25473 St->getChain().hasOneUse() && !St->isVolatile()) {
25474 SDNode* LdVal = St->getValue().getNode();
25475 LoadSDNode *Ld = nullptr;
25476 int TokenFactorIndex = -1;
25477 SmallVector<SDValue, 8> Ops;
25478 SDNode* ChainVal = St->getChain().getNode();
25479 // Must be a store of a load. We currently handle two cases: the load
25480 // is a direct child, and it's under an intervening TokenFactor. It is
25481 // possible to dig deeper under nested TokenFactors.
25482 if (ChainVal == LdVal)
25483 Ld = cast<LoadSDNode>(St->getChain());
25484 else if (St->getValue().hasOneUse() &&
25485 ChainVal->getOpcode() == ISD::TokenFactor) {
25486 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
25487 if (ChainVal->getOperand(i).getNode() == LdVal) {
25488 TokenFactorIndex = i;
25489 Ld = cast<LoadSDNode>(St->getValue());
25491 Ops.push_back(ChainVal->getOperand(i));
25495 if (!Ld || !ISD::isNormalLoad(Ld))
25498 // If this is not the MMX case, i.e. we are just turning i64 load/store
25499 // into f64 load/store, avoid the transformation if there are multiple
25500 // uses of the loaded value.
25501 if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
25506 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
25507     // Otherwise, if it's legal to use f64 SSE instructions, use an f64 load/store pair instead.
25509 if (Subtarget->is64Bit() || F64IsLegal) {
25510 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
25511 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
25512 Ld->getPointerInfo(), Ld->isVolatile(),
25513 Ld->isNonTemporal(), Ld->isInvariant(),
25514 Ld->getAlignment());
25515 SDValue NewChain = NewLd.getValue(1);
25516 if (TokenFactorIndex != -1) {
25517 Ops.push_back(NewChain);
25518 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25520 return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
25521 St->getPointerInfo(),
25522 St->isVolatile(), St->isNonTemporal(),
25523 St->getAlignment());
25526 // Otherwise, lower to two pairs of 32-bit loads / stores.
25527 SDValue LoAddr = Ld->getBasePtr();
25528 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
25529 DAG.getConstant(4, MVT::i32));
25531 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
25532 Ld->getPointerInfo(),
25533 Ld->isVolatile(), Ld->isNonTemporal(),
25534 Ld->isInvariant(), Ld->getAlignment());
25535 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
25536 Ld->getPointerInfo().getWithOffset(4),
25537 Ld->isVolatile(), Ld->isNonTemporal(),
25539 MinAlign(Ld->getAlignment(), 4));
25541 SDValue NewChain = LoLd.getValue(1);
25542 if (TokenFactorIndex != -1) {
25543 Ops.push_back(LoLd);
25544 Ops.push_back(HiLd);
25545 NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
25548 LoAddr = St->getBasePtr();
25549 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
25550 DAG.getConstant(4, MVT::i32));
25552 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
25553 St->getPointerInfo(),
25554 St->isVolatile(), St->isNonTemporal(),
25555 St->getAlignment());
25556 SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
25557 St->getPointerInfo().getWithOffset(4),
25559 St->isNonTemporal(),
25560 MinAlign(St->getAlignment(), 4));
25561 return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
25566 /// Return 'true' if this vector operation is "horizontal"
25567 /// and return the operands for the horizontal operation in LHS and RHS. A
25568 /// horizontal operation performs the binary operation on successive elements
25569 /// of its first operand, then on successive elements of its second operand,
25570 /// returning the resulting values in a vector. For example, if
25571 /// A = < float a0, float a1, float a2, float a3 >
25573 /// B = < float b0, float b1, float b2, float b3 >
25574 /// then the result of doing a horizontal operation on A and B is
25575 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
25576 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
25577 /// A horizontal-op B, for some already available A and B, and if so then LHS is
25578 /// set to A, RHS to B, and the routine returns 'true'.
25579 /// Note that the binary operation should have the property that if one of the
25580 /// operands is UNDEF then the result is UNDEF.
25581 static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
25582 // Look for the following pattern: if
25583 // A = < float a0, float a1, float a2, float a3 >
25584 // B = < float b0, float b1, float b2, float b3 >
25586 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
25587 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
25588 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
25589 // which is A horizontal-op B.
25591 // At least one of the operands should be a vector shuffle.
25592 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
25593 RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
25596 MVT VT = LHS.getSimpleValueType();
25598 assert((VT.is128BitVector() || VT.is256BitVector()) &&
25599 "Unsupported vector type for horizontal add/sub");
25601 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
25602 // operate independently on 128-bit lanes.
25603 unsigned NumElts = VT.getVectorNumElements();
25604 unsigned NumLanes = VT.getSizeInBits()/128;
25605 unsigned NumLaneElts = NumElts / NumLanes;
25606 assert((NumLaneElts % 2 == 0) &&
25607 "Vector type should have an even number of elements in each lane");
25608 unsigned HalfLaneElts = NumLaneElts/2;
25610 // View LHS in the form
25611 // LHS = VECTOR_SHUFFLE A, B, LMask
25612 // If LHS is not a shuffle then pretend it is the shuffle
25613 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
25614   // NOTE: in what follows a default initialized SDValue represents an UNDEF of type VT.
25617 SmallVector<int, 16> LMask(NumElts);
25618 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25619 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF)
25620 A = LHS.getOperand(0);
25621 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF)
25622 B = LHS.getOperand(1);
25623 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
25624 std::copy(Mask.begin(), Mask.end(), LMask.begin());
25626 if (LHS.getOpcode() != ISD::UNDEF)
25628 for (unsigned i = 0; i != NumElts; ++i)
25632 // Likewise, view RHS in the form
25633 // RHS = VECTOR_SHUFFLE C, D, RMask
25635 SmallVector<int, 16> RMask(NumElts);
25636 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25637 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF)
25638 C = RHS.getOperand(0);
25639 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF)
25640 D = RHS.getOperand(1);
25641 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
25642 std::copy(Mask.begin(), Mask.end(), RMask.begin());
25644 if (RHS.getOpcode() != ISD::UNDEF)
25646 for (unsigned i = 0; i != NumElts; ++i)
25650 // Check that the shuffles are both shuffling the same vectors.
25651 if (!(A == C && B == D) && !(A == D && B == C))
25654 // If everything is UNDEF then bail out: it would be better to fold to UNDEF.
25655 if (!A.getNode() && !B.getNode())
25658 // If A and B occur in reverse order in RHS, then "swap" them (which means
25659 // rewriting the mask).
25661 CommuteVectorShuffleMask(RMask, NumElts);
25663 // At this point LHS and RHS are equivalent to
25664 // LHS = VECTOR_SHUFFLE A, B, LMask
25665 // RHS = VECTOR_SHUFFLE A, B, RMask
25666 // Check that the masks correspond to performing a horizontal operation.
25667 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
25668 for (unsigned i = 0; i != NumLaneElts; ++i) {
25669 int LIdx = LMask[i+l], RIdx = RMask[i+l];
25671 // Ignore any UNDEF components.
25672 if (LIdx < 0 || RIdx < 0 ||
25673 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
25674 (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
25677 // Check that successive elements are being operated on. If not, this is
25678 // not a horizontal operation.
25679 unsigned Src = (i/HalfLaneElts); // each lane is split between srcs
25680 int Index = 2*(i%HalfLaneElts) + NumElts*Src + l;
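// Worked example: for v8f32 (NumElts == 8, NumLaneElts == 4, HalfLaneElts == 2)
// with l == 0 and i == 2, Src == 1 and Index == 8, so (LIdx, RIdx) must be
// (8, 9), i.e. elements b0 and b1 of the second source vector.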
25681 if (!(LIdx == Index && RIdx == Index + 1) &&
25682 !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
25687 LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
25688 RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
25692 /// Do target-specific dag combines on floating point adds.
25693 static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
25694 const X86Subtarget *Subtarget) {
25695 EVT VT = N->getValueType(0);
25696 SDValue LHS = N->getOperand(0);
25697 SDValue RHS = N->getOperand(1);
25699 // Try to synthesize horizontal adds from adds of shuffles.
25700 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25701 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25702 isHorizontalBinOp(LHS, RHS, true))
25703 return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS);
25707 /// Do target-specific dag combines on floating point subs.
25708 static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
25709 const X86Subtarget *Subtarget) {
25710 EVT VT = N->getValueType(0);
25711 SDValue LHS = N->getOperand(0);
25712 SDValue RHS = N->getOperand(1);
25714 // Try to synthesize horizontal subs from subs of shuffles.
25715 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25716 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25717 isHorizontalBinOp(LHS, RHS, false))
25718 return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS);
25722 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
25723 static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
25724 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
25726 // F[X]OR(0.0, x) -> x
25727 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25728 if (C->getValueAPF().isPosZero())
25729 return N->getOperand(1);
25731 // F[X]OR(x, 0.0) -> x
25732 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25733 if (C->getValueAPF().isPosZero())
25734 return N->getOperand(0);
25738 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
25739 static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
25740 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
25742 // Only perform optimizations if UnsafeMath is used.
25743 if (!DAG.getTarget().Options.UnsafeFPMath)
25746   // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
25747   // into FMAXC and FMINC, which are commutative operations.
25748 unsigned NewOp = 0;
25749 switch (N->getOpcode()) {
25750 default: llvm_unreachable("unknown opcode");
25751 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
25752 case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
25755 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
25756 N->getOperand(0), N->getOperand(1));
25759 /// Do target-specific dag combines on X86ISD::FAND nodes.
25760 static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
25761 // FAND(0.0, x) -> 0.0
25762 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25763 if (C->getValueAPF().isPosZero())
25764 return N->getOperand(0);
25766 // FAND(x, 0.0) -> 0.0
25767 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25768 if (C->getValueAPF().isPosZero())
25769 return N->getOperand(1);
25774 /// Do target-specific dag combines on X86ISD::FANDN nodes
25775 static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG) {
25776 // FANDN(0.0, x) -> x
25777 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25778 if (C->getValueAPF().isPosZero())
25779 return N->getOperand(1);
25781 // FANDN(x, 0.0) -> 0.0
25782 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25783 if (C->getValueAPF().isPosZero())
25784 return N->getOperand(1);
25789 static SDValue PerformBTCombine(SDNode *N,
25791 TargetLowering::DAGCombinerInfo &DCI) {
25792 // BT ignores high bits in the bit index operand.
25793 SDValue Op1 = N->getOperand(1);
25794 if (Op1.hasOneUse()) {
25795 unsigned BitWidth = Op1.getValueSizeInBits();
25796 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
25797 APInt KnownZero, KnownOne;
25798 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
25799 !DCI.isBeforeLegalizeOps());
25800 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25801 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
25802 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
25803 DCI.CommitTargetLoweringOpt(TLO);
25808 static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
25809 SDValue Op = N->getOperand(0);
25810 if (Op.getOpcode() == ISD::BITCAST)
25811 Op = Op.getOperand(0);
25812 EVT VT = N->getValueType(0), OpVT = Op.getValueType();
25813 if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
25814 VT.getVectorElementType().getSizeInBits() ==
25815 OpVT.getVectorElementType().getSizeInBits()) {
25816 return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
25821 static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
25822 const X86Subtarget *Subtarget) {
25823 EVT VT = N->getValueType(0);
25824 if (!VT.isVector())
25827 SDValue N0 = N->getOperand(0);
25828 SDValue N1 = N->getOperand(1);
25829 EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
25832   // SIGN_EXTEND_INREG to v4i64 is an expensive operation on both
25833   // SSE and AVX2, since there is no sign-extended shift right
25834   // operation on a vector with 64-bit elements.
25835   // (sext_in_reg (v4i64 anyext (v4i32 x)), ExtraVT) ->
25836   //   (v4i64 sext (v4i32 sext_in_reg (v4i32 x, ExtraVT)))
25837 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
25838 N0.getOpcode() == ISD::SIGN_EXTEND)) {
25839 SDValue N00 = N0.getOperand(0);
25841     // EXTLOAD has a better solution on AVX2:
25842     // it may be replaced with an X86ISD::VSEXT node.
25843 if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256())
25844 if (!ISD::isNormalLoad(N00.getNode()))
25847 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
25848 SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
25850 return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
25856 static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
25857 TargetLowering::DAGCombinerInfo &DCI,
25858 const X86Subtarget *Subtarget) {
25859 SDValue N0 = N->getOperand(0);
25860 EVT VT = N->getValueType(0);
25862   // (i8,i32 sext (sdivrem (i8 x, i8 y))) ->
25863   // (i8,i32 (sdivrem_sext_hreg (i8 x, i8 y)))
25864 // This exposes the sext to the sdivrem lowering, so that it directly extends
25865 // from AH (which we otherwise need to do contortions to access).
25866 if (N0.getOpcode() == ISD::SDIVREM && N0.getResNo() == 1 &&
25867 N0.getValueType() == MVT::i8 && VT == MVT::i32) {
25869 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25870 SDValue R = DAG.getNode(X86ISD::SDIVREM8_SEXT_HREG, dl, NodeTys,
25871 N0.getOperand(0), N0.getOperand(1));
25872 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25873 return R.getValue(1);
25876 if (!DCI.isBeforeLegalizeOps())
25879 if (!Subtarget->hasFp256())
25882 if (VT.isVector() && VT.getSizeInBits() == 256) {
25883 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25891 static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
25892 const X86Subtarget* Subtarget) {
25894 EVT VT = N->getValueType(0);
25896 // Let legalize expand this if it isn't a legal type yet.
25897 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
25900 EVT ScalarVT = VT.getScalarType();
25901 if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
25902 (!Subtarget->hasFMA() && !Subtarget->hasFMA4()))
25905 SDValue A = N->getOperand(0);
25906 SDValue B = N->getOperand(1);
25907 SDValue C = N->getOperand(2);
25909 bool NegA = (A.getOpcode() == ISD::FNEG);
25910 bool NegB = (B.getOpcode() == ISD::FNEG);
25911 bool NegC = (C.getOpcode() == ISD::FNEG);
25913 // Negative multiplication when NegA xor NegB
25914 bool NegMul = (NegA != NegB);
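// For example, (fma (fneg a), b, (fneg c)) has both NegMul and NegC set and
// is selected as FNMSUB, computing -(a*b) - c.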
25916 A = A.getOperand(0);
25918 B = B.getOperand(0);
25920 C = C.getOperand(0);
25924 Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB;
25926 Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB;
25928 return DAG.getNode(Opcode, dl, VT, A, B, C);
25931 static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
25932 TargetLowering::DAGCombinerInfo &DCI,
25933 const X86Subtarget *Subtarget) {
25934 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
25935 // (and (i32 x86isd::setcc_carry), 1)
25936 // This eliminates the zext. This transformation is necessary because
25937 // ISD::SETCC is always legalized to i8.
25939 SDValue N0 = N->getOperand(0);
25940 EVT VT = N->getValueType(0);
25942 if (N0.getOpcode() == ISD::AND &&
25944 N0.getOperand(0).hasOneUse()) {
25945 SDValue N00 = N0.getOperand(0);
25946 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25947 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
25948 if (!C || C->getZExtValue() != 1)
25950 return DAG.getNode(ISD::AND, dl, VT,
25951 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25952 N00.getOperand(0), N00.getOperand(1)),
25953 DAG.getConstant(1, VT));
25957 if (N0.getOpcode() == ISD::TRUNCATE &&
25959 N0.getOperand(0).hasOneUse()) {
25960 SDValue N00 = N0.getOperand(0);
25961 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25962 return DAG.getNode(ISD::AND, dl, VT,
25963 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25964 N00.getOperand(0), N00.getOperand(1)),
25965 DAG.getConstant(1, VT));
25968 if (VT.is256BitVector()) {
25969 SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
25974   // (i8,i32 zext (udivrem (i8 x, i8 y))) ->
25975   // (i8,i32 (udivrem_zext_hreg (i8 x, i8 y)))
25976 // This exposes the zext to the udivrem lowering, so that it directly extends
25977 // from AH (which we otherwise need to do contortions to access).
25978 if (N0.getOpcode() == ISD::UDIVREM &&
25979 N0.getResNo() == 1 && N0.getValueType() == MVT::i8 &&
25980 (VT == MVT::i32 || VT == MVT::i64)) {
25981 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25982 SDValue R = DAG.getNode(X86ISD::UDIVREM8_ZEXT_HREG, dl, NodeTys,
25983 N0.getOperand(0), N0.getOperand(1));
25984 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
25985 return R.getValue(1);
25991 // Optimize x == -y --> x+y == 0
25992 // x != -y --> x+y != 0
25993 static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
25994 const X86Subtarget* Subtarget) {
25995 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
25996 SDValue LHS = N->getOperand(0);
25997 SDValue RHS = N->getOperand(1);
25998 EVT VT = N->getValueType(0);
26001 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
26002 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
26003 if (C->getAPIntValue() == 0 && LHS.hasOneUse()) {
26004 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
26005 LHS.getValueType(), RHS, LHS.getOperand(1));
26006 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
26007 addV, DAG.getConstant(0, addV.getValueType()), CC);
26009 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
26010 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0)))
26011 if (C->getAPIntValue() == 0 && RHS.hasOneUse()) {
26012 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
26013 RHS.getValueType(), LHS, RHS.getOperand(1));
26014 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
26015 addV, DAG.getConstant(0, addV.getValueType()), CC);
26018 if (VT.getScalarType() == MVT::i1) {
26019 bool IsSEXT0 = (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
26020 (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
26021 bool IsVZero0 = ISD::isBuildVectorAllZeros(LHS.getNode());
26022 if (!IsSEXT0 && !IsVZero0)
26024 bool IsSEXT1 = (RHS.getOpcode() == ISD::SIGN_EXTEND) &&
26025 (RHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
26026 bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
26028 if (!IsSEXT1 && !IsVZero1)
26031 if (IsSEXT0 && IsVZero1) {
26032       assert(VT == LHS.getOperand(0).getValueType() && "Unexpected operand type");
26033 if (CC == ISD::SETEQ)
26034 return DAG.getNOT(DL, LHS.getOperand(0), VT);
26035 return LHS.getOperand(0);
26037 if (IsSEXT1 && IsVZero0) {
26038       assert(VT == RHS.getOperand(0).getValueType() && "Unexpected operand type");
26039 if (CC == ISD::SETEQ)
26040 return DAG.getNOT(DL, RHS.getOperand(0), VT);
26041 return RHS.getOperand(0);
26048 static SDValue PerformINSERTPSCombine(SDNode *N, SelectionDAG &DAG,
26049 const X86Subtarget *Subtarget) {
26051 MVT VT = N->getOperand(1)->getSimpleValueType(0);
26052 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
26053 "X86insertps is only defined for v4x32");
26055 SDValue Ld = N->getOperand(1);
26056 if (MayFoldLoad(Ld)) {
26057 // Extract the countS bits from the immediate so we can get the proper
26058 // address when narrowing the vector load to a specific element.
26059     // When the second source op is a memory address, insertps doesn't use
26060 // countS and just gets an f32 from that address.
26061 unsigned DestIndex =
26062 cast<ConstantSDNode>(N->getOperand(2))->getZExtValue() >> 6;
26063 Ld = NarrowVectorLoadToElement(cast<LoadSDNode>(Ld), DestIndex, DAG);
26067 // Create this as a scalar to vector to match the instruction pattern.
26068 SDValue LoadScalarToVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Ld);
26069 // countS bits are ignored when loading from memory on insertps, which
26070 // means we don't need to explicitly set them to 0.
26071 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N->getOperand(0),
26072 LoadScalarToVector, N->getOperand(2));
26075 // Helper function of PerformSETCCCombine. It materializes "setb reg"
26076 // as "sbb reg,reg", since that form can be extended without a zext and produces
26077 // an all-ones bit which is more useful than 0/1 in some cases.
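// For example, "sbb %eax, %eax" leaves %eax as 0 or 0xFFFFFFFF depending on
// the carry flag; masking the result with 1 recovers the 0/1 value of "setb"
// without a zero extension.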
26078 static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
26081 return DAG.getNode(ISD::AND, DL, VT,
26082 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
26083 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS),
26084 DAG.getConstant(1, VT));
26085   assert(VT == MVT::i1 && "Unexpected type for SETCC node");
26086 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1,
26087 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
26088 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS));
26091 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
26092 static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
26093 TargetLowering::DAGCombinerInfo &DCI,
26094 const X86Subtarget *Subtarget) {
26096 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
26097 SDValue EFLAGS = N->getOperand(1);
26099 if (CC == X86::COND_A) {
26100 // Try to convert COND_A into COND_B in an attempt to facilitate
26101 // materializing "setb reg".
26103 // Do not flip "e > c", where "c" is a constant, because Cmp instruction
26104 // cannot take an immediate as its first operand.
26106 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
26107 EFLAGS.getValueType().isInteger() &&
26108 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
26109 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
26110 EFLAGS.getNode()->getVTList(),
26111 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
26112 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
26113 return MaterializeSETB(DL, NewEFLAGS, DAG, N->getSimpleValueType(0));
26117 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
26118   // a zext and produces an all-ones bit which is more useful than 0/1 in some cases.
26120 if (CC == X86::COND_B)
26121 return MaterializeSETB(DL, EFLAGS, DAG, N->getSimpleValueType(0));
26125 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
26126 if (Flags.getNode()) {
26127 SDValue Cond = DAG.getConstant(CC, MVT::i8);
26128 return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
26134 // Optimize branch condition evaluation.
26136 static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
26137 TargetLowering::DAGCombinerInfo &DCI,
26138 const X86Subtarget *Subtarget) {
26140 SDValue Chain = N->getOperand(0);
26141 SDValue Dest = N->getOperand(1);
26142 SDValue EFLAGS = N->getOperand(3);
26143 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
26147 Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
26148 if (Flags.getNode()) {
26149 SDValue Cond = DAG.getConstant(CC, MVT::i8);
26150 return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
26157 static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
26158 SelectionDAG &DAG) {
26159 // Take advantage of vector comparisons producing 0 or -1 in each lane to
26160   // optimize away the operation when it's applied to a constant.
26162 // The general transformation is:
26163 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
26164 // AND(VECTOR_CMP(x,y), constant2)
26165 // constant2 = UNARYOP(constant)
26167 // Early exit if this isn't a vector operation, the operand of the
26168 // unary operation isn't a bitwise AND, or if the sizes of the operations
26169 // aren't the same.
26170 EVT VT = N->getValueType(0);
26171 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
26172 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
26173 VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
26176 // Now check that the other operand of the AND is a constant. We could
26177 // make the transformation for non-constant splats as well, but it's unclear
26178 // that would be a benefit as it would not eliminate any operations, just
26179 // perform one more step in scalar code before moving to the vector unit.
26180 if (BuildVectorSDNode *BV =
26181 dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
26182 // Bail out if the vector isn't a constant.
26183 if (!BV->isConstant())
26186 // Everything checks out. Build up the new and improved node.
26188 EVT IntVT = BV->getValueType(0);
26189     // Create a new constant of the appropriate type for the transformed node.
26191 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
26192 // The AND node needs bitcasts to/from an integer vector type around it.
26193 SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
26194 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
26195 N->getOperand(0)->getOperand(0), MaskConst);
26196 SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
26203 static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
26204 const X86Subtarget *Subtarget) {
26205 // First try to optimize away the conversion entirely when it's
26206 // conditionally from a constant. Vectors only.
26207 SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
26208 if (Res != SDValue())
26211 // Now move on to more general possibilities.
26212 SDValue Op0 = N->getOperand(0);
26213 EVT InVT = Op0->getValueType(0);
26215 // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32))
26216 if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
26218 MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
26219 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
26220 return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
26223 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
26224 // a 32-bit target where SSE doesn't support i64->FP operations.
26225 if (Op0.getOpcode() == ISD::LOAD) {
26226 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
26227 EVT VT = Ld->getValueType(0);
26228 if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
26229 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
26230 !Subtarget->is64Bit() && VT == MVT::i64) {
26231 SDValue FILDChain = Subtarget->getTargetLowering()->BuildFILD(
26232 SDValue(N, 0), Ld->getValueType(0), Ld->getChain(), Op0, DAG);
26233 DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
26240 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
26241 static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
26242 X86TargetLowering::DAGCombinerInfo &DCI) {
26243 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
26244 // the result is either zero or one (depending on the input carry bit).
26245 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
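// For example, (adc 0, 0, EFLAGS) is simply the incoming carry bit, so it can
// be computed as (and (setcc_carry EFLAGS), 1) with no addition at all.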
26246 if (X86::isZeroNode(N->getOperand(0)) &&
26247 X86::isZeroNode(N->getOperand(1)) &&
26248       // We don't have a good way to replace an EFLAGS use, so only do this when the EFLAGS result is unused.
26250 SDValue(N, 1).use_empty()) {
26252 EVT VT = N->getValueType(0);
26253 SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
26254 SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
26255 DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
26256 DAG.getConstant(X86::COND_B,MVT::i8),
26258 DAG.getConstant(1, VT));
26259 return DCI.CombineTo(N, Res1, CarryOut);
26265 // fold (add Y, (sete X, 0)) -> adc 0, Y
26266 // (add Y, (setne X, 0)) -> sbb -1, Y
26267 // (sub (sete X, 0), Y) -> sbb 0, Y
26268 // (sub (setne X, 0), Y) -> adc -1, Y
26269 static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
26272 // Look through ZExts.
26273 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
26274 if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
26277 SDValue SetCC = Ext.getOperand(0);
26278 if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
26281 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
26282 if (CC != X86::COND_E && CC != X86::COND_NE)
26285 SDValue Cmp = SetCC.getOperand(1);
26286 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
26287 !X86::isZeroNode(Cmp.getOperand(1)) ||
26288 !Cmp.getOperand(0).getValueType().isInteger())
26291 SDValue CmpOp0 = Cmp.getOperand(0);
26292 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
26293 DAG.getConstant(1, CmpOp0.getValueType()));
26295 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
26296 if (CC == X86::COND_NE)
26297 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
26298 DL, OtherVal.getValueType(), OtherVal,
26299 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
26300 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
26301 DL, OtherVal.getValueType(), OtherVal,
26302 DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
26305 /// PerformADDCombine - Do target-specific dag combines on integer adds.
26306 static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
26307 const X86Subtarget *Subtarget) {
26308 EVT VT = N->getValueType(0);
26309 SDValue Op0 = N->getOperand(0);
26310 SDValue Op1 = N->getOperand(1);
26312 // Try to synthesize horizontal adds from adds of shuffles.
26313 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26314 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26315 isHorizontalBinOp(Op0, Op1, true))
26316 return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
26318 return OptimizeConditionalInDecrement(N, DAG);
26321 static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
26322 const X86Subtarget *Subtarget) {
26323 SDValue Op0 = N->getOperand(0);
26324 SDValue Op1 = N->getOperand(1);
26326 // X86 can't encode an immediate LHS of a sub. See if we can push the
26327 // negation into a preceding instruction.
26328 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
26329     // If the RHS of the sub is an XOR with one use and a constant, invert the
26330 // immediate. Then add one to the LHS of the sub so we can turn
26331 // X-Y -> X+~Y+1, saving one register.
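// For example, (sub 5, (xor y, 3)) becomes (add (xor y, ~3), 6), since
// 5 - z == 5 + ~z + 1 for any z.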
26332 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
26333 isa<ConstantSDNode>(Op1.getOperand(1))) {
26334 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
26335 EVT VT = Op0.getValueType();
26336 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
26338 DAG.getConstant(~XorC, VT));
26339 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
26340 DAG.getConstant(C->getAPIntValue()+1, VT));
26344   // Try to synthesize horizontal subs from subs of shuffles.
26345 EVT VT = N->getValueType(0);
26346 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26347 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26348 isHorizontalBinOp(Op0, Op1, true))
26349 return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
26351 return OptimizeConditionalInDecrement(N, DAG);
26354 /// performVZEXTCombine - Performs combines on X86ISD::VZEXT nodes.
26355 static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
26356 TargetLowering::DAGCombinerInfo &DCI,
26357 const X86Subtarget *Subtarget) {
26359 MVT VT = N->getSimpleValueType(0);
26360 SDValue Op = N->getOperand(0);
26361 MVT OpVT = Op.getSimpleValueType();
26362 MVT OpEltVT = OpVT.getVectorElementType();
26363 unsigned InputBits = OpEltVT.getSizeInBits() * VT.getVectorNumElements();
26365   // (vzext (bitcast (vzext x))) -> (vzext x)
26367 while (V.getOpcode() == ISD::BITCAST)
26368 V = V.getOperand(0);
26370 if (V != Op && V.getOpcode() == X86ISD::VZEXT) {
26371 MVT InnerVT = V.getSimpleValueType();
26372 MVT InnerEltVT = InnerVT.getVectorElementType();
26374 // If the element sizes match exactly, we can just do one larger vzext. This
26375 // is always an exact type match as vzext operates on integer types.
26376 if (OpEltVT == InnerEltVT) {
26377 assert(OpVT == InnerVT && "Types must match for vzext!");
26378 return DAG.getNode(X86ISD::VZEXT, DL, VT, V.getOperand(0));
26381 // The only other way we can combine them is if only a single element of the
26382 // inner vzext is used in the input to the outer vzext.
26383 if (InnerEltVT.getSizeInBits() < InputBits)
26386 // In this case, the inner vzext is completely dead because we're going to
26387 // only look at bits inside of the low element. Just do the outer vzext on
26388 // a bitcast of the input to the inner.
26389 return DAG.getNode(X86ISD::VZEXT, DL, VT,
26390 DAG.getNode(ISD::BITCAST, DL, OpVT, V));
26393 // Check if we can bypass extracting and re-inserting an element of an input
26394   // vector. Essentially:
26395 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
26396 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
26397 V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
26398 V.getOperand(0).getSimpleValueType().getSizeInBits() == InputBits) {
26399 SDValue ExtractedV = V.getOperand(0);
26400 SDValue OrigV = ExtractedV.getOperand(0);
26401 if (auto *ExtractIdx = dyn_cast<ConstantSDNode>(ExtractedV.getOperand(1)))
26402 if (ExtractIdx->getZExtValue() == 0) {
26403 MVT OrigVT = OrigV.getSimpleValueType();
26404 // Extract a subvector if necessary...
26405 if (OrigVT.getSizeInBits() > OpVT.getSizeInBits()) {
26406 int Ratio = OrigVT.getSizeInBits() / OpVT.getSizeInBits();
26407 OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
26408 OrigVT.getVectorNumElements() / Ratio);
26409 OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
26410 DAG.getIntPtrConstant(0));
26412 Op = DAG.getNode(ISD::BITCAST, DL, OpVT, OrigV);
26413 return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
26420 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
26421 DAGCombinerInfo &DCI) const {
26422 SelectionDAG &DAG = DCI.DAG;
26423 switch (N->getOpcode()) {
26425 case ISD::EXTRACT_VECTOR_ELT:
26426 return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
26429 case X86ISD::SHRUNKBLEND:
26430 return PerformSELECTCombine(N, DAG, DCI, Subtarget);
26431 case ISD::BITCAST: return PerformBITCASTCombine(N, DAG);
26432 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget);
26433 case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget);
26434 case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget);
26435 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
26436 case ISD::MUL: return PerformMulCombine(N, DAG, DCI);
26439 case ISD::SRL: return PerformShiftCombine(N, DAG, DCI, Subtarget);
26440 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
26441 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
26442 case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
26443 case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
26444 case ISD::MLOAD: return PerformMLOADCombine(N, DAG, DCI, Subtarget);
26445 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
26446 case ISD::MSTORE: return PerformMSTORECombine(N, DAG, Subtarget);
26447 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, Subtarget);
26448 case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
26449 case ISD::FSUB: return PerformFSUBCombine(N, DAG, Subtarget);
26451 case X86ISD::FOR: return PerformFORCombine(N, DAG);
26453 case X86ISD::FMAX: return PerformFMinFMaxCombine(N, DAG);
26454 case X86ISD::FAND: return PerformFANDCombine(N, DAG);
26455 case X86ISD::FANDN: return PerformFANDNCombine(N, DAG);
26456 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
26457 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
26458 case ISD::ANY_EXTEND:
26459 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget);
26460 case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
26461 case ISD::SIGN_EXTEND_INREG:
26462 return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
26463 case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG,DCI,Subtarget);
26464 case ISD::SETCC: return PerformISDSETCCCombine(N, DAG, Subtarget);
26465 case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget);
26466 case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget);
26467 case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget);
26468 case X86ISD::SHUFP: // Handle all target specific shuffles
26469 case X86ISD::PALIGNR:
26470 case X86ISD::UNPCKH:
26471 case X86ISD::UNPCKL:
26472 case X86ISD::MOVHLPS:
26473 case X86ISD::MOVLHPS:
26474 case X86ISD::PSHUFB:
26475 case X86ISD::PSHUFD:
26476 case X86ISD::PSHUFHW:
26477 case X86ISD::PSHUFLW:
26478 case X86ISD::MOVSS:
26479 case X86ISD::MOVSD:
26480 case X86ISD::VPERMILPI:
26481 case X86ISD::VPERM2X128:
26482 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget);
26483 case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget);
26484 case ISD::INTRINSIC_WO_CHAIN:
26485 return PerformINTRINSIC_WO_CHAINCombine(N, DAG, Subtarget);
26486 case X86ISD::INSERTPS: {
26487 if (getTargetMachine().getOptLevel() > CodeGenOpt::None)
26488 return PerformINSERTPSCombine(N, DAG, Subtarget);
26491 case ISD::BUILD_VECTOR: return PerformBUILD_VECTORCombine(N, DAG, Subtarget);
26497 /// isTypeDesirableForOp - Return true if the target has native support for
26498 /// the specified value type and it is 'desirable' to use the type for the
26499 /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
26500 /// instruction encodings are longer and some i16 instructions are slow.
26501 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
26502 if (!isTypeLegal(VT))
26504 if (VT != MVT::i16)
26511 case ISD::SIGN_EXTEND:
26512 case ISD::ZERO_EXTEND:
26513 case ISD::ANY_EXTEND:
26526 /// IsDesirableToPromoteOp - This method queries the target whether it is
26527 /// beneficial for dag combiner to promote the specified node. If true, it
26528 /// should return the desired promotion type by reference.
26529 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
26530 EVT VT = Op.getValueType();
26531 if (VT != MVT::i16)
26534 bool Promote = false;
26535 bool Commute = false;
26536 switch (Op.getOpcode()) {
26539 LoadSDNode *LD = cast<LoadSDNode>(Op);
26540 // If the non-extending load has a single use and it's not live out, then it
26541 // might be folded.
26542 if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
26543 Op.hasOneUse()*/) {
26544 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
26545 UE = Op.getNode()->use_end(); UI != UE; ++UI) {
26546       // The only case where we'd want to promote LOAD (rather than it being
26547       // promoted as an operand) is when its only use is a liveout.
26548 if (UI->getOpcode() != ISD::CopyToReg)
26555 case ISD::SIGN_EXTEND:
26556 case ISD::ZERO_EXTEND:
26557 case ISD::ANY_EXTEND:
26562 SDValue N0 = Op.getOperand(0);
26563 // Look out for (store (shl (load), x)).
26564 if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
26577 SDValue N0 = Op.getOperand(0);
26578 SDValue N1 = Op.getOperand(1);
26579 if (!Commute && MayFoldLoad(N1))
26581 // Avoid disabling potential load folding opportunities.
26582 if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
26584 if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
26594 //===----------------------------------------------------------------------===//
26595 // X86 Inline Assembly Support
26596 //===----------------------------------------------------------------------===//
26599 // Helper to match a string separated by whitespace.
26600 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) {
26601 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace.
26603 for (unsigned i = 0, e = args.size(); i != e; ++i) {
26604 StringRef piece(*args[i]);
26605 if (!s.startswith(piece)) // Check if the piece matches.
26608 s = s.substr(piece.size());
26609 StringRef::size_type pos = s.find_first_not_of(" \t");
26610 if (pos == 0) // We matched a prefix.
26618 const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={};
26621 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
26623 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
26624 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
26625 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
26626 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
26628 if (AsmPieces.size() == 3)
26630 else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
26637 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
26638 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
26640 std::string AsmStr = IA->getAsmString();
26642 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
26643 if (!Ty || Ty->getBitWidth() % 16 != 0)
26646 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
26647 SmallVector<StringRef, 4> AsmPieces;
26648 SplitString(AsmStr, AsmPieces, ";\n");
26650 switch (AsmPieces.size()) {
26651 default: return false;
26653 // FIXME: this should verify that we are targeting a 486 or better. If not,
26654 // we will turn this bswap into something that will be lowered to logical
26655 // ops instead of emitting the bswap asm. For now, we don't support 486 or
26656 // lower so don't worry about this.
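// For illustration (a hypothetical caller): an input such as
//   %r = call i32 asm "bswap $0", "=r,0"(i32 %x)
// is matched below and handed to LowerToByteSwap, which rewrites it as
//   %r = call i32 @llvm.bswap.i32(i32 %x)
// so the backend can select a plain BSWAP instead of the inline asm.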
26658 if (matchAsm(AsmPieces[0], "bswap", "$0") ||
26659 matchAsm(AsmPieces[0], "bswapl", "$0") ||
26660 matchAsm(AsmPieces[0], "bswapq", "$0") ||
26661 matchAsm(AsmPieces[0], "bswap", "${0:q}") ||
26662 matchAsm(AsmPieces[0], "bswapl", "${0:q}") ||
26663 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) {
26664 // No need to check constraints; nothing other than the equivalent of
26665 // "=r,0" would be valid here.
26666 return IntrinsicLowering::LowerToByteSwap(CI);
26669 // rorw $$8, ${0:w} --> llvm.bswap.i16
26670 if (CI->getType()->isIntegerTy(16) &&
26671 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26672 (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") ||
26673 matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) {
26675 const std::string &ConstraintsStr = IA->getConstraintString();
26676 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26677 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26678 if (clobbersFlagRegisters(AsmPieces))
26679 return IntrinsicLowering::LowerToByteSwap(CI);
26683 if (CI->getType()->isIntegerTy(32) &&
26684 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26685 matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") &&
26686 matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") &&
26687 matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) {
26689 const std::string &ConstraintsStr = IA->getConstraintString();
26690 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26691 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26692 if (clobbersFlagRegisters(AsmPieces))
26693 return IntrinsicLowering::LowerToByteSwap(CI);
26696 if (CI->getType()->isIntegerTy(64)) {
26697 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
26698 if (Constraints.size() >= 2 &&
26699 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
26700 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
26701 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
26702 if (matchAsm(AsmPieces[0], "bswap", "%eax") &&
26703 matchAsm(AsmPieces[1], "bswap", "%edx") &&
26704 matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx"))
26705 return IntrinsicLowering::LowerToByteSwap(CI);
26713 /// getConstraintType - Given a constraint letter, return the type of
26714 /// constraint it is for this target.
26715 X86TargetLowering::ConstraintType
26716 X86TargetLowering::getConstraintType(const std::string &Constraint) const {
26717 if (Constraint.size() == 1) {
26718 switch (Constraint[0]) {
26729 return C_RegisterClass;
26753 return TargetLowering::getConstraintType(Constraint);
26756 /// Examine constraint type and operand type and determine a weight value.
26757 /// This object must already have been set up with the operand type
26758 /// and the current alternative constraint selected.
26759 TargetLowering::ConstraintWeight
26760 X86TargetLowering::getSingleConstraintMatchWeight(
26761 AsmOperandInfo &info, const char *constraint) const {
26762 ConstraintWeight weight = CW_Invalid;
26763 Value *CallOperandVal = info.CallOperandVal;
26764 // If we don't have a value, we can't do a match,
26765 // but allow it at the lowest weight.
26766 if (!CallOperandVal)
26768 Type *type = CallOperandVal->getType();
26769 // Look at the constraint type.
26770 switch (*constraint) {
26772 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
26783 if (CallOperandVal->getType()->isIntegerTy())
26784 weight = CW_SpecificReg;
26789 if (type->isFloatingPointTy())
26790 weight = CW_SpecificReg;
26793 if (type->isX86_MMXTy() && Subtarget->hasMMX())
26794 weight = CW_SpecificReg;
26798 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
26799 ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256()))
26800 weight = CW_Register;
26803 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
26804 if (C->getZExtValue() <= 31)
26805 weight = CW_Constant;
26809 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26810 if (C->getZExtValue() <= 63)
26811 weight = CW_Constant;
26815 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26816 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
26817 weight = CW_Constant;
26821 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26822 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
26823 weight = CW_Constant;
26827 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26828 if (C->getZExtValue() <= 3)
26829 weight = CW_Constant;
26833 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26834 if (C->getZExtValue() <= 0xff)
26835 weight = CW_Constant;
26840 if (dyn_cast<ConstantFP>(CallOperandVal)) {
26841 weight = CW_Constant;
26845 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26846 if ((C->getSExtValue() >= -0x80000000LL) &&
26847 (C->getSExtValue() <= 0x7fffffffLL))
26848 weight = CW_Constant;
26852 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26853 if (C->getZExtValue() <= 0xffffffff)
26854 weight = CW_Constant;
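// For illustration (hypothetical usage): given a multi-alternative constraint
// such as "Nr", a small constant operand that satisfies one of the immediate
// ranges above scores CW_Constant (the best generic weight), so the constant
// alternative is preferred over the plain register alternative.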
26861 /// LowerXConstraint - try to replace an X constraint, which matches anything,
26862 /// with another that has more specific requirements based on the type of the
26863 /// corresponding operand.
26864 const char *X86TargetLowering::
26865 LowerXConstraint(EVT ConstraintVT) const {
26866 // FP X constraints get lowered to SSE1/2 registers if available, otherwise
26867 // 'f' like normal targets.
26868 if (ConstraintVT.isFloatingPoint()) {
26869 if (Subtarget->hasSSE2())
26871 if (Subtarget->hasSSE1())
26875 return TargetLowering::LowerXConstraint(ConstraintVT);
26878 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
26879 /// vector. If it is invalid, don't add anything to Ops.
26880 void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
26881 std::string &Constraint,
26882 std::vector<SDValue>&Ops,
26883 SelectionDAG &DAG) const {
26886 // Only support length 1 constraints for now.
26887 if (Constraint.length() > 1) return;
26889 char ConstraintLetter = Constraint[0];
26890 switch (ConstraintLetter) {
26893 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26894 if (C->getZExtValue() <= 31) {
26895 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26901 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26902 if (C->getZExtValue() <= 63) {
26903 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26909 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26910 if (isInt<8>(C->getSExtValue())) {
26911 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26917 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26918 if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
26919 (Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
26920 Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
26926 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26927 if (C->getZExtValue() <= 3) {
26928 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26934 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26935 if (C->getZExtValue() <= 255) {
26936 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26942 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26943 if (C->getZExtValue() <= 127) {
26944 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26950 // 32-bit signed value
26951 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26952 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
26953 C->getSExtValue())) {
26954 // Widen to 64 bits here to get it sign extended.
26955 Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
26958 // FIXME gcc accepts some relocatable values here too, but only in certain
26959 // memory models; it's complicated.
26964 // 32-bit unsigned value
26965 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26966 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
26967 C->getZExtValue())) {
26968 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26972 // FIXME gcc accepts some relocatable values here too, but only in certain
26973 // memory models; it's complicated.
26977 // Literal immediates are always ok.
26978 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
26979 // Widen to 64 bits here to get it sign extended.
26980 Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
26984 // In any sort of PIC mode, addresses need to be computed at runtime by
26985 // adding in a register or some sort of table lookup. These can't
26986 // be used as immediates.
26987 if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
26990 // If we are in non-pic codegen mode, we allow the address of a global (with
26991 // an optional displacement) to be used with 'i'.
26992 GlobalAddressSDNode *GA = nullptr;
26993 int64_t Offset = 0;
26995 // Match either (GA), (GA+C), (GA+C1+C2), etc.
26997 if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
26998 Offset += GA->getOffset();
27000 } else if (Op.getOpcode() == ISD::ADD) {
27001 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
27002 Offset += C->getZExtValue();
27003 Op = Op.getOperand(0);
27006 } else if (Op.getOpcode() == ISD::SUB) {
27007 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
27008 Offset += -C->getZExtValue();
27009 Op = Op.getOperand(0);
27014 // Otherwise, this isn't something we can handle; reject it.
27018 const GlobalValue *GV = GA->getGlobal();
27019 // If we require an extra load to get this address, as in PIC mode, we
27020 // can't accept it.
27021 if (isGlobalStubReference(
27022 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget())))
27025 Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
27026 GA->getValueType(0), Offset);
27031 if (Result.getNode()) {
27032 Ops.push_back(Result);
27035 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
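// For illustration (hypothetical usage, GCC-style constraints assumed):
//   asm("pslld %1, %0" : "+x"(v) : "I"(4));
// The 'I' case above only emits a target constant for values in [0, 31];
// anything out of range adds nothing to Ops, and the operand is then treated
// as invalid by the caller.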
27038 std::pair<unsigned, const TargetRegisterClass*>
27039 X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
27041 // First, see if this is a constraint that directly corresponds to an LLVM register class.
27043 if (Constraint.size() == 1) {
27044 // GCC Constraint Letters
27045 switch (Constraint[0]) {
27047 // TODO: Slight differences here in allocation order and leaving
27048 // RIP in the class. Do they matter any more here than they do
27049 // in the normal allocation?
27050 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
27051 if (Subtarget->is64Bit()) {
27052 if (VT == MVT::i32 || VT == MVT::f32)
27053 return std::make_pair(0U, &X86::GR32RegClass);
27054 if (VT == MVT::i16)
27055 return std::make_pair(0U, &X86::GR16RegClass);
27056 if (VT == MVT::i8 || VT == MVT::i1)
27057 return std::make_pair(0U, &X86::GR8RegClass);
27058 if (VT == MVT::i64 || VT == MVT::f64)
27059 return std::make_pair(0U, &X86::GR64RegClass);
27062 // 32-bit fallthrough
27063 case 'Q': // Q_REGS
27064 if (VT == MVT::i32 || VT == MVT::f32)
27065 return std::make_pair(0U, &X86::GR32_ABCDRegClass);
27066 if (VT == MVT::i16)
27067 return std::make_pair(0U, &X86::GR16_ABCDRegClass);
27068 if (VT == MVT::i8 || VT == MVT::i1)
27069 return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
27070 if (VT == MVT::i64)
27071 return std::make_pair(0U, &X86::GR64_ABCDRegClass);
27073 case 'r': // GENERAL_REGS
27074 case 'l': // INDEX_REGS
27075 if (VT == MVT::i8 || VT == MVT::i1)
27076 return std::make_pair(0U, &X86::GR8RegClass);
27077 if (VT == MVT::i16)
27078 return std::make_pair(0U, &X86::GR16RegClass);
27079 if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
27080 return std::make_pair(0U, &X86::GR32RegClass);
27081 return std::make_pair(0U, &X86::GR64RegClass);
27082 case 'R': // LEGACY_REGS
27083 if (VT == MVT::i8 || VT == MVT::i1)
27084 return std::make_pair(0U, &X86::GR8_NOREXRegClass);
27085 if (VT == MVT::i16)
27086 return std::make_pair(0U, &X86::GR16_NOREXRegClass);
27087 if (VT == MVT::i32 || !Subtarget->is64Bit())
27088 return std::make_pair(0U, &X86::GR32_NOREXRegClass);
27089 return std::make_pair(0U, &X86::GR64_NOREXRegClass);
27090 case 'f': // FP Stack registers.
27091 // If SSE is enabled for this VT, use f80 to ensure the isel moves the
27092 // value to the correct fpstack register class.
27093 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
27094 return std::make_pair(0U, &X86::RFP32RegClass);
27095 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
27096 return std::make_pair(0U, &X86::RFP64RegClass);
27097 return std::make_pair(0U, &X86::RFP80RegClass);
27098 case 'y': // MMX_REGS if MMX allowed.
27099 if (!Subtarget->hasMMX()) break;
27100 return std::make_pair(0U, &X86::VR64RegClass);
27101 case 'Y': // SSE_REGS if SSE2 allowed
27102 if (!Subtarget->hasSSE2()) break;
27104 case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
27105 if (!Subtarget->hasSSE1()) break;
27107 switch (VT.SimpleTy) {
27109 // Scalar SSE types.
27112 return std::make_pair(0U, &X86::FR32RegClass);
27115 return std::make_pair(0U, &X86::FR64RegClass);
27123 return std::make_pair(0U, &X86::VR128RegClass);
27131 return std::make_pair(0U, &X86::VR256RegClass);
27136 return std::make_pair(0U, &X86::VR512RegClass);
27142 // Use the default implementation in TargetLowering to convert the register
27143 // constraint into a member of a register class.
27144 std::pair<unsigned, const TargetRegisterClass*> Res;
27145 Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
27147 // Not found as a standard register?
27149 // Map "st(0)" .. "st(7)" to the corresponding FP-stack register.
27150 if (Constraint.size() == 7 && Constraint[0] == '{' &&
27151 tolower(Constraint[1]) == 's' &&
27152 tolower(Constraint[2]) == 't' &&
27153 Constraint[3] == '(' &&
27154 (Constraint[4] >= '0' && Constraint[4] <= '7') &&
27155 Constraint[5] == ')' &&
27156 Constraint[6] == '}') {
27158 Res.first = X86::FP0+Constraint[4]-'0';
27159 Res.second = &X86::RFP80RegClass;
27163 // GCC allows "st(0)" to be called just plain "st".
27164 if (StringRef("{st}").equals_lower(Constraint)) {
27165 Res.first = X86::FP0;
27166 Res.second = &X86::RFP80RegClass;
27171 if (StringRef("{flags}").equals_lower(Constraint)) {
27172 Res.first = X86::EFLAGS;
27173 Res.second = &X86::CCRRegClass;
27177 // 'A' means EAX + EDX.
27178 if (Constraint == "A") {
27179 Res.first = X86::EAX;
27180 Res.second = &X86::GR32_ADRegClass;
27186 // Otherwise, check to see if this is a register class of the wrong value
27187 // type. For example, we want to map "{ax},i32" -> {eax}; we don't want it to
27188 // turn into {ax},{dx}.
27189 if (Res.second->hasType(VT))
27190 return Res; // Correct type already, nothing to do.
27192 // All of the single-register GCC register classes map their values onto
27193 // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
27194 // really want an 8-bit, 32-bit, or 64-bit register, map to the appropriate
27195 // register class and return the appropriate register.
27196 if (Res.second == &X86::GR16RegClass) {
27197 if (VT == MVT::i8 || VT == MVT::i1) {
27198 unsigned DestReg = 0;
27199 switch (Res.first) {
27201 case X86::AX: DestReg = X86::AL; break;
27202 case X86::DX: DestReg = X86::DL; break;
27203 case X86::CX: DestReg = X86::CL; break;
27204 case X86::BX: DestReg = X86::BL; break;
27207 Res.first = DestReg;
27208 Res.second = &X86::GR8RegClass;
27210 } else if (VT == MVT::i32 || VT == MVT::f32) {
27211 unsigned DestReg = 0;
27212 switch (Res.first) {
27214 case X86::AX: DestReg = X86::EAX; break;
27215 case X86::DX: DestReg = X86::EDX; break;
27216 case X86::CX: DestReg = X86::ECX; break;
27217 case X86::BX: DestReg = X86::EBX; break;
27218 case X86::SI: DestReg = X86::ESI; break;
27219 case X86::DI: DestReg = X86::EDI; break;
27220 case X86::BP: DestReg = X86::EBP; break;
27221 case X86::SP: DestReg = X86::ESP; break;
27224 Res.first = DestReg;
27225 Res.second = &X86::GR32RegClass;
27227 } else if (VT == MVT::i64 || VT == MVT::f64) {
27228 unsigned DestReg = 0;
27229 switch (Res.first) {
27231 case X86::AX: DestReg = X86::RAX; break;
27232 case X86::DX: DestReg = X86::RDX; break;
27233 case X86::CX: DestReg = X86::RCX; break;
27234 case X86::BX: DestReg = X86::RBX; break;
27235 case X86::SI: DestReg = X86::RSI; break;
27236 case X86::DI: DestReg = X86::RDI; break;
27237 case X86::BP: DestReg = X86::RBP; break;
27238 case X86::SP: DestReg = X86::RSP; break;
27241 Res.first = DestReg;
27242 Res.second = &X86::GR64RegClass;
27245 } else if (Res.second == &X86::FR32RegClass ||
27246 Res.second == &X86::FR64RegClass ||
27247 Res.second == &X86::VR128RegClass ||
27248 Res.second == &X86::VR256RegClass ||
27249 Res.second == &X86::FR32XRegClass ||
27250 Res.second == &X86::FR64XRegClass ||
27251 Res.second == &X86::VR128XRegClass ||
27252 Res.second == &X86::VR256XRegClass ||
27253 Res.second == &X86::VR512RegClass) {
27254 // Handle references to XMM physical registers that got mapped into the
27255 // wrong class. This can happen with constraints like {xmm0} where the
27256 // target independent register mapper will just pick the first match it can
27257 // find, ignoring the required type.
27259 if (VT == MVT::f32 || VT == MVT::i32)
27260 Res.second = &X86::FR32RegClass;
27261 else if (VT == MVT::f64 || VT == MVT::i64)
27262 Res.second = &X86::FR64RegClass;
27263 else if (X86::VR128RegClass.hasType(VT))
27264 Res.second = &X86::VR128RegClass;
27265 else if (X86::VR256RegClass.hasType(VT))
27266 Res.second = &X86::VR256RegClass;
27267 else if (X86::VR512RegClass.hasType(VT))
27268 Res.second = &X86::VR512RegClass;
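// For illustration (hypothetical usage): on x86-64,
//   asm("addl %1, %0" : "+r"(x) : "r"(y));   // i32 operands -> GR32
//   asm("addq %1, %0" : "+r"(p) : "r"(q));   // i64 operands -> GR64
// and an explicit "{ax}" constraint paired with an i32 value is remapped
// from AX to EAX (or to RAX for i64) by the fix-ups above.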
27274 int X86TargetLowering::getScalingFactorCost(const AddrMode &AM,
27276 // Scaling factors are not free at all.
27277 // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
27278 // will take 2 allocations in the out-of-order engine instead of 1
27279 // for plain addressing mode, i.e. inst (reg1).
27281 // vaddps (%rsi,%rdx), %ymm0, %ymm1
27282 // Requires two allocations (one for the load, one for the computation)
27284 // vaddps (%rsi), %ymm0, %ymm1
27285 // Requires just 1 allocation, i.e., freeing allocations for other operations
27286 // and having less micro operations to execute.
27288 // For some X86 architectures, this is even worse because, for stores for
27289 // instance, the complex addressing mode forces the instruction to use the
27290 // "load" ports instead of the dedicated "store" port.
27291 // E.g., on Haswell:
27292 // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
27293 // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
27294 if (isLegalAddressingMode(AM, Ty))
27295 // Scale represents reg2 * scale, thus account for 1
27296 // as soon as we use a second register.
27297 return AM.Scale != 0;
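// For illustration: a plain (%rdi) access has AM.Scale == 0 and reports no
// extra cost, while (%rdi,%rsi) or (%rdi,%rsi,4) have AM.Scale != 0 and
// report an extra cost of 1 for the second register.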
27301 bool X86TargetLowering::isTargetFTOL() const {
27302 return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit();