//===-- X86ISelLowering.cpp - X86 DAG Lowering Implementation -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that X86 uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//
#include "X86ISelLowering.h"
#include "Utils/X86ShuffleDecode.h"
#include "X86CallingConv.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86TargetMachine.h"
#include "X86TargetObjectFile.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/VariadicFunction.h"
#include "llvm/CodeGen/IntrinsicLowering.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "X86IntrinsicsInfo.h"

using namespace llvm;

#define DEBUG_TYPE "x86-isel"

STATISTIC(NumTailCalls, "Number of tail calls");

static cl::opt<bool> ExperimentalVectorWideningLegalization(
    "x86-experimental-vector-widening-legalization", cl::init(false),
    cl::desc("Enable an experimental vector type legalization through widening "
             "rather than promotion."),
    cl::Hidden);
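// Illustrative note (an assumption about typical behavior, not taken from this
// file): under widening legalization an illegal <4 x i16> is widened in place
// to the legal <8 x i16>, whereas the default promotion path instead promotes
// the elements, turning it into <4 x i32>.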

static cl::opt<bool> ExperimentalVectorShuffleLowering(
    "x86-experimental-vector-shuffle-lowering", cl::init(true),
    cl::desc("Enable an experimental vector shuffle lowering code path."),
    cl::Hidden);

static cl::opt<bool> ExperimentalVectorShuffleLegality(
    "x86-experimental-vector-shuffle-legality", cl::init(false),
    cl::desc("Enable experimental shuffle legality based on the experimental "
             "shuffle lowering. Should only be used with the experimental "
             "shuffle lowering."),
    cl::Hidden);

static cl::opt<int> ReciprocalEstimateRefinementSteps(
    "x86-recip-refinement-steps", cl::init(1),
    cl::desc("Specify the number of Newton-Raphson iterations applied to the "
             "result of the hardware reciprocal estimate instruction."),
    cl::NotHidden);
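// Note: one Newton-Raphson refinement step improves a reciprocal estimate
// x0 ~= 1/a via x1 = x0 * (2 - a * x0), roughly doubling the number of correct
// bits, so cl::init(1) trades a short multiply/subtract sequence for
// substantially better accuracy than the raw hardware estimate alone.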

// Forward declarations.
static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
                       SDValue V2);

static SDValue ExtractSubVector(SDValue Vec, unsigned IdxVal,
                                SelectionDAG &DAG, SDLoc dl,
                                unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  unsigned Factor = VT.getSizeInBits()/vectorWidth;
  EVT ResultVT = EVT::getVectorVT(*DAG.getContext(), ElVT,
                                  VT.getVectorNumElements()/Factor);

  // Extract from UNDEF is UNDEF.
  if (Vec.getOpcode() == ISD::UNDEF)
    return DAG.getUNDEF(ResultVT);

  // Extract the relevant vectorWidth bits. Generate an EXTRACT_SUBVECTOR.
  unsigned ElemsPerChunk = vectorWidth / ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits()) / vectorWidth)
                               * ElemsPerChunk);

  // If the input is a buildvector just emit a smaller one.
  if (Vec.getOpcode() == ISD::BUILD_VECTOR)
    return DAG.getNode(ISD::BUILD_VECTOR, dl, ResultVT,
                       makeArrayRef(Vec->op_begin() + NormalizedIdxVal,
                                    ElemsPerChunk));

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, ResultVT, Vec, VecIdx);
}
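
// Worked example for the chunk-index normalization in ExtractSubVector above
// (illustrative only, not part of the original source): extracting around
// element 5 of a v8i32 with vectorWidth = 128 gives
// ElemsPerChunk = 128 / 32 = 4 and NormalizedIdxVal = ((5 * 32) / 128) * 4 = 4,
// i.e. the extract is aligned down to the first element of the upper
// 128-bit half.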

/// Generate a DAG to grab 128-bits from a vector > 128 bits. This
/// sets things up to match to an AVX VEXTRACTF128 / VEXTRACTI128
/// or AVX-512 VEXTRACTF32x4 / VEXTRACTI32x4
/// instructions or a simple subregister reference. Idx is an index in the
/// 128 bits we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering EXTRACT_VECTOR_ELT operations easier.
static SDValue Extract128BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert((Vec.getValueType().is256BitVector() ||
          Vec.getValueType().is512BitVector()) && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 128);
}

/// Generate a DAG to grab 256-bits from a 512-bit vector.
static SDValue Extract256BitVector(SDValue Vec, unsigned IdxVal,
                                   SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is512BitVector() && "Unexpected vector size!");
  return ExtractSubVector(Vec, IdxVal, DAG, dl, 256);
}

static SDValue InsertSubVector(SDValue Result, SDValue Vec,
                               unsigned IdxVal, SelectionDAG &DAG,
                               SDLoc dl, unsigned vectorWidth) {
  assert((vectorWidth == 128 || vectorWidth == 256) &&
         "Unsupported vector width");
  // Inserting UNDEF is Result
  if (Vec.getOpcode() == ISD::UNDEF)
    return Result;
  EVT VT = Vec.getValueType();
  EVT ElVT = VT.getVectorElementType();
  EVT ResultVT = Result.getValueType();

  // Insert the relevant vectorWidth bits.
  unsigned ElemsPerChunk = vectorWidth/ElVT.getSizeInBits();

  // This is the index of the first element of the vectorWidth-bit chunk
  // we want.
  unsigned NormalizedIdxVal = (((IdxVal * ElVT.getSizeInBits())/vectorWidth)
                               * ElemsPerChunk);

  SDValue VecIdx = DAG.getIntPtrConstant(NormalizedIdxVal);
  return DAG.getNode(ISD::INSERT_SUBVECTOR, dl, ResultVT, Result, Vec, VecIdx);
}

/// Generate a DAG to put 128-bits into a vector > 128 bits. This
/// sets things up to match to an AVX VINSERTF128/VINSERTI128 or
/// AVX-512 VINSERTF32x4/VINSERTI32x4 instructions or a
/// simple superregister reference. Idx is an index in the 128 bits
/// we want. It need not be aligned to a 128-bit boundary. That makes
/// lowering INSERT_VECTOR_ELT operations easier.
static SDValue Insert128BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is128BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 128);
}

static SDValue Insert256BitVector(SDValue Result, SDValue Vec, unsigned IdxVal,
                                  SelectionDAG &DAG, SDLoc dl) {
  assert(Vec.getValueType().is256BitVector() && "Unexpected vector size!");
  return InsertSubVector(Result, Vec, IdxVal, DAG, dl, 256);
}

/// Concat two 128-bit vectors into a 256 bit vector using VINSERTF128
/// instructions. This is used because creating CONCAT_VECTOR nodes of
/// BUILD_VECTORS returns a larger BUILD_VECTOR while we're trying to lower
/// large BUILD_VECTORS.
static SDValue Concat128BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert128BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert128BitVector(V, V2, NumElems/2, DAG, dl);
}

static SDValue Concat256BitVectors(SDValue V1, SDValue V2, EVT VT,
                                   unsigned NumElems, SelectionDAG &DAG,
                                   SDLoc dl) {
  SDValue V = Insert256BitVector(DAG.getUNDEF(VT), V1, 0, DAG, dl);
  return Insert256BitVector(V, V2, NumElems/2, DAG, dl);
}
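
// Illustrative usage of the helpers above (hypothetical values, not from this
// file): concatenating two v4i32 halves Lo and Hi into a v8i32,
//   SDValue Wide = Concat128BitVectors(Lo, Hi, MVT::v8i32, 8, DAG, dl);
// inserts Lo at element 0 and Hi at element NumElems/2 == 4 of an UNDEF
// v8i32, which matches a VINSERTF128/VINSERTI128 pair.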

X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
                                     const X86Subtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  X86ScalarSSEf64 = Subtarget->hasSSE2();
  X86ScalarSSEf32 = Subtarget->hasSSE1();
  TD = getDataLayout();

  // Set up the TargetLowering object.
  static const MVT IntVTs[] = { MVT::i8, MVT::i16, MVT::i32, MVT::i64 };

  // X86 is weird. It always uses i8 for shift amounts and setcc results.
  setBooleanContents(ZeroOrOneBooleanContent);
  // X86-SSE is even stranger. It uses -1 or 0 for vector masks.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
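  // For example, a vector compare such as PCMPEQD writes all-ones (-1) into
  // each lane where the comparison holds and all-zeros elsewhere, which is
  // why vector masks use ZeroOrNegativeOneBooleanContent while scalar setcc
  // results are a plain 0 or 1 in an i8 register.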

  // For 64-bit, since we have so many registers, use the ILP scheduler.
  // For 32-bit, use the register pressure specific scheduling.
  // For Atom, always use ILP scheduling.
  if (Subtarget->isAtom())
    setSchedulingPreference(Sched::ILP);
  else if (Subtarget->is64Bit())
    setSchedulingPreference(Sched::ILP);
  else
    setSchedulingPreference(Sched::RegPressure);
  const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
  setStackPointerRegisterToSaveRestore(RegInfo->getStackRegister());

  // Bypass expensive divides on Atom when compiling with O2.
  if (TM.getOptLevel() >= CodeGenOpt::Default) {
    if (Subtarget->hasSlowDivide32())
      addBypassSlowDiv(32, 8);
    if (Subtarget->hasSlowDivide64() && Subtarget->is64Bit())
      addBypassSlowDiv(64, 16);
  }
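  // Illustrative note (behavioral summary, not from this file):
  // addBypassSlowDiv(32, 8) requests that each 32-bit divide be guarded by a
  // runtime check and dispatched to the much cheaper 8-bit divide when both
  // operands fit in 8 bits, falling back to the full-width divide otherwise.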

  if (Subtarget->isTargetKnownWindowsMSVC()) {
    // Setup Windows compiler runtime calls.
    setLibcallName(RTLIB::SDIV_I64, "_alldiv");
    setLibcallName(RTLIB::UDIV_I64, "_aulldiv");
    setLibcallName(RTLIB::SREM_I64, "_allrem");
    setLibcallName(RTLIB::UREM_I64, "_aullrem");
    setLibcallName(RTLIB::MUL_I64, "_allmul");
    setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UDIV_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::SREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::UREM_I64, CallingConv::X86_StdCall);
    setLibcallCallingConv(RTLIB::MUL_I64, CallingConv::X86_StdCall);

    // The _ftol2 runtime function has an unusual calling conv, which
    // is modeled by a special pseudo-instruction.
    setLibcallName(RTLIB::FPTOUINT_F64_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I64, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F64_I32, nullptr);
    setLibcallName(RTLIB::FPTOUINT_F32_I32, nullptr);
  }

  if (Subtarget->isTargetDarwin()) {
    // Darwin should use _setjmp/_longjmp instead of setjmp/longjmp.
    setUseUnderscoreSetJmp(false);
    setUseUnderscoreLongJmp(false);
  } else if (Subtarget->isTargetWindowsGNU()) {
    // MS runtime is weird: it exports _setjmp, but longjmp!
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(false);
  } else {
    setUseUnderscoreSetJmp(true);
    setUseUnderscoreLongJmp(true);
  }

  // Set up the register classes.
  addRegisterClass(MVT::i8, &X86::GR8RegClass);
  addRegisterClass(MVT::i16, &X86::GR16RegClass);
  addRegisterClass(MVT::i32, &X86::GR32RegClass);
  if (Subtarget->is64Bit())
    addRegisterClass(MVT::i64, &X86::GR64RegClass);

  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Promote);

  // We don't accept any truncstore of integer registers.
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i16, Expand);
  setTruncStoreAction(MVT::i64, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i32, MVT::i16, Expand);
  setTruncStoreAction(MVT::i32, MVT::i8 , Expand);
  setTruncStoreAction(MVT::i16, MVT::i8, Expand);

  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // SETOEQ and SETUNE require checking two conditions.
  setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
  setCondCodeAction(ISD::SETOEQ, MVT::f80, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f32, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
  setCondCodeAction(ISD::SETUNE, MVT::f80, Expand);

  // Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
  // operation.
  setOperationAction(ISD::UINT_TO_FP , MVT::i1 , Promote);
  setOperationAction(ISD::UINT_TO_FP , MVT::i8 , Promote);
  setOperationAction(ISD::UINT_TO_FP , MVT::i16 , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
    setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
  } else if (!TM.Options.UseSoftFloat) {
    // We have an algorithm for SSE2->double, and we turn this into a
    // 64-bit FILD followed by conditional FADD for other targets.
    setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Custom);
    // We have an algorithm for SSE2, and we turn this into a 64-bit
    // FILD for other targets.
    setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Custom);
  }

  // Promote i1/i8 SINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
  setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);

  if (!TM.Options.UseSoftFloat) {
    // SSE has no i16 to fp conversion, only i32
    if (X86ScalarSSEf32) {
      setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
      // f32 and f64 cases are Legal, f80 case is not
      setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
    } else {
      setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom);
      setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
    }
  } else {
    setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
    setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Promote);
  }

  // In 32-bit mode these are custom lowered. In 64-bit mode F32 and F64
  // are Legal, f80 is custom lowered.
  setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
  setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);

  // Promote i1/i8 FP_TO_SINT to larger FP_TO_SINT's, as X86 doesn't have
  // this operation.
  setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
  setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);

  if (X86ScalarSSEf32) {
    setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
    // f32 and f64 cases are Legal, f80 case is not
    setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
  } else {
    setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom);
    setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
  }

  // Handle FP_TO_UINT by promoting the destination to a larger signed
  // conversion.
  setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote);
  setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
  setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);

  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand);
    setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
  } else if (!TM.Options.UseSoftFloat) {
    // Since AVX is a superset of SSE3, only check for SSE here.
    if (Subtarget->hasSSE1() && !Subtarget->hasSSE3())
      // Expand FP_TO_UINT into a select.
      // FIXME: We would like to use a Custom expander here eventually to do
      // the optimal thing for SSE vs. the default expansion in the legalizer.
      setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand);
    else
      // With SSE3 we can use fisttpll to convert to a signed i64; without
      // SSE, we're stuck with a fistpll.
      setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Custom);
  }

  if (isTargetFTOL()) {
    // Use the _ftol2 runtime function, which has a pseudo-instruction
    // to handle its weird calling convention.
    setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Custom);
  }

  // TODO: when we have SSE, these could be more efficient, by using movd/movq.
  if (!X86ScalarSSEf64) {
    setOperationAction(ISD::BITCAST , MVT::f32 , Expand);
    setOperationAction(ISD::BITCAST , MVT::i32 , Expand);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::BITCAST , MVT::f64 , Expand);
      // Without SSE, i64->f64 goes through memory.
      setOperationAction(ISD::BITCAST , MVT::i64 , Expand);
    }
  }

  // Scalar integer divide and remainder are lowered to use operations that
  // produce two results, to match the available instructions. This exposes
  // the two-result form to trivial CSE, which is able to combine x/y and x%y
  // into a single instruction.
  //
  // Scalar integer multiply-high is also lowered to use two-result
  // operations, to match the available instructions. However, plain multiply
  // (low) operations are left as Legal, as there are single-result
  // instructions for this in x86. Using the two-result multiply instructions
  // when both high and low results are needed must be arranged by dagcombine.
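  // Illustrative example (not from this file): for the IR pair
  //   %q = sdiv i32 %x, %y
  //   %r = srem i32 %x, %y
  // the two-result form lets CSE fold both values into one IDIV, which
  // already produces the quotient in EAX and the remainder in EDX.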
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);

    // Add/Sub overflow ops with MVT::Glues are lowered to EFLAGS dependences.
    setOperationAction(ISD::ADDC, VT, Custom);
    setOperationAction(ISD::ADDE, VT, Custom);
    setOperationAction(ISD::SUBC, VT, Custom);
    setOperationAction(ISD::SUBE, VT, Custom);
  }

  setOperationAction(ISD::BR_JT , MVT::Other, Expand);
  setOperationAction(ISD::BRCOND , MVT::Other, Custom);
  setOperationAction(ISD::BR_CC , MVT::f32, Expand);
  setOperationAction(ISD::BR_CC , MVT::f64, Expand);
  setOperationAction(ISD::BR_CC , MVT::f80, Expand);
  setOperationAction(ISD::BR_CC , MVT::i8, Expand);
  setOperationAction(ISD::BR_CC , MVT::i16, Expand);
  setOperationAction(ISD::BR_CC , MVT::i32, Expand);
  setOperationAction(ISD::BR_CC , MVT::i64, Expand);
  setOperationAction(ISD::SELECT_CC , MVT::f32, Expand);
  setOperationAction(ISD::SELECT_CC , MVT::f64, Expand);
  setOperationAction(ISD::SELECT_CC , MVT::f80, Expand);
  setOperationAction(ISD::SELECT_CC , MVT::i8, Expand);
  setOperationAction(ISD::SELECT_CC , MVT::i16, Expand);
  setOperationAction(ISD::SELECT_CC , MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC , MVT::i64, Expand);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i32, Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8 , Legal);
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
  setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
  setOperationAction(ISD::FREM , MVT::f32 , Expand);
  setOperationAction(ISD::FREM , MVT::f64 , Expand);
  setOperationAction(ISD::FREM , MVT::f80 , Expand);
  setOperationAction(ISD::FLT_ROUNDS_ , MVT::i32 , Custom);

  // Promote the i8 variants and force them on up to i32 which has a shorter
  // encoding.
  setOperationAction(ISD::CTTZ , MVT::i8 , Promote);
  AddPromotedToType (ISD::CTTZ , MVT::i8 , MVT::i32);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF , MVT::i8 , Promote);
  AddPromotedToType (ISD::CTTZ_ZERO_UNDEF , MVT::i8 , MVT::i32);
  if (Subtarget->hasBMI()) {
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i16 , Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32 , Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTTZ , MVT::i16 , Custom);
    setOperationAction(ISD::CTTZ , MVT::i32 , Custom);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTTZ , MVT::i64 , Custom);
  }

  if (Subtarget->hasLZCNT()) {
    // When promoting the i8 variants, force them to i32 for a shorter
    // encoding.
    setOperationAction(ISD::CTLZ , MVT::i8 , Promote);
    AddPromotedToType (ISD::CTLZ , MVT::i8 , MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Promote);
    AddPromotedToType (ISD::CTLZ_ZERO_UNDEF, MVT::i8 , MVT::i32);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  } else {
    setOperationAction(ISD::CTLZ , MVT::i8 , Custom);
    setOperationAction(ISD::CTLZ , MVT::i16 , Custom);
    setOperationAction(ISD::CTLZ , MVT::i32 , Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i8 , Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i16 , Custom);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32 , Custom);
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::CTLZ , MVT::i64 , Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Custom);
    }
  }

  // Special handling for half-precision floating point conversions.
  // If we don't have F16C support, then lower half float conversions
  // into library calls.
  if (TM.Options.UseSoftFloat || !Subtarget->hasF16C()) {
    setOperationAction(ISD::FP16_TO_FP, MVT::f32, Expand);
    setOperationAction(ISD::FP_TO_FP16, MVT::f32, Expand);
  }

  // There's never any support for operations beyond MVT::f32.
  setOperationAction(ISD::FP16_TO_FP, MVT::f64, Expand);
  setOperationAction(ISD::FP16_TO_FP, MVT::f80, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f64, Expand);
  setOperationAction(ISD::FP_TO_FP16, MVT::f80, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
  setLoadExtAction(ISD::EXTLOAD, MVT::f80, MVT::f16, Expand);
  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f80, MVT::f16, Expand);

  if (Subtarget->hasPOPCNT()) {
    setOperationAction(ISD::CTPOP , MVT::i8 , Promote);
  } else {
    setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
    setOperationAction(ISD::CTPOP , MVT::i16 , Expand);
    setOperationAction(ISD::CTPOP , MVT::i32 , Expand);
    if (Subtarget->is64Bit())
      setOperationAction(ISD::CTPOP , MVT::i64 , Expand);
  }

  setOperationAction(ISD::READCYCLECOUNTER , MVT::i64 , Custom);

  if (!Subtarget->hasMOVBE())
    setOperationAction(ISD::BSWAP , MVT::i16 , Expand);

  // These should be promoted to a larger select which is supported.
  setOperationAction(ISD::SELECT , MVT::i1 , Promote);
  // X86 wants to expand cmov itself.
  setOperationAction(ISD::SELECT , MVT::i8 , Custom);
  setOperationAction(ISD::SELECT , MVT::i16 , Custom);
  setOperationAction(ISD::SELECT , MVT::i32 , Custom);
  setOperationAction(ISD::SELECT , MVT::f32 , Custom);
  setOperationAction(ISD::SELECT , MVT::f64 , Custom);
  setOperationAction(ISD::SELECT , MVT::f80 , Custom);
  setOperationAction(ISD::SETCC , MVT::i8 , Custom);
  setOperationAction(ISD::SETCC , MVT::i16 , Custom);
  setOperationAction(ISD::SETCC , MVT::i32 , Custom);
  setOperationAction(ISD::SETCC , MVT::f32 , Custom);
  setOperationAction(ISD::SETCC , MVT::f64 , Custom);
  setOperationAction(ISD::SETCC , MVT::f80 , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SELECT , MVT::i64 , Custom);
    setOperationAction(ISD::SETCC , MVT::i64 , Custom);
  }
  setOperationAction(ISD::EH_RETURN , MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP are not intended to support SjLj exception
  // handling; they are a lightweight setjmp/longjmp replacement used to
  // support continuation, user-level threading, and so on. As a result, no
  // other SjLj exception interfaces are implemented, and please don't build
  // your own exception handling based on them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  setOperationAction(ISD::ConstantPool , MVT::i32 , Custom);
  setOperationAction(ISD::JumpTable , MVT::i32 , Custom);
  setOperationAction(ISD::GlobalAddress , MVT::i32 , Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32 , Custom);
  if (Subtarget->is64Bit())
    setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::ExternalSymbol , MVT::i32 , Custom);
  setOperationAction(ISD::BlockAddress , MVT::i32 , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::ConstantPool , MVT::i64 , Custom);
    setOperationAction(ISD::JumpTable , MVT::i64 , Custom);
    setOperationAction(ISD::GlobalAddress , MVT::i64 , Custom);
    setOperationAction(ISD::ExternalSymbol, MVT::i64 , Custom);
    setOperationAction(ISD::BlockAddress , MVT::i64 , Custom);
  }

  // 64-bit add, sub, shl, sra, srl (iff 32-bit x86)
  setOperationAction(ISD::SHL_PARTS , MVT::i32 , Custom);
  setOperationAction(ISD::SRA_PARTS , MVT::i32 , Custom);
  setOperationAction(ISD::SRL_PARTS , MVT::i32 , Custom);
  if (Subtarget->is64Bit()) {
    setOperationAction(ISD::SHL_PARTS , MVT::i64 , Custom);
    setOperationAction(ISD::SRA_PARTS , MVT::i64 , Custom);
    setOperationAction(ISD::SRL_PARTS , MVT::i64 , Custom);
  }

  if (Subtarget->hasSSE1())
    setOperationAction(ISD::PREFETCH , MVT::Other, Legal);

  setOperationAction(ISD::ATOMIC_FENCE , MVT::Other, Custom);

  // Expand certain atomics
  for (unsigned i = 0; i != array_lengthof(IntVTs); ++i) {
    MVT VT = IntVTs[i];
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, VT, Custom);
    setOperationAction(ISD::ATOMIC_LOAD_SUB, VT, Custom);
    setOperationAction(ISD::ATOMIC_STORE, VT, Custom);
  }

  if (Subtarget->hasCmpxchg16b()) {
    setOperationAction(ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS, MVT::i128, Custom);
  }

  // FIXME - use subtarget debug flags
  if (!Subtarget->isTargetDarwin() && !Subtarget->isTargetELF() &&
      !Subtarget->isTargetCygMing() && !Subtarget->isTargetWin64()) {
    setOperationAction(ISD::EH_LABEL, MVT::Other, Expand);
  }

  if (Subtarget->is64Bit()) {
    setExceptionPointerRegister(X86::RAX);
    setExceptionSelectorRegister(X86::RDX);
  } else {
    setExceptionPointerRegister(X86::EAX);
    setExceptionSelectorRegister(X86::EDX);
  }
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
  setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i64, Custom);

  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex
  setOperationAction(ISD::VASTART , MVT::Other, Custom);
  setOperationAction(ISD::VAEND , MVT::Other, Expand);
  if (Subtarget->is64Bit() && !Subtarget->isTargetWin64()) {
    // TargetInfo::X86_64ABIBuiltinVaList
    setOperationAction(ISD::VAARG , MVT::Other, Custom);
    setOperationAction(ISD::VACOPY , MVT::Other, Custom);
  } else {
    // TargetInfo::CharPtrBuiltinVaList
    setOperationAction(ISD::VAARG , MVT::Other, Expand);
    setOperationAction(ISD::VACOPY , MVT::Other, Expand);
  }

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, getPointerTy(), Custom);

  if (!TM.Options.UseSoftFloat && X86ScalarSSEf64) {
    // f32 and f64 use SSE.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::FR64RegClass);

    // Use ANDPD to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f64, Custom);
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f64, Custom);
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    // Use ANDPD and ORPD to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // Lower this to FGETSIGNx86 plus an AND.
    setOperationAction(ISD::FGETSIGN, MVT::i64, Custom);
    setOperationAction(ISD::FGETSIGN, MVT::i32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN , MVT::f64, Expand);
    setOperationAction(ISD::FCOS , MVT::f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Expand FP immediates into loads from the stack, except for the special
    // cases we handle specially.
    addLegalFPImmediate(APFloat(+0.0)); // xorpd
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
  } else if (!TM.Options.UseSoftFloat && X86ScalarSSEf32) {
    // Use SSE for f32, x87 for f64.
    // Set up the FP register classes.
    addRegisterClass(MVT::f32, &X86::FR32RegClass);
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);

    // Use ANDPS to simulate FABS.
    setOperationAction(ISD::FABS , MVT::f32, Custom);

    // Use XORP to simulate FNEG.
    setOperationAction(ISD::FNEG , MVT::f32, Custom);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);

    // Use ANDPS and ORPS to simulate FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);

    // We don't support sin/cos/fmod
    setOperationAction(ISD::FSIN , MVT::f32, Expand);
    setOperationAction(ISD::FCOS , MVT::f32, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

    // Special cases we handle for FP constants.
    addLegalFPImmediate(APFloat(+0.0f)); // xorps
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN , MVT::f64, Expand);
      setOperationAction(ISD::FCOS , MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    }
  } else if (!TM.Options.UseSoftFloat) {
    // f32 and f64 in x87.
    // Set up the FP register classes.
    addRegisterClass(MVT::f64, &X86::RFP64RegClass);
    addRegisterClass(MVT::f32, &X86::RFP32RegClass);

    setOperationAction(ISD::UNDEF, MVT::f64, Expand);
    setOperationAction(ISD::UNDEF, MVT::f32, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN , MVT::f64, Expand);
      setOperationAction(ISD::FSIN , MVT::f32, Expand);
      setOperationAction(ISD::FCOS , MVT::f64, Expand);
      setOperationAction(ISD::FCOS , MVT::f32, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
    }
    addLegalFPImmediate(APFloat(+0.0)); // FLD0
    addLegalFPImmediate(APFloat(+1.0)); // FLD1
    addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
    addLegalFPImmediate(APFloat(+0.0f)); // FLD0
    addLegalFPImmediate(APFloat(+1.0f)); // FLD1
    addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
    addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
  }

  // We don't support FMA.
  setOperationAction(ISD::FMA, MVT::f64, Expand);
  setOperationAction(ISD::FMA, MVT::f32, Expand);

  // Long double always uses X87.
  if (!TM.Options.UseSoftFloat) {
    addRegisterClass(MVT::f80, &X86::RFP80RegClass);
    setOperationAction(ISD::UNDEF, MVT::f80, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
    {
      APFloat TmpFlt = APFloat::getZero(APFloat::x87DoubleExtended);
      addLegalFPImmediate(TmpFlt); // FLD0
      TmpFlt.changeSign();
      addLegalFPImmediate(TmpFlt); // FLD0/FCHS

      bool ignored;
      APFloat TmpFlt2(+1.0);
      TmpFlt2.convert(APFloat::x87DoubleExtended, APFloat::rmNearestTiesToEven,
                      &ignored);
      addLegalFPImmediate(TmpFlt2); // FLD1
      TmpFlt2.changeSign();
      addLegalFPImmediate(TmpFlt2); // FLD1/FCHS
    }

    if (!TM.Options.UnsafeFPMath) {
      setOperationAction(ISD::FSIN , MVT::f80, Expand);
      setOperationAction(ISD::FCOS , MVT::f80, Expand);
      setOperationAction(ISD::FSINCOS, MVT::f80, Expand);
    }

    setOperationAction(ISD::FFLOOR, MVT::f80, Expand);
    setOperationAction(ISD::FCEIL, MVT::f80, Expand);
    setOperationAction(ISD::FTRUNC, MVT::f80, Expand);
    setOperationAction(ISD::FRINT, MVT::f80, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::f80, Expand);
    setOperationAction(ISD::FMA, MVT::f80, Expand);
  }

  // Always use a library call for pow.
  setOperationAction(ISD::FPOW , MVT::f32 , Expand);
  setOperationAction(ISD::FPOW , MVT::f64 , Expand);
  setOperationAction(ISD::FPOW , MVT::f80 , Expand);

  setOperationAction(ISD::FLOG, MVT::f80, Expand);
  setOperationAction(ISD::FLOG2, MVT::f80, Expand);
  setOperationAction(ISD::FLOG10, MVT::f80, Expand);
  setOperationAction(ISD::FEXP, MVT::f80, Expand);
  setOperationAction(ISD::FEXP2, MVT::f80, Expand);
  setOperationAction(ISD::FMINNUM, MVT::f80, Expand);
  setOperationAction(ISD::FMAXNUM, MVT::f80, Expand);

  // First set operation action for all vector types to either promote
  // (for widening) or expand (for scalarization). Then we will selectively
  // turn on ones that can be effectively codegen'd.
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::ADD , VT, Expand);
    setOperationAction(ISD::SUB , VT, Expand);
    setOperationAction(ISD::FADD, VT, Expand);
    setOperationAction(ISD::FNEG, VT, Expand);
    setOperationAction(ISD::FSUB, VT, Expand);
    setOperationAction(ISD::MUL , VT, Expand);
    setOperationAction(ISD::FMUL, VT, Expand);
    setOperationAction(ISD::SDIV, VT, Expand);
    setOperationAction(ISD::UDIV, VT, Expand);
    setOperationAction(ISD::FDIV, VT, Expand);
    setOperationAction(ISD::SREM, VT, Expand);
    setOperationAction(ISD::UREM, VT, Expand);
    setOperationAction(ISD::LOAD, VT, Expand);
    setOperationAction(ISD::VECTOR_SHUFFLE, VT, Expand);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Expand);
    setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::INSERT_SUBVECTOR, VT, Expand);
    setOperationAction(ISD::FABS, VT, Expand);
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FSINCOS, VT, Expand);
    setOperationAction(ISD::FREM, VT, Expand);
    setOperationAction(ISD::FMA, VT, Expand);
    setOperationAction(ISD::FPOWI, VT, Expand);
    setOperationAction(ISD::FSQRT, VT, Expand);
    setOperationAction(ISD::FCOPYSIGN, VT, Expand);
    setOperationAction(ISD::FFLOOR, VT, Expand);
    setOperationAction(ISD::FCEIL, VT, Expand);
    setOperationAction(ISD::FTRUNC, VT, Expand);
    setOperationAction(ISD::FRINT, VT, Expand);
    setOperationAction(ISD::FNEARBYINT, VT, Expand);
    setOperationAction(ISD::SMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHS, VT, Expand);
    setOperationAction(ISD::UMUL_LOHI, VT, Expand);
    setOperationAction(ISD::MULHU, VT, Expand);
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::CTPOP, VT, Expand);
    setOperationAction(ISD::CTTZ, VT, Expand);
    setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::CTLZ, VT, Expand);
    setOperationAction(ISD::CTLZ_ZERO_UNDEF, VT, Expand);
    setOperationAction(ISD::SHL, VT, Expand);
    setOperationAction(ISD::SRA, VT, Expand);
    setOperationAction(ISD::SRL, VT, Expand);
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
    setOperationAction(ISD::BSWAP, VT, Expand);
    setOperationAction(ISD::SETCC, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);
    setOperationAction(ISD::FP_TO_UINT, VT, Expand);
    setOperationAction(ISD::FP_TO_SINT, VT, Expand);
    setOperationAction(ISD::UINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SINT_TO_FP, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);
    setOperationAction(ISD::TRUNCATE, VT, Expand);
    setOperationAction(ISD::SIGN_EXTEND, VT, Expand);
    setOperationAction(ISD::ZERO_EXTEND, VT, Expand);
    setOperationAction(ISD::ANY_EXTEND, VT, Expand);
    setOperationAction(ISD::VSELECT, VT, Expand);
    setOperationAction(ISD::SELECT_CC, VT, Expand);
    for (MVT InnerVT : MVT::vector_valuetypes()) {
      setTruncStoreAction(InnerVT, VT, Expand);

      setLoadExtAction(ISD::SEXTLOAD, InnerVT, VT, Expand);
      setLoadExtAction(ISD::ZEXTLOAD, InnerVT, VT, Expand);

      // N.b. ISD::EXTLOAD legality is basically ignored except for i1-like
      // types, we have to deal with them whether we ask for Expansion or not.
      // Setting Expand causes its own optimisation problems though, so leave
      // them legal.
      if (VT.getVectorElementType() == MVT::i1)
        setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);
    }
  }

  // FIXME: In order to prevent SSE instructions being expanded to MMX ones
  // with -msoft-float, disable use of MMX as well.
  if (!TM.Options.UseSoftFloat && Subtarget->hasMMX()) {
    addRegisterClass(MVT::x86mmx, &X86::VR64RegClass);
    // No operations on x86mmx supported, everything uses intrinsics.
  }

  // MMX-sized vectors (other than x86mmx) are expected to be expanded
  // into smaller operations.
  setOperationAction(ISD::MULHS, MVT::v8i8, Expand);
  setOperationAction(ISD::MULHS, MVT::v4i16, Expand);
  setOperationAction(ISD::MULHS, MVT::v2i32, Expand);
  setOperationAction(ISD::MULHS, MVT::v1i64, Expand);
  setOperationAction(ISD::AND, MVT::v8i8, Expand);
  setOperationAction(ISD::AND, MVT::v4i16, Expand);
  setOperationAction(ISD::AND, MVT::v2i32, Expand);
  setOperationAction(ISD::AND, MVT::v1i64, Expand);
  setOperationAction(ISD::OR, MVT::v8i8, Expand);
  setOperationAction(ISD::OR, MVT::v4i16, Expand);
  setOperationAction(ISD::OR, MVT::v2i32, Expand);
  setOperationAction(ISD::OR, MVT::v1i64, Expand);
  setOperationAction(ISD::XOR, MVT::v8i8, Expand);
  setOperationAction(ISD::XOR, MVT::v4i16, Expand);
  setOperationAction(ISD::XOR, MVT::v2i32, Expand);
  setOperationAction(ISD::XOR, MVT::v1i64, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i8, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v4i16, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v2i32, Expand);
  setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v1i64, Expand);
  setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v1i64, Expand);
  setOperationAction(ISD::SELECT, MVT::v8i8, Expand);
  setOperationAction(ISD::SELECT, MVT::v4i16, Expand);
  setOperationAction(ISD::SELECT, MVT::v2i32, Expand);
  setOperationAction(ISD::SELECT, MVT::v1i64, Expand);
  setOperationAction(ISD::BITCAST, MVT::v8i8, Expand);
  setOperationAction(ISD::BITCAST, MVT::v4i16, Expand);
  setOperationAction(ISD::BITCAST, MVT::v2i32, Expand);
  setOperationAction(ISD::BITCAST, MVT::v1i64, Expand);

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE1()) {
    addRegisterClass(MVT::v4f32, &X86::VR128RegClass);

    setOperationAction(ISD::FADD, MVT::v4f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f32, Custom);
    setOperationAction(ISD::FABS, MVT::v4f32, Custom);
    setOperationAction(ISD::LOAD, MVT::v4f32, Legal);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v4f32, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
    setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE2()) {
    addRegisterClass(MVT::v2f64, &X86::VR128RegClass);

    // FIXME: Unfortunately, -soft-float and -no-implicit-float mean XMM
    // registers cannot be used even for integer operations.
    addRegisterClass(MVT::v16i8, &X86::VR128RegClass);
    addRegisterClass(MVT::v8i16, &X86::VR128RegClass);
    addRegisterClass(MVT::v4i32, &X86::VR128RegClass);
    addRegisterClass(MVT::v2i64, &X86::VR128RegClass);

    setOperationAction(ISD::ADD, MVT::v16i8, Legal);
    setOperationAction(ISD::ADD, MVT::v8i16, Legal);
    setOperationAction(ISD::ADD, MVT::v4i32, Legal);
    setOperationAction(ISD::ADD, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
    setOperationAction(ISD::UMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::SMUL_LOHI, MVT::v4i32, Custom);
    setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
    setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v16i8, Legal);
    setOperationAction(ISD::SUB, MVT::v8i16, Legal);
    setOperationAction(ISD::SUB, MVT::v4i32, Legal);
    setOperationAction(ISD::SUB, MVT::v2i64, Legal);
    setOperationAction(ISD::MUL, MVT::v8i16, Legal);
    setOperationAction(ISD::FADD, MVT::v2f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v2f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v2f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v2f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
    setOperationAction(ISD::FABS, MVT::v2f64, Custom);

    setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i32, Custom);

    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
    setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    // Only provide customized ctpop vector bit twiddling for vector types we
    // know to perform better than using the popcnt instructions on each vector
    // element. If popcnt isn't supported, always provide the custom version.
    if (!Subtarget->hasPOPCNT()) {
      setOperationAction(ISD::CTPOP, MVT::v4i32, Custom);
      setOperationAction(ISD::CTPOP, MVT::v2i64, Custom);
    }

    // Custom lower build_vector, vector_shuffle, and extract_vector_elt.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;
      // Do not attempt to custom lower non-power-of-2 vectors
      if (!isPowerOf2_32(VT.getVectorNumElements()))
        continue;
      // Do not attempt to custom lower non-128-bit vectors
      if (!VT.is128BitVector())
        continue;
      setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
    }

    // We support custom legalizing of sext and anyext loads for specific
    // memory vector types which we can load as a scalar (or sequence of
    // scalars) and extend in-register to a legal 128-bit vector type. For sext
    // loads these must work with a single scalar load.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v8i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2i32, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i8, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4i16, Custom);
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8i8, Custom);
    }
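    // Illustrative note (an assumption about the resulting lowering, not from
    // this file): a sextload of v4i8 extended to v4i32 can be done as a single
    // 32-bit scalar load followed by an in-register sign extension (e.g.
    // PMOVSXBD with SSE4.1, or unpacks plus arithmetic shifts on plain SSE2),
    // instead of four separate byte loads.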

    setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
    setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2f64, Custom);
    setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v2i64, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2f64, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2f64, Custom);

    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }

    // Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
    for (int i = MVT::v16i8; i != MVT::v2i64; ++i) {
      MVT VT = (MVT::SimpleValueType)i;

      // Do not attempt to promote non-128-bit vectors
      if (!VT.is128BitVector())
        continue;

      setOperationAction(ISD::AND, VT, Promote);
      AddPromotedToType (ISD::AND, VT, MVT::v2i64);
      setOperationAction(ISD::OR, VT, Promote);
      AddPromotedToType (ISD::OR, VT, MVT::v2i64);
      setOperationAction(ISD::XOR, VT, Promote);
      AddPromotedToType (ISD::XOR, VT, MVT::v2i64);
      setOperationAction(ISD::LOAD, VT, Promote);
      AddPromotedToType (ISD::LOAD, VT, MVT::v2i64);
      setOperationAction(ISD::SELECT, VT, Promote);
      AddPromotedToType (ISD::SELECT, VT, MVT::v2i64);
    }

    // Custom lower v2i64 and v2f64 selects.
    setOperationAction(ISD::LOAD, MVT::v2f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
    setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v2i64, Custom);

    setOperationAction(ISD::FP_TO_SINT, MVT::v4i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v4i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i16, Custom);
    // As there is no 64-bit GPR available, we need to build a special custom
    // sequence to convert from v2i32 to v2f32.
    if (!Subtarget->is64Bit())
      setOperationAction(ISD::UINT_TO_FP, MVT::v2f32, Custom);

    setOperationAction(ISD::FP_EXTEND, MVT::v2f32, Custom);
    setOperationAction(ISD::FP_ROUND, MVT::v2f32, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v2f32, Legal);

    setOperationAction(ISD::BITCAST, MVT::v2i32, Custom);
    setOperationAction(ISD::BITCAST, MVT::v4i16, Custom);
    setOperationAction(ISD::BITCAST, MVT::v8i8, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasSSE41()) {
    setOperationAction(ISD::FFLOOR, MVT::f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f32, Legal);
    setOperationAction(ISD::FRINT, MVT::f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f64, Legal);
    setOperationAction(ISD::FRINT, MVT::f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::f64, Legal);

    setOperationAction(ISD::FFLOOR, MVT::v4f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v2f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v2f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v2f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v2f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v2f64, Legal);

    // FIXME: Do we need to handle scalar-to-vector here?
    setOperationAction(ISD::MUL, MVT::v4i32, Legal);

    setOperationAction(ISD::VSELECT, MVT::v2f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v2i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4i32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4f32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8i16, Custom);
    // There is no BLENDI for byte vectors. We don't need to custom lower
    // some vselects for now.
    setOperationAction(ISD::VSELECT, MVT::v16i8, Legal);

    // SSE41 brings specific instructions for doing vector sign extend even in
    // cases where we don't have SRA.
    for (MVT VT : MVT::integer_vector_valuetypes()) {
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i8, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i16, Custom);
      setLoadExtAction(ISD::SEXTLOAD, VT, MVT::v2i32, Custom);
    }

    // SSE41 also has vector sign/zero extending loads, PMOV[SZ]X
    setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::SEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);

    setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i16, MVT::v8i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i8, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i32, MVT::v4i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i16, Legal);
    setLoadExtAction(ISD::ZEXTLOAD, MVT::v2i64, MVT::v2i32, Legal);

    // i8 and i16 vectors are custom because the source register and source
    // memory operand types are not the same width. f32 vectors are custom
    // since the immediate controlling the insert encodes additional
    // information.
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);

    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i8, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i16, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4i32, Custom);
    setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);

    // FIXME: these should be Legal, but that's only for the case where
    // the index is constant. For now custom expand to deal with that.
    if (Subtarget->is64Bit()) {
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v2i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v2i64, Custom);
    }
  }

  if (Subtarget->hasSSE2()) {
    setOperationAction(ISD::SRL, MVT::v8i16, Custom);
    setOperationAction(ISD::SRL, MVT::v16i8, Custom);

    setOperationAction(ISD::SHL, MVT::v8i16, Custom);
    setOperationAction(ISD::SHL, MVT::v16i8, Custom);

    setOperationAction(ISD::SRA, MVT::v8i16, Custom);
    setOperationAction(ISD::SRA, MVT::v16i8, Custom);

    // In the customized shift lowering, the legal cases in AVX2 will be
    // recognized.
    setOperationAction(ISD::SRL, MVT::v2i64, Custom);
    setOperationAction(ISD::SRL, MVT::v4i32, Custom);

    setOperationAction(ISD::SHL, MVT::v2i64, Custom);
    setOperationAction(ISD::SHL, MVT::v4i32, Custom);

    setOperationAction(ISD::SRA, MVT::v4i32, Custom);
  }

  if (!TM.Options.UseSoftFloat && Subtarget->hasFp256()) {
    addRegisterClass(MVT::v32i8, &X86::VR256RegClass);
    addRegisterClass(MVT::v16i16, &X86::VR256RegClass);
    addRegisterClass(MVT::v8i32, &X86::VR256RegClass);
    addRegisterClass(MVT::v8f32, &X86::VR256RegClass);
    addRegisterClass(MVT::v4i64, &X86::VR256RegClass);
    addRegisterClass(MVT::v4f64, &X86::VR256RegClass);

    setOperationAction(ISD::LOAD, MVT::v8f32, Legal);
    setOperationAction(ISD::LOAD, MVT::v4f64, Legal);
    setOperationAction(ISD::LOAD, MVT::v4i64, Legal);

    setOperationAction(ISD::FADD, MVT::v8f32, Legal);
    setOperationAction(ISD::FSUB, MVT::v8f32, Legal);
    setOperationAction(ISD::FMUL, MVT::v8f32, Legal);
    setOperationAction(ISD::FDIV, MVT::v8f32, Legal);
    setOperationAction(ISD::FSQRT, MVT::v8f32, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v8f32, Legal);
    setOperationAction(ISD::FCEIL, MVT::v8f32, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v8f32, Legal);
    setOperationAction(ISD::FRINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v8f32, Legal);
    setOperationAction(ISD::FNEG, MVT::v8f32, Custom);
    setOperationAction(ISD::FABS, MVT::v8f32, Custom);

    setOperationAction(ISD::FADD, MVT::v4f64, Legal);
    setOperationAction(ISD::FSUB, MVT::v4f64, Legal);
    setOperationAction(ISD::FMUL, MVT::v4f64, Legal);
    setOperationAction(ISD::FDIV, MVT::v4f64, Legal);
    setOperationAction(ISD::FSQRT, MVT::v4f64, Legal);
    setOperationAction(ISD::FFLOOR, MVT::v4f64, Legal);
    setOperationAction(ISD::FCEIL, MVT::v4f64, Legal);
    setOperationAction(ISD::FTRUNC, MVT::v4f64, Legal);
    setOperationAction(ISD::FRINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f64, Legal);
    setOperationAction(ISD::FNEG, MVT::v4f64, Custom);
    setOperationAction(ISD::FABS, MVT::v4f64, Custom);

    // (fp_to_int:v8i16 (v8f32 ..)) requires the result type to be promoted
    // even though v8i16 is a legal type.
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_UINT, MVT::v8i16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::v8i32, Legal);

    setOperationAction(ISD::SINT_TO_FP, MVT::v8i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::v8i32, Legal);
    setOperationAction(ISD::FP_ROUND, MVT::v4f32, Legal);

    setOperationAction(ISD::UINT_TO_FP, MVT::v8i8, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v8i16, Custom);

    for (MVT VT : MVT::fp_vector_valuetypes())
      setLoadExtAction(ISD::EXTLOAD, VT, MVT::v4f32, Legal);

    setOperationAction(ISD::SRL, MVT::v16i16, Custom);
    setOperationAction(ISD::SRL, MVT::v32i8, Custom);

    setOperationAction(ISD::SHL, MVT::v16i16, Custom);
    setOperationAction(ISD::SHL, MVT::v32i8, Custom);

    setOperationAction(ISD::SRA, MVT::v16i16, Custom);
    setOperationAction(ISD::SRA, MVT::v32i8, Custom);

    setOperationAction(ISD::SETCC, MVT::v32i8, Custom);
    setOperationAction(ISD::SETCC, MVT::v16i16, Custom);
    setOperationAction(ISD::SETCC, MVT::v8i32, Custom);
    setOperationAction(ISD::SETCC, MVT::v4i64, Custom);

    setOperationAction(ISD::SELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::SELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::SELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::VSELECT, MVT::v4f64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v4i64, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8i32, Custom);
    setOperationAction(ISD::VSELECT, MVT::v8f32, Custom);

    setOperationAction(ISD::SIGN_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ZERO_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v4i64, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v8i32, Custom);
    setOperationAction(ISD::ANY_EXTEND, MVT::v16i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v8i16, Custom);
    setOperationAction(ISD::TRUNCATE, MVT::v4i32, Custom);

    if (Subtarget->hasFMA() || Subtarget->hasFMA4()) {
      setOperationAction(ISD::FMA, MVT::v8f32, Legal);
      setOperationAction(ISD::FMA, MVT::v4f64, Legal);
      setOperationAction(ISD::FMA, MVT::v4f32, Legal);
      setOperationAction(ISD::FMA, MVT::v2f64, Legal);
      setOperationAction(ISD::FMA, MVT::f32, Legal);
      setOperationAction(ISD::FMA, MVT::f64, Legal);
    }
1275 if (Subtarget->hasInt256()) {
1276 setOperationAction(ISD::ADD, MVT::v4i64, Legal);
1277 setOperationAction(ISD::ADD, MVT::v8i32, Legal);
1278 setOperationAction(ISD::ADD, MVT::v16i16, Legal);
1279 setOperationAction(ISD::ADD, MVT::v32i8, Legal);
1281 setOperationAction(ISD::SUB, MVT::v4i64, Legal);
1282 setOperationAction(ISD::SUB, MVT::v8i32, Legal);
1283 setOperationAction(ISD::SUB, MVT::v16i16, Legal);
1284 setOperationAction(ISD::SUB, MVT::v32i8, Legal);
1286 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1287 setOperationAction(ISD::MUL, MVT::v8i32, Legal);
1288 setOperationAction(ISD::MUL, MVT::v16i16, Legal);
1289 // Don't lower v32i8 because there is no 128-bit byte mul
1291 setOperationAction(ISD::UMUL_LOHI, MVT::v8i32, Custom);
1292 setOperationAction(ISD::SMUL_LOHI, MVT::v8i32, Custom);
1293 setOperationAction(ISD::MULHU, MVT::v16i16, Legal);
1294 setOperationAction(ISD::MULHS, MVT::v16i16, Legal);
1296 setOperationAction(ISD::VSELECT, MVT::v16i16, Custom);
1297 setOperationAction(ISD::VSELECT, MVT::v32i8, Legal);
1299 // The custom lowering for UINT_TO_FP for v8i32 becomes interesting
1300 // when we have a 256-bit-wide blend with immediate.
1301 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Custom);
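// Roughly, that lowering builds two f32 vectors from the low and high
// 16-bit halves of each lane using magic exponent constants, converts,
// subtracts the bias and adds the halves back together; the AVX2
// blend-with-immediate makes assembling those halves cheap.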
1303 // Only provide customized ctpop vector bit twiddling for vector types we
1304 // know to perform better than using the popcnt instructions on each
1305 // vector element. If popcnt isn't supported, always provide the custom version.
1307 if (!Subtarget->hasPOPCNT())
1308 setOperationAction(ISD::CTPOP, MVT::v4i64, Custom);
1310 // Custom CTPOP always performs better on natively supported v8i32
1311 setOperationAction(ISD::CTPOP, MVT::v8i32, Custom);
1313 // AVX2 also has wider vector sign/zero extending loads, VPMOV[SZ]X
1314 setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1315 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1316 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1317 setLoadExtAction(ISD::SEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1318 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1319 setLoadExtAction(ISD::SEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
1321 setLoadExtAction(ISD::ZEXTLOAD, MVT::v16i16, MVT::v16i8, Legal);
1322 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i8, Legal);
1323 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i8, Legal);
1324 setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i32, MVT::v8i16, Legal);
1325 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i16, Legal);
1326 setLoadExtAction(ISD::ZEXTLOAD, MVT::v4i64, MVT::v4i32, Legal);
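// With these marked Legal, an extending vector load such as
// (v8i32 (zextload <8 x i8>*)) can be selected directly to VPMOVZXBD from
// memory instead of a plain load followed by a separate extend.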
1328 setOperationAction(ISD::ADD, MVT::v4i64, Custom);
1329 setOperationAction(ISD::ADD, MVT::v8i32, Custom);
1330 setOperationAction(ISD::ADD, MVT::v16i16, Custom);
1331 setOperationAction(ISD::ADD, MVT::v32i8, Custom);
1333 setOperationAction(ISD::SUB, MVT::v4i64, Custom);
1334 setOperationAction(ISD::SUB, MVT::v8i32, Custom);
1335 setOperationAction(ISD::SUB, MVT::v16i16, Custom);
1336 setOperationAction(ISD::SUB, MVT::v32i8, Custom);
1338 setOperationAction(ISD::MUL, MVT::v4i64, Custom);
1339 setOperationAction(ISD::MUL, MVT::v8i32, Custom);
1340 setOperationAction(ISD::MUL, MVT::v16i16, Custom);
1341 // Don't lower v32i8 because there is no 128-bit byte mul
1344 // In the customized shift lowering, the legal cases in AVX2 will be recognized.
1346 setOperationAction(ISD::SRL, MVT::v4i64, Custom);
1347 setOperationAction(ISD::SRL, MVT::v8i32, Custom);
1349 setOperationAction(ISD::SHL, MVT::v4i64, Custom);
1350 setOperationAction(ISD::SHL, MVT::v8i32, Custom);
1352 setOperationAction(ISD::SRA, MVT::v8i32, Custom);
1354 // Custom lower several nodes for 256-bit types.
1355 for (MVT VT : MVT::vector_valuetypes()) {
1356 if (VT.getScalarSizeInBits() >= 32) {
1357 setOperationAction(ISD::MLOAD, VT, Legal);
1358 setOperationAction(ISD::MSTORE, VT, Legal);
1360 // Extract subvector is special because the value type
1361 // (result) is 128-bit but the source is 256-bit wide.
1362 if (VT.is128BitVector()) {
1363 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1365 // Do not attempt to custom lower other non-256-bit vectors
1366 if (!VT.is256BitVector())
1369 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1370 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1371 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1372 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1373 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1374 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1375 setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
1378 // Promote v32i8, v16i16, v8i32 select, and, or, xor to v4i64.
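// Promotion here is just a bitcast: e.g. (and (v8i32 x), (v8i32 y)) becomes
// (bitcast (and (bitcast x to v4i64), (bitcast y to v4i64)) to v8i32), so a
// single 256-bit logical op covers every element width.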
1379 for (int i = MVT::v32i8; i != MVT::v4i64; ++i) {
1380 MVT VT = (MVT::SimpleValueType)i;
1382 // Do not attempt to promote non-256-bit vectors
1383 if (!VT.is256BitVector())
1386 setOperationAction(ISD::AND, VT, Promote);
1387 AddPromotedToType (ISD::AND, VT, MVT::v4i64);
1388 setOperationAction(ISD::OR, VT, Promote);
1389 AddPromotedToType (ISD::OR, VT, MVT::v4i64);
1390 setOperationAction(ISD::XOR, VT, Promote);
1391 AddPromotedToType (ISD::XOR, VT, MVT::v4i64);
1392 setOperationAction(ISD::LOAD, VT, Promote);
1393 AddPromotedToType (ISD::LOAD, VT, MVT::v4i64);
1394 setOperationAction(ISD::SELECT, VT, Promote);
1395 AddPromotedToType (ISD::SELECT, VT, MVT::v4i64);
1399 if (!TM.Options.UseSoftFloat && Subtarget->hasAVX512()) {
1400 addRegisterClass(MVT::v16i32, &X86::VR512RegClass);
1401 addRegisterClass(MVT::v16f32, &X86::VR512RegClass);
1402 addRegisterClass(MVT::v8i64, &X86::VR512RegClass);
1403 addRegisterClass(MVT::v8f64, &X86::VR512RegClass);
1405 addRegisterClass(MVT::i1, &X86::VK1RegClass);
1406 addRegisterClass(MVT::v8i1, &X86::VK8RegClass);
1407 addRegisterClass(MVT::v16i1, &X86::VK16RegClass);
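// With AVX-512, vector compares produce mask results rather than wide
// vectors; i1, v8i1 and v16i1 live in the k0-k7 mask registers through the
// VK* register classes registered above.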
1409 for (MVT VT : MVT::fp_vector_valuetypes())
1410 setLoadExtAction(ISD::EXTLOAD, VT, MVT::v8f32, Legal);
1412 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
1413 setOperationAction(ISD::SETCC, MVT::i1, Custom);
1414 setOperationAction(ISD::XOR, MVT::i1, Legal);
1415 setOperationAction(ISD::OR, MVT::i1, Legal);
1416 setOperationAction(ISD::AND, MVT::i1, Legal);
1417 setOperationAction(ISD::LOAD, MVT::v16f32, Legal);
1418 setOperationAction(ISD::LOAD, MVT::v8f64, Legal);
1419 setOperationAction(ISD::LOAD, MVT::v8i64, Legal);
1420 setOperationAction(ISD::LOAD, MVT::v16i32, Legal);
1421 setOperationAction(ISD::LOAD, MVT::v16i1, Legal);
1423 setOperationAction(ISD::FADD, MVT::v16f32, Legal);
1424 setOperationAction(ISD::FSUB, MVT::v16f32, Legal);
1425 setOperationAction(ISD::FMUL, MVT::v16f32, Legal);
1426 setOperationAction(ISD::FDIV, MVT::v16f32, Legal);
1427 setOperationAction(ISD::FSQRT, MVT::v16f32, Legal);
1428 setOperationAction(ISD::FNEG, MVT::v16f32, Custom);
1430 setOperationAction(ISD::FADD, MVT::v8f64, Legal);
1431 setOperationAction(ISD::FSUB, MVT::v8f64, Legal);
1432 setOperationAction(ISD::FMUL, MVT::v8f64, Legal);
1433 setOperationAction(ISD::FDIV, MVT::v8f64, Legal);
1434 setOperationAction(ISD::FSQRT, MVT::v8f64, Legal);
1435 setOperationAction(ISD::FNEG, MVT::v8f64, Custom);
1436 setOperationAction(ISD::FMA, MVT::v8f64, Legal);
1437 setOperationAction(ISD::FMA, MVT::v16f32, Legal);
1439 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
1440 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
1441 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
1442 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
1443 if (Subtarget->is64Bit()) {
1444 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
1445 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
1446 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
1447 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
1449 setOperationAction(ISD::FP_TO_SINT, MVT::v16i32, Legal);
1450 setOperationAction(ISD::FP_TO_UINT, MVT::v16i32, Legal);
1451 setOperationAction(ISD::FP_TO_UINT, MVT::v8i32, Legal);
1452 setOperationAction(ISD::FP_TO_UINT, MVT::v4i32, Legal);
1453 setOperationAction(ISD::SINT_TO_FP, MVT::v16i32, Legal);
1454 setOperationAction(ISD::SINT_TO_FP, MVT::v8i1, Custom);
1455 setOperationAction(ISD::SINT_TO_FP, MVT::v16i1, Custom);
1456 setOperationAction(ISD::SINT_TO_FP, MVT::v16i8, Promote);
1457 setOperationAction(ISD::SINT_TO_FP, MVT::v16i16, Promote);
1458 setOperationAction(ISD::UINT_TO_FP, MVT::v16i32, Legal);
1459 setOperationAction(ISD::UINT_TO_FP, MVT::v8i32, Legal);
1460 setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Legal);
1461 setOperationAction(ISD::FP_ROUND, MVT::v8f32, Legal);
1462 setOperationAction(ISD::FP_EXTEND, MVT::v8f32, Legal);
1464 setOperationAction(ISD::TRUNCATE, MVT::i1, Custom);
1465 setOperationAction(ISD::TRUNCATE, MVT::v16i8, Custom);
1466 setOperationAction(ISD::TRUNCATE, MVT::v8i32, Custom);
1467 setOperationAction(ISD::TRUNCATE, MVT::v8i1, Custom);
1468 setOperationAction(ISD::TRUNCATE, MVT::v16i1, Custom);
1469 setOperationAction(ISD::TRUNCATE, MVT::v16i16, Custom);
1470 setOperationAction(ISD::ZERO_EXTEND, MVT::v16i32, Custom);
1471 setOperationAction(ISD::ZERO_EXTEND, MVT::v8i64, Custom);
1472 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
1473 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
1474 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i8, Custom);
1475 setOperationAction(ISD::SIGN_EXTEND, MVT::v8i16, Custom);
1476 setOperationAction(ISD::SIGN_EXTEND, MVT::v16i16, Custom);
1478 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8f64, Custom);
1479 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i64, Custom);
1480 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16f32, Custom);
1481 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i32, Custom);
1482 setOperationAction(ISD::CONCAT_VECTORS, MVT::v8i1, Custom);
1483 setOperationAction(ISD::CONCAT_VECTORS, MVT::v16i1, Legal);
1485 setOperationAction(ISD::SETCC, MVT::v16i1, Custom);
1486 setOperationAction(ISD::SETCC, MVT::v8i1, Custom);
1488 setOperationAction(ISD::MUL, MVT::v8i64, Custom);
1490 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v8i1, Custom);
1491 setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v16i1, Custom);
1492 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v16i1, Custom);
1493 setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i1, Custom);
1494 setOperationAction(ISD::BUILD_VECTOR, MVT::v8i1, Custom);
1495 setOperationAction(ISD::BUILD_VECTOR, MVT::v16i1, Custom);
1496 setOperationAction(ISD::SELECT, MVT::v8f64, Custom);
1497 setOperationAction(ISD::SELECT, MVT::v8i64, Custom);
1498 setOperationAction(ISD::SELECT, MVT::v16f32, Custom);
1500 setOperationAction(ISD::ADD, MVT::v8i64, Legal);
1501 setOperationAction(ISD::ADD, MVT::v16i32, Legal);
1503 setOperationAction(ISD::SUB, MVT::v8i64, Legal);
1504 setOperationAction(ISD::SUB, MVT::v16i32, Legal);
1506 setOperationAction(ISD::MUL, MVT::v16i32, Legal);
1508 setOperationAction(ISD::SRL, MVT::v8i64, Custom);
1509 setOperationAction(ISD::SRL, MVT::v16i32, Custom);
1511 setOperationAction(ISD::SHL, MVT::v8i64, Custom);
1512 setOperationAction(ISD::SHL, MVT::v16i32, Custom);
1514 setOperationAction(ISD::SRA, MVT::v8i64, Custom);
1515 setOperationAction(ISD::SRA, MVT::v16i32, Custom);
1517 setOperationAction(ISD::AND, MVT::v8i64, Legal);
1518 setOperationAction(ISD::OR, MVT::v8i64, Legal);
1519 setOperationAction(ISD::XOR, MVT::v8i64, Legal);
1520 setOperationAction(ISD::AND, MVT::v16i32, Legal);
1521 setOperationAction(ISD::OR, MVT::v16i32, Legal);
1522 setOperationAction(ISD::XOR, MVT::v16i32, Legal);
1524 if (Subtarget->hasCDI()) {
1525 setOperationAction(ISD::CTLZ, MVT::v8i64, Legal);
1526 setOperationAction(ISD::CTLZ, MVT::v16i32, Legal);
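// AVX-512 CD provides VPLZCNTD/VPLZCNTQ, so vector count-leading-zeros is
// directly legal on these types.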
1529 // Custom lower several nodes.
1530 for (MVT VT : MVT::vector_valuetypes()) {
1531 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1532 // Extract subvector is special because the value type
1533 // (result) is 256/128-bit but the source is 512-bit wide.
1534 if (VT.is128BitVector() || VT.is256BitVector()) {
1535 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
1537 if (VT.getVectorElementType() == MVT::i1)
1538 setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Legal);
1540 // Do not attempt to custom lower other non-512-bit vectors
1541 if (!VT.is512BitVector())
1544 if (EltSize >= 32) {
1545 setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
1546 setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
1547 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1548 setOperationAction(ISD::VSELECT, VT, Legal);
1549 setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
1550 setOperationAction(ISD::SCALAR_TO_VECTOR, VT, Custom);
1551 setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
1552 setOperationAction(ISD::MLOAD, VT, Legal);
1553 setOperationAction(ISD::MSTORE, VT, Legal);
1556 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1557 MVT VT = (MVT::SimpleValueType)i;
1559 // Do not attempt to promote non-512-bit vectors.
1560 if (!VT.is512BitVector())
1563 setOperationAction(ISD::SELECT, VT, Promote);
1564 AddPromotedToType (ISD::SELECT, VT, MVT::v8i64);
1568 if (!TM.Options.UseSoftFloat && Subtarget->hasBWI()) {
1569 addRegisterClass(MVT::v32i16, &X86::VR512RegClass);
1570 addRegisterClass(MVT::v64i8, &X86::VR512RegClass);
1572 addRegisterClass(MVT::v32i1, &X86::VK32RegClass);
1573 addRegisterClass(MVT::v64i1, &X86::VK64RegClass);
1575 setOperationAction(ISD::LOAD, MVT::v32i16, Legal);
1576 setOperationAction(ISD::LOAD, MVT::v64i8, Legal);
1577 setOperationAction(ISD::SETCC, MVT::v32i1, Custom);
1578 setOperationAction(ISD::SETCC, MVT::v64i1, Custom);
1579 setOperationAction(ISD::ADD, MVT::v32i16, Legal);
1580 setOperationAction(ISD::ADD, MVT::v64i8, Legal);
1581 setOperationAction(ISD::SUB, MVT::v32i16, Legal);
1582 setOperationAction(ISD::SUB, MVT::v64i8, Legal);
1583 setOperationAction(ISD::MUL, MVT::v32i16, Legal);
1585 for (int i = MVT::v32i8; i != MVT::v8i64; ++i) {
1586 const MVT VT = (MVT::SimpleValueType)i;
1588 const unsigned EltSize = VT.getVectorElementType().getSizeInBits();
1590 // Do not attempt to promote non-512-bit vectors.
1591 if (!VT.is512BitVector())
1595 setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
1596 setOperationAction(ISD::VSELECT, VT, Legal);
1601 if (!TM.Options.UseSoftFloat && Subtarget->hasVLX()) {
1602 addRegisterClass(MVT::v4i1, &X86::VK4RegClass);
1603 addRegisterClass(MVT::v2i1, &X86::VK2RegClass);
1605 setOperationAction(ISD::SETCC, MVT::v4i1, Custom);
1606 setOperationAction(ISD::SETCC, MVT::v2i1, Custom);
1607 setOperationAction(ISD::INSERT_SUBVECTOR, MVT::v8i1, Legal);
1609 setOperationAction(ISD::AND, MVT::v8i32, Legal);
1610 setOperationAction(ISD::OR, MVT::v8i32, Legal);
1611 setOperationAction(ISD::XOR, MVT::v8i32, Legal);
1612 setOperationAction(ISD::AND, MVT::v4i32, Legal);
1613 setOperationAction(ISD::OR, MVT::v4i32, Legal);
1614 setOperationAction(ISD::XOR, MVT::v4i32, Legal);
1617 // SIGN_EXTEND_INREGs are evaluated by the extend type. Handle the expansion
1618 // of this type with custom code.
1619 for (MVT VT : MVT::vector_valuetypes())
1620 setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Custom);
1622 // We want to custom lower some of our intrinsics.
1623 setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
1624 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
1625 setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);
1626 if (!Subtarget->is64Bit())
1627 setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
1629 // Only custom-lower 64-bit SADDO and friends on 64-bit because we don't
1630 // handle type legalization for these operations here.
1632 // FIXME: We really should do custom legalization for addition and
1633 // subtraction on x86-32 once PR3203 is fixed. We really can't do much better
1634 // than generic legalization for 64-bit multiplication-with-overflow, though.
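// For example, an i32 llvm.sadd.with.overflow becomes a single ADD that
// defines EFLAGS plus a SETO of the overflow flag, so the second result
// comes essentially for free rather than needing an explicit compare.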
1635 for (unsigned i = 0, e = 3+Subtarget->is64Bit(); i != e; ++i) {
1636 // Add/Sub/Mul with overflow operations are custom lowered.
1638 setOperationAction(ISD::SADDO, VT, Custom);
1639 setOperationAction(ISD::UADDO, VT, Custom);
1640 setOperationAction(ISD::SSUBO, VT, Custom);
1641 setOperationAction(ISD::USUBO, VT, Custom);
1642 setOperationAction(ISD::SMULO, VT, Custom);
1643 setOperationAction(ISD::UMULO, VT, Custom);
1647 if (!Subtarget->is64Bit()) {
1648 // These libcalls are not available in 32-bit.
1649 setLibcallName(RTLIB::SHL_I128, nullptr);
1650 setLibcallName(RTLIB::SRL_I128, nullptr);
1651 setLibcallName(RTLIB::SRA_I128, nullptr);
1654 // Combine sin / cos into one node or libcall if possible.
1655 if (Subtarget->hasSinCos()) {
1656 setLibcallName(RTLIB::SINCOS_F32, "sincosf");
1657 setLibcallName(RTLIB::SINCOS_F64, "sincos");
1658 if (Subtarget->isTargetDarwin()) {
1659 // For MacOSX, we don't want the normal expansion of a libcall to sincos.
1660 // We want to issue a libcall to __sincos_stret to avoid memory traffic.
1661 setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
1662 setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
1666 if (Subtarget->isTargetWin64()) {
1667 setOperationAction(ISD::SDIV, MVT::i128, Custom);
1668 setOperationAction(ISD::UDIV, MVT::i128, Custom);
1669 setOperationAction(ISD::SREM, MVT::i128, Custom);
1670 setOperationAction(ISD::UREM, MVT::i128, Custom);
1671 setOperationAction(ISD::SDIVREM, MVT::i128, Custom);
1672 setOperationAction(ISD::UDIVREM, MVT::i128, Custom);
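// There is no native 128-bit divide; on Win64 these are custom lowered to
// calls to the usual __divti3-style helpers, with the i128 operands passed
// indirectly to fit the Win64 calling convention (see the lowering routine
// for the exact mechanics).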
1675 // We have target-specific dag combine patterns for the following nodes:
1676 setTargetDAGCombine(ISD::VECTOR_SHUFFLE);
1677 setTargetDAGCombine(ISD::EXTRACT_VECTOR_ELT);
1678 setTargetDAGCombine(ISD::BITCAST);
1679 setTargetDAGCombine(ISD::VSELECT);
1680 setTargetDAGCombine(ISD::SELECT);
1681 setTargetDAGCombine(ISD::SHL);
1682 setTargetDAGCombine(ISD::SRA);
1683 setTargetDAGCombine(ISD::SRL);
1684 setTargetDAGCombine(ISD::OR);
1685 setTargetDAGCombine(ISD::AND);
1686 setTargetDAGCombine(ISD::ADD);
1687 setTargetDAGCombine(ISD::FADD);
1688 setTargetDAGCombine(ISD::FSUB);
1689 setTargetDAGCombine(ISD::FMA);
1690 setTargetDAGCombine(ISD::SUB);
1691 setTargetDAGCombine(ISD::LOAD);
1692 setTargetDAGCombine(ISD::MLOAD);
1693 setTargetDAGCombine(ISD::STORE);
1694 setTargetDAGCombine(ISD::MSTORE);
1695 setTargetDAGCombine(ISD::ZERO_EXTEND);
1696 setTargetDAGCombine(ISD::ANY_EXTEND);
1697 setTargetDAGCombine(ISD::SIGN_EXTEND);
1698 setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
1699 setTargetDAGCombine(ISD::TRUNCATE);
1700 setTargetDAGCombine(ISD::SINT_TO_FP);
1701 setTargetDAGCombine(ISD::SETCC);
1702 setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);
1703 setTargetDAGCombine(ISD::BUILD_VECTOR);
1704 setTargetDAGCombine(ISD::MUL);
1705 setTargetDAGCombine(ISD::XOR);
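// Registering an opcode here routes nodes with that opcode through
// X86TargetLowering::PerformDAGCombine, where the target-specific folds
// are implemented.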
1707 computeRegisterProperties();
1709 // On Darwin, -Os means optimize for size without hurting performance,
1710 // so do not reduce the limit.
1711 MaxStoresPerMemset = 16; // For @llvm.memset -> sequence of stores
1712 MaxStoresPerMemsetOptSize = Subtarget->isTargetDarwin() ? 16 : 8;
1713 MaxStoresPerMemcpy = 8; // For @llvm.memcpy -> sequence of stores
1714 MaxStoresPerMemcpyOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1715 MaxStoresPerMemmove = 8; // For @llvm.memmove -> sequence of stores
1716 MaxStoresPerMemmoveOptSize = Subtarget->isTargetDarwin() ? 8 : 4;
1717 setPrefLoopAlignment(4); // 2^4 bytes.
1719 // Predictable cmovs don't hurt on Atom because it's in-order.
1720 PredictableSelectIsExpensive = !Subtarget->isAtom();
1721 EnableExtLdPromotion = true;
1722 setPrefFunctionAlignment(4); // 2^4 bytes.
1724 verifyIntrinsicTables();
1727 // This has so far only been implemented for 64-bit MachO.
1728 bool X86TargetLowering::useLoadStackGuardNode() const {
1729 return Subtarget->isTargetMachO() && Subtarget->is64Bit();
1732 TargetLoweringBase::LegalizeTypeAction
1733 X86TargetLowering::getPreferredVectorAction(EVT VT) const {
1734 if (ExperimentalVectorWideningLegalization &&
1735 VT.getVectorNumElements() != 1 &&
1736 VT.getVectorElementType().getSimpleVT() != MVT::i1)
1737 return TypeWidenVector;
1739 return TargetLoweringBase::getPreferredVectorAction(VT);
1742 EVT X86TargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
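// Scalar compares produce i8 (or i1 once AVX-512 mask registers are
// available). For vectors, prefer a vXi1 mask type when the subtarget has
// matching mask-register support; otherwise fall back to a vector of
// integers as wide as the compared elements.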
1744 return Subtarget->hasAVX512() ? MVT::i1 : MVT::i8;
1746 const unsigned NumElts = VT.getVectorNumElements();
1747 const EVT EltVT = VT.getVectorElementType();
1748 if (VT.is512BitVector()) {
1749 if (Subtarget->hasAVX512())
1750 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1751 EltVT == MVT::f32 || EltVT == MVT::f64)
1753 case 8: return MVT::v8i1;
1754 case 16: return MVT::v16i1;
1756 if (Subtarget->hasBWI())
1757 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1759 case 32: return MVT::v32i1;
1760 case 64: return MVT::v64i1;
1764 if (VT.is256BitVector() || VT.is128BitVector()) {
1765 if (Subtarget->hasVLX())
1766 if (EltVT == MVT::i32 || EltVT == MVT::i64 ||
1767 EltVT == MVT::f32 || EltVT == MVT::f64)
1769 case 2: return MVT::v2i1;
1770 case 4: return MVT::v4i1;
1771 case 8: return MVT::v8i1;
1773 if (Subtarget->hasBWI() && Subtarget->hasVLX())
1774 if (EltVT == MVT::i8 || EltVT == MVT::i16)
1776 case 8: return MVT::v8i1;
1777 case 16: return MVT::v16i1;
1778 case 32: return MVT::v32i1;
1782 return VT.changeVectorElementTypeToInteger();
1785 /// Helper for getByValTypeAlignment to determine
1786 /// the desired ByVal argument alignment.
1787 static void getMaxByValAlign(Type *Ty, unsigned &MaxAlign) {
1790 if (VectorType *VTy = dyn_cast<VectorType>(Ty)) {
1791 if (VTy->getBitWidth() == 128)
1793 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1794 unsigned EltAlign = 0;
1795 getMaxByValAlign(ATy->getElementType(), EltAlign);
1796 if (EltAlign > MaxAlign)
1797 MaxAlign = EltAlign;
1798 } else if (StructType *STy = dyn_cast<StructType>(Ty)) {
1799 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1800 unsigned EltAlign = 0;
1801 getMaxByValAlign(STy->getElementType(i), EltAlign);
1802 if (EltAlign > MaxAlign)
1803 MaxAlign = EltAlign;
1810 /// Return the desired alignment for ByVal aggregate
1811 /// function arguments in the caller parameter area. For X86, aggregates
1812 /// that contain SSE vectors are placed at 16-byte boundaries while the rest
1813 /// are at 4-byte boundaries.
1814 unsigned X86TargetLowering::getByValTypeAlignment(Type *Ty) const {
1815 if (Subtarget->is64Bit()) {
1816 // Max of 8 and alignment of type.
1817 unsigned TyAlign = TD->getABITypeAlignment(Ty);
1824 if (Subtarget->hasSSE1())
1825 getMaxByValAlign(Ty, Align);
1829 /// Returns the target specific optimal type for load
1830 /// and store operations as a result of memset, memcpy, and memmove
1831 /// lowering. If DstAlign is zero, that means the destination
1832 /// alignment can satisfy any constraint. Similarly, if SrcAlign is zero it
1833 /// means there isn't a need to check it against the alignment requirement,
1834 /// probably because the source does not need to be loaded. If 'IsMemset' is
1835 /// true, that means it's expanding a memset. If 'ZeroMemset' is true, that
1836 /// means it's a memset of zero. 'MemcpyStrSrc' indicates whether the memcpy
1837 /// source is constant so it does not need to be loaded.
1838 /// It returns EVT::Other if the type should be determined using generic
1839 /// target-independent logic.
1841 X86TargetLowering::getOptimalMemOpType(uint64_t Size,
1842 unsigned DstAlign, unsigned SrcAlign,
1843 bool IsMemset, bool ZeroMemset,
1845 MachineFunction &MF) const {
1846 const Function *F = MF.getFunction();
1847 if ((!IsMemset || ZeroMemset) &&
1848 !F->hasFnAttribute(Attribute::NoImplicitFloat)) {
1850 (Subtarget->isUnalignedMemAccessFast() ||
1851 ((DstAlign == 0 || DstAlign >= 16) &&
1852 (SrcAlign == 0 || SrcAlign >= 16)))) {
1854 if (Subtarget->hasInt256())
1856 if (Subtarget->hasFp256())
1859 if (Subtarget->hasSSE2())
1861 if (Subtarget->hasSSE1())
1863 } else if (!MemcpyStrSrc && Size >= 8 &&
1864 !Subtarget->is64Bit() &&
1865 Subtarget->hasSSE2()) {
1866 // Do not use f64 to lower memcpy if the source is a string constant. It's
1867 // better to use i32 to avoid the loads.
1871 if (Subtarget->is64Bit() && Size >= 8)
1876 bool X86TargetLowering::isSafeMemOpType(MVT VT) const {
1878 return X86ScalarSSEf32;
1879 else if (VT == MVT::f64)
1880 return X86ScalarSSEf64;
1885 X86TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
1890 *Fast = Subtarget->isUnalignedMemAccessFast();
1894 /// Return the entry encoding for a jump table in the
1895 /// current function. The returned value is a member of the
1896 /// MachineJumpTableInfo::JTEntryKind enum.
1897 unsigned X86TargetLowering::getJumpTableEncoding() const {
1898 // In GOT pic mode, each entry in the jump table is emitted as a @GOTOFF symbol.
1900 if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
1901 Subtarget->isPICStyleGOT())
1902 return MachineJumpTableInfo::EK_Custom32;
1904 // Otherwise, use the normal jump table encoding heuristics.
1905 return TargetLowering::getJumpTableEncoding();
1909 X86TargetLowering::LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
1910 const MachineBasicBlock *MBB,
1911 unsigned uid, MCContext &Ctx) const {
1912 assert(MBB->getParent()->getTarget().getRelocationModel() == Reloc::PIC_ &&
1913 Subtarget->isPICStyleGOT());
1914 // In 32-bit ELF systems, our jump table entries are formed with @GOTOFF entries.
1916 return MCSymbolRefExpr::Create(MBB->getSymbol(),
1917 MCSymbolRefExpr::VK_GOTOFF, Ctx);
1920 /// Returns relocation base for the given PIC jumptable.
1921 SDValue X86TargetLowering::getPICJumpTableRelocBase(SDValue Table,
1922 SelectionDAG &DAG) const {
1923 if (!Subtarget->is64Bit())
1924 // This doesn't have SDLoc associated with it, but is not really the
1925 // same as a Register.
1926 return DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy());
1930 /// This returns the relocation base for the given PIC jumptable,
1931 /// the same as getPICJumpTableRelocBase, but as an MCExpr.
1932 const MCExpr *X86TargetLowering::
1933 getPICJumpTableRelocBaseExpr(const MachineFunction *MF, unsigned JTI,
1934 MCContext &Ctx) const {
1935 // X86-64 uses RIP relative addressing based on the jump table label.
1936 if (Subtarget->isPICStyleRIPRel())
1937 return TargetLowering::getPICJumpTableRelocBaseExpr(MF, JTI, Ctx);
1939 // Otherwise, the reference is relative to the PIC base.
1940 return MCSymbolRefExpr::Create(MF->getPICBaseSymbol(), Ctx);
1943 // FIXME: Why is this routine here? Move to RegInfo!
1944 std::pair<const TargetRegisterClass*, uint8_t>
1945 X86TargetLowering::findRepresentativeClass(MVT VT) const {
1946 const TargetRegisterClass *RRC = nullptr;
1948 switch (VT.SimpleTy) {
1950 return TargetLowering::findRepresentativeClass(VT);
1951 case MVT::i8: case MVT::i16: case MVT::i32: case MVT::i64:
1952 RRC = Subtarget->is64Bit() ? &X86::GR64RegClass : &X86::GR32RegClass;
1955 RRC = &X86::VR64RegClass;
1957 case MVT::f32: case MVT::f64:
1958 case MVT::v16i8: case MVT::v8i16: case MVT::v4i32: case MVT::v2i64:
1959 case MVT::v4f32: case MVT::v2f64:
1960 case MVT::v32i8: case MVT::v8i32: case MVT::v4i64: case MVT::v8f32:
1962 RRC = &X86::VR128RegClass;
1965 return std::make_pair(RRC, Cost);
1968 bool X86TargetLowering::getStackCookieLocation(unsigned &AddressSpace,
1969 unsigned &Offset) const {
1970 if (!Subtarget->isTargetLinux())
1973 if (Subtarget->is64Bit()) {
1974 // %fs:0x28, unless we're using a Kernel code model, in which case it's %gs:
1976 if (getTargetMachine().getCodeModel() == CodeModel::Kernel)
1988 bool X86TargetLowering::isNoopAddrSpaceCast(unsigned SrcAS,
1989 unsigned DestAS) const {
1990 assert(SrcAS != DestAS && "Expected different address spaces!");
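// On x86, address spaces 256, 257 and 258 select the %gs, %fs and %ss
// segments; casts among the ordinary (< 256) address spaces are no-ops.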
1992 return SrcAS < 256 && DestAS < 256;
1995 //===----------------------------------------------------------------------===//
1996 // Return Value Calling Convention Implementation
1997 //===----------------------------------------------------------------------===//
1999 #include "X86GenCallingConv.inc"
2002 X86TargetLowering::CanLowerReturn(CallingConv::ID CallConv,
2003 MachineFunction &MF, bool isVarArg,
2004 const SmallVectorImpl<ISD::OutputArg> &Outs,
2005 LLVMContext &Context) const {
2006 SmallVector<CCValAssign, 16> RVLocs;
2007 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
2008 return CCInfo.CheckReturn(Outs, RetCC_X86);
2011 const MCPhysReg *X86TargetLowering::getScratchRegisters(CallingConv::ID) const {
2012 static const MCPhysReg ScratchRegs[] = { X86::R11, 0 };
2017 X86TargetLowering::LowerReturn(SDValue Chain,
2018 CallingConv::ID CallConv, bool isVarArg,
2019 const SmallVectorImpl<ISD::OutputArg> &Outs,
2020 const SmallVectorImpl<SDValue> &OutVals,
2021 SDLoc dl, SelectionDAG &DAG) const {
2022 MachineFunction &MF = DAG.getMachineFunction();
2023 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2025 SmallVector<CCValAssign, 16> RVLocs;
2026 CCState CCInfo(CallConv, isVarArg, MF, RVLocs, *DAG.getContext());
2027 CCInfo.AnalyzeReturn(Outs, RetCC_X86);
2030 SmallVector<SDValue, 6> RetOps;
2031 RetOps.push_back(Chain); // Operand #0 = Chain (updated below)
2032 // Operand #1 = Bytes To Pop
2033 RetOps.push_back(DAG.getTargetConstant(FuncInfo->getBytesToPopOnReturn(),
2036 // Copy the result values into the output registers.
2037 for (unsigned i = 0; i != RVLocs.size(); ++i) {
2038 CCValAssign &VA = RVLocs[i];
2039 assert(VA.isRegLoc() && "Can only return in registers!");
2040 SDValue ValToCopy = OutVals[i];
2041 EVT ValVT = ValToCopy.getValueType();
2043 // Promote values to the appropriate types.
2044 if (VA.getLocInfo() == CCValAssign::SExt)
2045 ValToCopy = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), ValToCopy);
2046 else if (VA.getLocInfo() == CCValAssign::ZExt)
2047 ValToCopy = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), ValToCopy);
2048 else if (VA.getLocInfo() == CCValAssign::AExt)
2049 ValToCopy = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), ValToCopy);
2050 else if (VA.getLocInfo() == CCValAssign::BCvt)
2051 ValToCopy = DAG.getNode(ISD::BITCAST, dl, VA.getLocVT(), ValToCopy);
2053 assert(VA.getLocInfo() != CCValAssign::FPExt &&
2054 "Unexpected FP-extend for return value.");
2056 // If this is x86-64, and we disabled SSE, we can't return FP values,
2057 // or SSE or MMX vectors.
2058 if ((ValVT == MVT::f32 || ValVT == MVT::f64 ||
2059 VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) &&
2060 (Subtarget->is64Bit() && !Subtarget->hasSSE1())) {
2061 report_fatal_error("SSE register return with SSE disabled");
2063 // Likewise we can't return F64 values with SSE1 only. gcc does so, but
2064 // llvm-gcc has never done it right and no one has noticed, so this
2065 // should be OK for now.
2066 if (ValVT == MVT::f64 &&
2067 (Subtarget->is64Bit() && !Subtarget->hasSSE2()))
2068 report_fatal_error("SSE2 register return with SSE2 disabled");
2070 // Returns in ST0/ST1 are handled specially: these are pushed as operands to
2071 // the RET instruction and handled by the FP Stackifier.
2072 if (VA.getLocReg() == X86::FP0 ||
2073 VA.getLocReg() == X86::FP1) {
2074 // If this is a copy from an xmm register to ST(0), use an FPExtend to
2075 // change the value to the FP stack register class.
2076 if (isScalarFPTypeInSSEReg(VA.getValVT()))
2077 ValToCopy = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f80, ValToCopy);
2078 RetOps.push_back(ValToCopy);
2079 // Don't emit a copytoreg.
2083 // 64-bit vector (MMX) values are returned in XMM0 / XMM1 except for v1i64
2084 // which is returned in RAX / RDX.
2085 if (Subtarget->is64Bit()) {
2086 if (ValVT == MVT::x86mmx) {
2087 if (VA.getLocReg() == X86::XMM0 || VA.getLocReg() == X86::XMM1) {
2088 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::i64, ValToCopy);
2089 ValToCopy = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
2091 // If we don't have SSE2 available, convert to v4f32 so the generated
2092 // register is legal.
2093 if (!Subtarget->hasSSE2())
2094 ValToCopy = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32,ValToCopy);
2099 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), ValToCopy, Flag);
2100 Flag = Chain.getValue(1);
2101 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
2104 // The x86-64 ABIs require that for returning structs by value we copy
2105 // the sret argument into %rax/%eax (depending on ABI) for the return.
2106 // Win32 requires us to put the sret argument to %eax as well.
2107 // We saved the argument into a virtual register in the entry block,
2108 // so now we copy the value out and into %rax/%eax.
2110 // Checking Function.hasStructRetAttr() here is insufficient because the IR
2111 // may not have an explicit sret argument. If FuncInfo.CanLowerReturn is
2112 // false, then an sret argument may be implicitly inserted in the SelDAG. In
2113 // either case FuncInfo->setSRetReturnReg() will have been called.
2114 if (unsigned SRetReg = FuncInfo->getSRetReturnReg()) {
2115 assert((Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) &&
2116 "No need for an sret register");
2117 SDValue Val = DAG.getCopyFromReg(Chain, dl, SRetReg, getPointerTy());
2120 = (Subtarget->is64Bit() && !Subtarget->isTarget64BitILP32()) ?
2121 X86::RAX : X86::EAX;
2122 Chain = DAG.getCopyToReg(Chain, dl, RetValReg, Val, Flag);
2123 Flag = Chain.getValue(1);
2125 // RAX/EAX now acts like a return value.
2126 RetOps.push_back(DAG.getRegister(RetValReg, getPointerTy()));
2129 RetOps[0] = Chain; // Update chain.
2131 // Add the flag if we have it.
2133 RetOps.push_back(Flag);
2135 return DAG.getNode(X86ISD::RET_FLAG, dl, MVT::Other, RetOps);
2138 bool X86TargetLowering::isUsedByReturnOnly(SDNode *N, SDValue &Chain) const {
2139 if (N->getNumValues() != 1)
2141 if (!N->hasNUsesOfValue(1, 0))
2144 SDValue TCChain = Chain;
2145 SDNode *Copy = *N->use_begin();
2146 if (Copy->getOpcode() == ISD::CopyToReg) {
2147 // If the copy has a glue operand, we conservatively assume it isn't safe to
2148 // perform a tail call.
2149 if (Copy->getOperand(Copy->getNumOperands()-1).getValueType() == MVT::Glue)
2151 TCChain = Copy->getOperand(0);
2152 } else if (Copy->getOpcode() != ISD::FP_EXTEND)
2155 bool HasRet = false;
2156 for (SDNode::use_iterator UI = Copy->use_begin(), UE = Copy->use_end();
2158 if (UI->getOpcode() != X86ISD::RET_FLAG)
2160 // If we are returning more than one value, we can definitely
2161 // not make a tail call; see PR19530.
2162 if (UI->getNumOperands() > 4)
2164 if (UI->getNumOperands() == 4 &&
2165 UI->getOperand(UI->getNumOperands()-1).getValueType() != MVT::Glue)
2178 X86TargetLowering::getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
2179 ISD::NodeType ExtendKind) const {
2181 // TODO: Is this also valid on 32-bit?
2182 if (Subtarget->is64Bit() && VT == MVT::i1 && ExtendKind == ISD::ZERO_EXTEND)
2183 ReturnMVT = MVT::i8;
2185 ReturnMVT = MVT::i32;
2187 EVT MinVT = getRegisterType(Context, ReturnMVT);
2188 return VT.bitsLT(MinVT) ? MinVT : VT;
2191 /// Lower the result values of a call into the
2192 /// appropriate copies out of appropriate physical registers.
2195 X86TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
2196 CallingConv::ID CallConv, bool isVarArg,
2197 const SmallVectorImpl<ISD::InputArg> &Ins,
2198 SDLoc dl, SelectionDAG &DAG,
2199 SmallVectorImpl<SDValue> &InVals) const {
2201 // Assign locations to each value returned by this call.
2202 SmallVector<CCValAssign, 16> RVLocs;
2203 bool Is64Bit = Subtarget->is64Bit();
2204 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
2206 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
2208 // Copy all of the result registers out of their specified physreg.
2209 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
2210 CCValAssign &VA = RVLocs[i];
2211 EVT CopyVT = VA.getValVT();
2213 // If this is x86-64, and we disabled SSE, we can't return FP values
2214 if ((CopyVT == MVT::f32 || CopyVT == MVT::f64) &&
2215 ((Is64Bit || Ins[i].Flags.isInReg()) && !Subtarget->hasSSE1())) {
2216 report_fatal_error("SSE register return with SSE disabled");
2219 // If we prefer to use the value in xmm registers, copy it out as f80 and
2220 // use a truncate to move it from fp stack reg to xmm reg.
2221 if ((VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1) &&
2222 isScalarFPTypeInSSEReg(VA.getValVT()))
2225 Chain = DAG.getCopyFromReg(Chain, dl, VA.getLocReg(),
2226 CopyVT, InFlag).getValue(1);
2227 SDValue Val = Chain.getValue(0);
2229 if (CopyVT != VA.getValVT())
2230 Val = DAG.getNode(ISD::FP_ROUND, dl, VA.getValVT(), Val,
2231 // This truncation won't change the value.
2232 DAG.getIntPtrConstant(1));
2234 InFlag = Chain.getValue(2);
2235 InVals.push_back(Val);
2241 //===----------------------------------------------------------------------===//
2242 // C & StdCall & Fast Calling Convention implementation
2243 //===----------------------------------------------------------------------===//
2244 // The StdCall calling convention is standard for many Windows API
2245 // routines. It differs from the C calling convention just a little: the
2246 // callee, not the caller, should clean up the stack, and symbols are also
2247 // decorated in a particular way. It doesn't support any vector arguments.
2248 // For info on fast calling convention see Fast Calling Convention (tail call)
2249 // implementation LowerX86_32FastCCCallTo.
2251 /// Determines whether a call uses struct return semantics.
2253 enum StructReturnType {
  NotStructReturn,
  RegStructReturn,
  StackStructReturn
};
2258 static StructReturnType
2259 callIsStructReturn(const SmallVectorImpl<ISD::OutputArg> &Outs) {
2261 return NotStructReturn;
2263 const ISD::ArgFlagsTy &Flags = Outs[0].Flags;
2264 if (!Flags.isSRet())
2265 return NotStructReturn;
2266 if (Flags.isInReg())
2267 return RegStructReturn;
2268 return StackStructReturn;
2271 /// Determines whether a function uses struct return semantics.
2272 static StructReturnType
2273 argsAreStructReturn(const SmallVectorImpl<ISD::InputArg> &Ins) {
2275 return NotStructReturn;
2277 const ISD::ArgFlagsTy &Flags = Ins[0].Flags;
2278 if (!Flags.isSRet())
2279 return NotStructReturn;
2280 if (Flags.isInReg())
2281 return RegStructReturn;
2282 return StackStructReturn;
2285 /// Make a copy of an aggregate at address specified by "Src" to address
2286 /// "Dst" with size and alignment information specified by the specific
2287 /// parameter attribute. The copy will be passed as a byval function parameter.
2289 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
2290 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
2292 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
2294 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
2295 /*isVolatile*/false, /*AlwaysInline=*/true,
2296 MachinePointerInfo(), MachinePointerInfo());
2299 /// Return true if the calling convention is one that
2300 /// supports tail call optimization.
2301 static bool IsTailCallConvention(CallingConv::ID CC) {
2302 return (CC == CallingConv::Fast || CC == CallingConv::GHC ||
2303 CC == CallingConv::HiPE);
2306 /// \brief Return true if the calling convention is a C calling convention.
2307 static bool IsCCallConvention(CallingConv::ID CC) {
2308 return (CC == CallingConv::C || CC == CallingConv::X86_64_Win64 ||
2309 CC == CallingConv::X86_64_SysV);
2312 bool X86TargetLowering::mayBeEmittedAsTailCall(CallInst *CI) const {
2313 if (!CI->isTailCall() || getTargetMachine().Options.DisableTailCalls)
2317 CallingConv::ID CalleeCC = CS.getCallingConv();
2318 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
2324 /// Return true if the function is being made into
2325 /// a tailcall target by changing its ABI.
2326 static bool FuncIsMadeTailCallSafe(CallingConv::ID CC,
2327 bool GuaranteedTailCallOpt) {
2328 return GuaranteedTailCallOpt && IsTailCallConvention(CC);
2332 X86TargetLowering::LowerMemArgument(SDValue Chain,
2333 CallingConv::ID CallConv,
2334 const SmallVectorImpl<ISD::InputArg> &Ins,
2335 SDLoc dl, SelectionDAG &DAG,
2336 const CCValAssign &VA,
2337 MachineFrameInfo *MFI,
2339 // Create the nodes corresponding to a load from this parameter slot.
2340 ISD::ArgFlagsTy Flags = Ins[i].Flags;
2341 bool AlwaysUseMutable = FuncIsMadeTailCallSafe(
2342 CallConv, DAG.getTarget().Options.GuaranteedTailCallOpt);
2343 bool isImmutable = !AlwaysUseMutable && !Flags.isByVal();
2346 // If value is passed by pointer we have address passed instead of the value
2348 if (VA.getLocInfo() == CCValAssign::Indirect)
2349 ValVT = VA.getLocVT();
2351 ValVT = VA.getValVT();
2353 // FIXME: For now, all byval parameter objects are marked mutable. This can be
2354 // changed with more analysis.
2355 // In case of tail call optimization, mark all arguments mutable, since they
2356 // could be overwritten by the lowering of arguments in case of a tail call.
2357 if (Flags.isByVal()) {
2358 unsigned Bytes = Flags.getByValSize();
2359 if (Bytes == 0) Bytes = 1; // Don't create zero-sized stack objects.
2360 int FI = MFI->CreateFixedObject(Bytes, VA.getLocMemOffset(), isImmutable);
2361 return DAG.getFrameIndex(FI, getPointerTy());
2363 int FI = MFI->CreateFixedObject(ValVT.getSizeInBits()/8,
2364 VA.getLocMemOffset(), isImmutable);
2365 SDValue FIN = DAG.getFrameIndex(FI, getPointerTy());
2366 return DAG.getLoad(ValVT, dl, Chain, FIN,
2367 MachinePointerInfo::getFixedStack(FI),
2368 false, false, false, 0);
2372 // FIXME: Get this from tablegen.
2373 static ArrayRef<MCPhysReg> get64BitArgumentGPRs(CallingConv::ID CallConv,
2374 const X86Subtarget *Subtarget) {
2375 assert(Subtarget->is64Bit());
2377 if (Subtarget->isCallingConvWin64(CallConv)) {
2378 static const MCPhysReg GPR64ArgRegsWin64[] = {
2379 X86::RCX, X86::RDX, X86::R8, X86::R9
2381 return makeArrayRef(std::begin(GPR64ArgRegsWin64), std::end(GPR64ArgRegsWin64));
2384 static const MCPhysReg GPR64ArgRegs64Bit[] = {
2385 X86::RDI, X86::RSI, X86::RDX, X86::RCX, X86::R8, X86::R9
2387 return makeArrayRef(std::begin(GPR64ArgRegs64Bit), std::end(GPR64ArgRegs64Bit));
2390 // FIXME: Get this from tablegen.
2391 static ArrayRef<MCPhysReg> get64BitArgumentXMMs(MachineFunction &MF,
2392 CallingConv::ID CallConv,
2393 const X86Subtarget *Subtarget) {
2394 assert(Subtarget->is64Bit());
2395 if (Subtarget->isCallingConvWin64(CallConv)) {
2396 // The XMM registers which might contain var arg parameters are shadowed
2397 // in their paired GPR. So we only need to save the GPR to their home slots.
2399 // TODO: __vectorcall will change this.
2403 const Function *Fn = MF.getFunction();
2404 bool NoImplicitFloatOps = Fn->hasFnAttribute(Attribute::NoImplicitFloat);
2405 assert(!(MF.getTarget().Options.UseSoftFloat && NoImplicitFloatOps) &&
2406 "SSE register cannot be used when SSE is disabled!");
2407 if (MF.getTarget().Options.UseSoftFloat || NoImplicitFloatOps ||
2408 !Subtarget->hasSSE1())
2409 // Kernel mode asks for SSE to be disabled, so there are no XMM argument registers.
2413 static const MCPhysReg XMMArgRegs64Bit[] = {
2414 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
2415 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
2417 return makeArrayRef(std::begin(XMMArgRegs64Bit), std::end(XMMArgRegs64Bit));
2421 X86TargetLowering::LowerFormalArguments(SDValue Chain,
2422 CallingConv::ID CallConv,
2424 const SmallVectorImpl<ISD::InputArg> &Ins,
2427 SmallVectorImpl<SDValue> &InVals)
2429 MachineFunction &MF = DAG.getMachineFunction();
2430 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
2432 const Function* Fn = MF.getFunction();
2433 if (Fn->hasExternalLinkage() &&
2434 Subtarget->isTargetCygMing() &&
2435 Fn->getName() == "main")
2436 FuncInfo->setForceFramePointer(true);
2438 MachineFrameInfo *MFI = MF.getFrameInfo();
2439 bool Is64Bit = Subtarget->is64Bit();
2440 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2442 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2443 "Var args not supported with calling convention fastcc, ghc or hipe");
2445 // Assign locations to all of the incoming arguments.
2446 SmallVector<CCValAssign, 16> ArgLocs;
2447 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2449 // Allocate shadow area for Win64
2451 CCInfo.AllocateStack(32, 8);
2453 CCInfo.AnalyzeFormalArguments(Ins, CC_X86);
2455 unsigned LastVal = ~0U;
2457 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2458 CCValAssign &VA = ArgLocs[i];
2459 // TODO: If an arg is passed in two places (e.g. reg and stack), skip later places.
2461 assert(VA.getValNo() != LastVal &&
2462 "Don't support value assigned to multiple locs yet");
2464 LastVal = VA.getValNo();
2466 if (VA.isRegLoc()) {
2467 EVT RegVT = VA.getLocVT();
2468 const TargetRegisterClass *RC;
2469 if (RegVT == MVT::i32)
2470 RC = &X86::GR32RegClass;
2471 else if (Is64Bit && RegVT == MVT::i64)
2472 RC = &X86::GR64RegClass;
2473 else if (RegVT == MVT::f32)
2474 RC = &X86::FR32RegClass;
2475 else if (RegVT == MVT::f64)
2476 RC = &X86::FR64RegClass;
2477 else if (RegVT.is512BitVector())
2478 RC = &X86::VR512RegClass;
2479 else if (RegVT.is256BitVector())
2480 RC = &X86::VR256RegClass;
2481 else if (RegVT.is128BitVector())
2482 RC = &X86::VR128RegClass;
2483 else if (RegVT == MVT::x86mmx)
2484 RC = &X86::VR64RegClass;
2485 else if (RegVT == MVT::i1)
2486 RC = &X86::VK1RegClass;
2487 else if (RegVT == MVT::v8i1)
2488 RC = &X86::VK8RegClass;
2489 else if (RegVT == MVT::v16i1)
2490 RC = &X86::VK16RegClass;
2491 else if (RegVT == MVT::v32i1)
2492 RC = &X86::VK32RegClass;
2493 else if (RegVT == MVT::v64i1)
2494 RC = &X86::VK64RegClass;
2496 llvm_unreachable("Unknown argument type!");
2498 unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
2499 ArgValue = DAG.getCopyFromReg(Chain, dl, Reg, RegVT);
2501 // If this is an 8 or 16-bit value, it is really passed promoted to 32
2502 // bits. Insert an assert[sz]ext to capture this, then truncate to the right size.
2504 if (VA.getLocInfo() == CCValAssign::SExt)
2505 ArgValue = DAG.getNode(ISD::AssertSext, dl, RegVT, ArgValue,
2506 DAG.getValueType(VA.getValVT()));
2507 else if (VA.getLocInfo() == CCValAssign::ZExt)
2508 ArgValue = DAG.getNode(ISD::AssertZext, dl, RegVT, ArgValue,
2509 DAG.getValueType(VA.getValVT()));
2510 else if (VA.getLocInfo() == CCValAssign::BCvt)
2511 ArgValue = DAG.getNode(ISD::BITCAST, dl, VA.getValVT(), ArgValue);
2513 if (VA.isExtInLoc()) {
2514 // Handle MMX values passed in XMM regs.
2515 if (RegVT.isVector())
2516 ArgValue = DAG.getNode(X86ISD::MOVDQ2Q, dl, VA.getValVT(), ArgValue);
2518 ArgValue = DAG.getNode(ISD::TRUNCATE, dl, VA.getValVT(), ArgValue);
2521 assert(VA.isMemLoc());
2522 ArgValue = LowerMemArgument(Chain, CallConv, Ins, dl, DAG, VA, MFI, i);
2525 // If value is passed via pointer - do a load.
2526 if (VA.getLocInfo() == CCValAssign::Indirect)
2527 ArgValue = DAG.getLoad(VA.getValVT(), dl, Chain, ArgValue,
2528 MachinePointerInfo(), false, false, false, 0);
2530 InVals.push_back(ArgValue);
2533 if (Subtarget->is64Bit() || Subtarget->isTargetKnownWindowsMSVC()) {
2534 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2535 // The x86-64 ABIs require that for returning structs by value we copy
2536 // the sret argument into %rax/%eax (depending on ABI) for the return.
2537 // Win32 requires us to put the sret argument to %eax as well.
2538 // Save the argument into a virtual register so that we can access it
2539 // from the return points.
2540 if (Ins[i].Flags.isSRet()) {
2541 unsigned Reg = FuncInfo->getSRetReturnReg();
2543 MVT PtrTy = getPointerTy();
2544 Reg = MF.getRegInfo().createVirtualRegister(getRegClassFor(PtrTy));
2545 FuncInfo->setSRetReturnReg(Reg);
2547 SDValue Copy = DAG.getCopyToReg(DAG.getEntryNode(), dl, Reg, InVals[i]);
2548 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Copy, Chain);
2554 unsigned StackSize = CCInfo.getNextStackOffset();
2555 // Align stack specially for tail calls.
2556 if (FuncIsMadeTailCallSafe(CallConv,
2557 MF.getTarget().Options.GuaranteedTailCallOpt))
2558 StackSize = GetAlignedArgumentStackSize(StackSize, DAG);
2560 // If the function takes variable number of arguments, make a frame index for
2561 // the start of the first vararg value... for expansion of llvm.va_start. We
2562 // can skip this if there are no va_start calls.
2563 if (MFI->hasVAStart() &&
2564 (Is64Bit || (CallConv != CallingConv::X86_FastCall &&
2565 CallConv != CallingConv::X86_ThisCall))) {
2566 FuncInfo->setVarArgsFrameIndex(
2567 MFI->CreateFixedObject(1, StackSize, true));
2570 // Figure out if XMM registers are in use.
2571 assert(!(MF.getTarget().Options.UseSoftFloat &&
2572 Fn->hasFnAttribute(Attribute::NoImplicitFloat)) &&
2573 "SSE register cannot be used when SSE is disabled!");
2575 // 64-bit calling conventions support varargs and register parameters, so we
2576 // have to do extra work to spill them in the prologue.
2577 if (Is64Bit && isVarArg && MFI->hasVAStart()) {
2578 // Find the first unallocated argument registers.
2579 ArrayRef<MCPhysReg> ArgGPRs = get64BitArgumentGPRs(CallConv, Subtarget);
2580 ArrayRef<MCPhysReg> ArgXMMs = get64BitArgumentXMMs(MF, CallConv, Subtarget);
2581 unsigned NumIntRegs =
2582 CCInfo.getFirstUnallocated(ArgGPRs.data(), ArgGPRs.size());
2583 unsigned NumXMMRegs =
2584 CCInfo.getFirstUnallocated(ArgXMMs.data(), ArgXMMs.size());
2585 assert(!(NumXMMRegs && !Subtarget->hasSSE1()) &&
2586 "SSE register cannot be used when SSE is disabled!");
2588 // Gather all the live in physical registers.
2589 SmallVector<SDValue, 6> LiveGPRs;
2590 SmallVector<SDValue, 8> LiveXMMRegs;
2592 for (MCPhysReg Reg : ArgGPRs.slice(NumIntRegs)) {
2593 unsigned GPR = MF.addLiveIn(Reg, &X86::GR64RegClass);
2595 DAG.getCopyFromReg(Chain, dl, GPR, MVT::i64));
2597 if (!ArgXMMs.empty()) {
2598 unsigned AL = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2599 ALVal = DAG.getCopyFromReg(Chain, dl, AL, MVT::i8);
2600 for (MCPhysReg Reg : ArgXMMs.slice(NumXMMRegs)) {
2601 unsigned XMMReg = MF.addLiveIn(Reg, &X86::VR128RegClass);
2602 LiveXMMRegs.push_back(
2603 DAG.getCopyFromReg(Chain, dl, XMMReg, MVT::v4f32));
2608 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
2609 // Get to the caller-allocated home save location. Add 8 to account
2610 // for the return address.
2611 int HomeOffset = TFI.getOffsetOfLocalArea() + 8;
2612 FuncInfo->setRegSaveFrameIndex(
2613 MFI->CreateFixedObject(1, NumIntRegs * 8 + HomeOffset, false));
2614 // Fixup to set vararg frame on shadow area (4 x i64).
2616 FuncInfo->setVarArgsFrameIndex(FuncInfo->getRegSaveFrameIndex());
2618 // For X86-64, if there are vararg parameters that are passed via
2619 // registers, then we must store them to their spots on the stack so
2620 // they may be loaded by dereferencing the result of va_next.
2621 FuncInfo->setVarArgsGPOffset(NumIntRegs * 8);
2622 FuncInfo->setVarArgsFPOffset(ArgGPRs.size() * 8 + NumXMMRegs * 16);
2623 FuncInfo->setRegSaveFrameIndex(MFI->CreateStackObject(
2624 ArgGPRs.size() * 8 + ArgXMMs.size() * 16, 16, false));
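// When SSE is available this register save area is 176 bytes: the six
// integer argument registers (48 bytes) followed by the eight XMM argument
// registers (128 bytes); the GP/FP offsets above index into it.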
2627 // Store the integer parameter registers.
2628 SmallVector<SDValue, 8> MemOps;
2629 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
2631 unsigned Offset = FuncInfo->getVarArgsGPOffset();
2632 for (SDValue Val : LiveGPRs) {
2633 SDValue FIN = DAG.getNode(ISD::ADD, dl, getPointerTy(), RSFIN,
2634 DAG.getIntPtrConstant(Offset));
2636 DAG.getStore(Val.getValue(1), dl, Val, FIN,
2637 MachinePointerInfo::getFixedStack(
2638 FuncInfo->getRegSaveFrameIndex(), Offset),
2640 MemOps.push_back(Store);
2644 if (!ArgXMMs.empty() && NumXMMRegs != ArgXMMs.size()) {
2645 // Now store the XMM (fp + vector) parameter registers.
2646 SmallVector<SDValue, 12> SaveXMMOps;
2647 SaveXMMOps.push_back(Chain);
2648 SaveXMMOps.push_back(ALVal);
2649 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2650 FuncInfo->getRegSaveFrameIndex()));
2651 SaveXMMOps.push_back(DAG.getIntPtrConstant(
2652 FuncInfo->getVarArgsFPOffset()));
2653 SaveXMMOps.insert(SaveXMMOps.end(), LiveXMMRegs.begin(),
2655 MemOps.push_back(DAG.getNode(X86ISD::VASTART_SAVE_XMM_REGS, dl,
2656 MVT::Other, SaveXMMOps));
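// In the SysV varargs convention %al carries the number of vector registers
// actually used, so VASTART_SAVE_XMM_REGS is later expanded into a test of
// AL that skips the XMM spills when no vector arguments were passed.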
2659 if (!MemOps.empty())
2660 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
2663 if (isVarArg && MFI->hasMustTailInVarArgFunc()) {
2664 // Find the largest legal vector type.
2665 MVT VecVT = MVT::Other;
2666 // FIXME: Only some x86_32 calling conventions support AVX512.
2667 if (Subtarget->hasAVX512() &&
2668 (Is64Bit || (CallConv == CallingConv::X86_VectorCall ||
2669 CallConv == CallingConv::Intel_OCL_BI)))
2670 VecVT = MVT::v16f32;
2671 else if (Subtarget->hasAVX())
2673 else if (Subtarget->hasSSE2())
2676 // We forward some GPRs and some vector types.
2677 SmallVector<MVT, 2> RegParmTypes;
2678 MVT IntVT = Is64Bit ? MVT::i64 : MVT::i32;
2679 RegParmTypes.push_back(IntVT);
2680 if (VecVT != MVT::Other)
2681 RegParmTypes.push_back(VecVT);
2683 // Compute the set of forwarded registers. The rest are scratch.
2684 SmallVectorImpl<ForwardedRegister> &Forwards =
2685 FuncInfo->getForwardedMustTailRegParms();
2686 CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, CC_X86);
2688 // Conservatively forward AL on x86_64, since it might be used for varargs.
2689 if (Is64Bit && !CCInfo.isAllocated(X86::AL)) {
2690 unsigned ALVReg = MF.addLiveIn(X86::AL, &X86::GR8RegClass);
2691 Forwards.push_back(ForwardedRegister(ALVReg, X86::AL, MVT::i8));
2694 // Copy all forwards from physical to virtual registers.
2695 for (ForwardedRegister &F : Forwards) {
2696 // FIXME: Can we use a less constrained schedule?
2697 SDValue RegVal = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
2698 F.VReg = MF.getRegInfo().createVirtualRegister(getRegClassFor(F.VT));
2699 Chain = DAG.getCopyToReg(Chain, dl, F.VReg, RegVal);
2703 // Some CCs need callee pop.
2704 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
2705 MF.getTarget().Options.GuaranteedTailCallOpt)) {
2706 FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything.
2708 FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing.
2709 // If this is an sret function, the return should pop the hidden pointer.
2710 if (!Is64Bit && !IsTailCallConvention(CallConv) &&
2711 !Subtarget->getTargetTriple().isOSMSVCRT() &&
2712 argsAreStructReturn(Ins) == StackStructReturn)
2713 FuncInfo->setBytesToPopOnReturn(4);
2717 // RegSaveFrameIndex is X86-64 only.
2718 FuncInfo->setRegSaveFrameIndex(0xAAAAAAA);
2719 if (CallConv == CallingConv::X86_FastCall ||
2720 CallConv == CallingConv::X86_ThisCall)
2721 // fastcc functions can't have varargs.
2722 FuncInfo->setVarArgsFrameIndex(0xAAAAAAA);
2725 FuncInfo->setArgumentStackSize(StackSize);
2731 X86TargetLowering::LowerMemOpCallTo(SDValue Chain,
2732 SDValue StackPtr, SDValue Arg,
2733 SDLoc dl, SelectionDAG &DAG,
2734 const CCValAssign &VA,
2735 ISD::ArgFlagsTy Flags) const {
2736 unsigned LocMemOffset = VA.getLocMemOffset();
2737 SDValue PtrOff = DAG.getIntPtrConstant(LocMemOffset);
2738 PtrOff = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, PtrOff);
2739 if (Flags.isByVal())
2740 return CreateCopyOfByValArgument(Arg, PtrOff, Chain, Flags, DAG, dl);
2742 return DAG.getStore(Chain, dl, Arg, PtrOff,
2743 MachinePointerInfo::getStack(LocMemOffset),
2747 /// Emit a load of the return address if tail call
2748 /// optimization is performed and it is required.
2750 X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG,
2751 SDValue &OutRetAddr, SDValue Chain,
2752 bool IsTailCall, bool Is64Bit,
2753 int FPDiff, SDLoc dl) const {
2754 // Adjust the Return address stack slot.
2755 EVT VT = getPointerTy();
2756 OutRetAddr = getReturnAddressFrameIndex(DAG);
2758 // Load the "old" Return address.
2759 OutRetAddr = DAG.getLoad(VT, dl, Chain, OutRetAddr, MachinePointerInfo(),
2760 false, false, false, 0);
2761 return SDValue(OutRetAddr.getNode(), 1);
2764 /// Emit a store of the return address if tail call
2765 /// optimization is performed and it is required (FPDiff!=0).
2766 static SDValue EmitTailCallStoreRetAddr(SelectionDAG &DAG, MachineFunction &MF,
2767 SDValue Chain, SDValue RetAddrFrIdx,
2768 EVT PtrVT, unsigned SlotSize,
2769 int FPDiff, SDLoc dl) {
2770 // Store the return address to the appropriate stack slot.
2771 if (!FPDiff) return Chain;
2772 // Calculate the new stack slot for the return address.
2773 int NewReturnAddrFI =
2774 MF.getFrameInfo()->CreateFixedObject(SlotSize, (int64_t)FPDiff - SlotSize,
2776 SDValue NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, PtrVT);
2777 Chain = DAG.getStore(Chain, dl, RetAddrFrIdx, NewRetAddrFrIdx,
2778 MachinePointerInfo::getFixedStack(NewReturnAddrFI),
2784 X86TargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
2785 SmallVectorImpl<SDValue> &InVals) const {
2786 SelectionDAG &DAG = CLI.DAG;
2788 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2789 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2790 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2791 SDValue Chain = CLI.Chain;
2792 SDValue Callee = CLI.Callee;
2793 CallingConv::ID CallConv = CLI.CallConv;
2794 bool &isTailCall = CLI.IsTailCall;
2795 bool isVarArg = CLI.IsVarArg;
2797 MachineFunction &MF = DAG.getMachineFunction();
2798 bool Is64Bit = Subtarget->is64Bit();
2799 bool IsWin64 = Subtarget->isCallingConvWin64(CallConv);
2800 StructReturnType SR = callIsStructReturn(Outs);
2801 bool IsSibcall = false;
2802 X86MachineFunctionInfo *X86Info = MF.getInfo<X86MachineFunctionInfo>();
2804 if (MF.getTarget().Options.DisableTailCalls)
2807 bool IsMustTail = CLI.CS && CLI.CS->isMustTailCall();
2809 // Force this to be a tail call. The verifier rules are enough to ensure
2810 // that we can lower this successfully without moving the return address around.
2813 } else if (isTailCall) {
2814 // Check if it's really possible to do a tail call.
2815 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
2816 isVarArg, SR != NotStructReturn,
2817 MF.getFunction()->hasStructRetAttr(), CLI.RetTy,
2818 Outs, OutVals, Ins, DAG);
2820 // Sibcalls are automatically detected tailcalls which do not require ABI changes.
2822 if (!MF.getTarget().Options.GuaranteedTailCallOpt && isTailCall)
2829 assert(!(isVarArg && IsTailCallConvention(CallConv)) &&
2830 "Var args not supported with calling convention fastcc, ghc or hipe");
2832 // Analyze operands of the call, assigning locations to each operand.
2833 SmallVector<CCValAssign, 16> ArgLocs;
2834 CCState CCInfo(CallConv, isVarArg, MF, ArgLocs, *DAG.getContext());
2836 // Allocate shadow area for Win64
2838 CCInfo.AllocateStack(32, 8);
2840 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
2842 // Get a count of how many bytes are to be pushed on the stack.
2843 unsigned NumBytes = CCInfo.getNextStackOffset();
2845 // This is a sibcall. The memory operands are available in the caller's
2846 // own caller's stack.
2848 else if (MF.getTarget().Options.GuaranteedTailCallOpt &&
2849 IsTailCallConvention(CallConv))
2850 NumBytes = GetAlignedArgumentStackSize(NumBytes, DAG);
2853 if (isTailCall && !IsSibcall && !IsMustTail) {
2854 // Lower arguments at fp - stackoffset + fpdiff.
2855 unsigned NumBytesCallerPushed = X86Info->getBytesToPopOnReturn();
2857 FPDiff = NumBytesCallerPushed - NumBytes;
2859 // Set the delta of movement of the return address stack slot, but only if
2860 // this delta is smaller (i.e. the return address moves further) than the previous one.
2861 if (FPDiff < X86Info->getTCReturnAddrDelta())
2862 X86Info->setTCReturnAddrDelta(FPDiff);
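// Illustrative example (not from the original source): if the caller pushed
// 8 bytes of arguments but the callee needs 24, FPDiff is -16 and the return
// address has to be moved 16 bytes further down the stack.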
2865 unsigned NumBytesToPush = NumBytes;
2866 unsigned NumBytesToPop = NumBytes;
2868 // If we have an inalloca argument, all stack space has already been allocated
2869 // for us and will be right at the top of the stack. We don't support multiple
2870 // arguments passed in memory when using inalloca.
2871 if (!Outs.empty() && Outs.back().Flags.isInAlloca()) {
2873 if (!ArgLocs.back().isMemLoc())
2874 report_fatal_error("cannot use inalloca attribute on a register "
2876 if (ArgLocs.back().getLocMemOffset() != 0)
2877 report_fatal_error("any parameter with the inalloca attribute must be "
2878 "the only memory argument");
2882 Chain = DAG.getCALLSEQ_START(
2883 Chain, DAG.getIntPtrConstant(NumBytesToPush, true), dl);
2885 SDValue RetAddrFrIdx;
2886 // Load return address for tail calls.
2887 if (isTailCall && FPDiff)
2888 Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall,
2889 Is64Bit, FPDiff, dl);
2891 SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
2892 SmallVector<SDValue, 8> MemOpChains;
2895 // Walk the register/memloc assignments, inserting copies/loads. In the case
2896 // of tail call optimization, arguments are handled later.
2897 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
2898 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2899 // Skip inalloca arguments, they have already been written.
2900 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2901 if (Flags.isInAlloca())
2904 CCValAssign &VA = ArgLocs[i];
2905 EVT RegVT = VA.getLocVT();
2906 SDValue Arg = OutVals[i];
2907 bool isByVal = Flags.isByVal();
2909 // Promote the value if needed.
2910 switch (VA.getLocInfo()) {
2911 default: llvm_unreachable("Unknown loc info!");
2912 case CCValAssign::Full: break;
2913 case CCValAssign::SExt:
2914 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, RegVT, Arg);
2916 case CCValAssign::ZExt:
2917 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, RegVT, Arg);
2919 case CCValAssign::AExt:
2920 if (RegVT.is128BitVector()) {
2921 // Special case: passing MMX values in XMM registers.
2922 Arg = DAG.getNode(ISD::BITCAST, dl, MVT::i64, Arg);
2923 Arg = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64, Arg);
2924 Arg = getMOVL(DAG, dl, MVT::v2i64, DAG.getUNDEF(MVT::v2i64), Arg);
2926 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, RegVT, Arg);
2928 case CCValAssign::BCvt:
2929 Arg = DAG.getNode(ISD::BITCAST, dl, RegVT, Arg);
2931 case CCValAssign::Indirect: {
2932 // Store the argument.
2933 SDValue SpillSlot = DAG.CreateStackTemporary(VA.getValVT());
2934 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2935 Chain = DAG.getStore(Chain, dl, Arg, SpillSlot,
2936 MachinePointerInfo::getFixedStack(FI),
2943 if (VA.isRegLoc()) {
2944 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
2945 if (isVarArg && IsWin64) {
2946 // Win64 ABI requires argument XMM reg to be copied to the corresponding
2947 // shadow reg if callee is a varargs function.
2948 unsigned ShadowReg = 0;
2949 switch (VA.getLocReg()) {
2950 case X86::XMM0: ShadowReg = X86::RCX; break;
2951 case X86::XMM1: ShadowReg = X86::RDX; break;
2952 case X86::XMM2: ShadowReg = X86::R8; break;
2953 case X86::XMM3: ShadowReg = X86::R9; break;
2956 RegsToPass.push_back(std::make_pair(ShadowReg, Arg));
2958 } else if (!IsSibcall && (!isTailCall || isByVal)) {
2959 assert(VA.isMemLoc());
2960 if (!StackPtr.getNode())
2961 StackPtr = DAG.getCopyFromReg(Chain, dl, RegInfo->getStackRegister(),
2963 MemOpChains.push_back(LowerMemOpCallTo(Chain, StackPtr, Arg,
2964 dl, DAG, VA, Flags));
2968 if (!MemOpChains.empty())
2969 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
2971 if (Subtarget->isPICStyleGOT()) {
2972 // ELF / PIC requires GOT in the EBX register before function calls via PLT
2975 RegsToPass.push_back(std::make_pair(unsigned(X86::EBX),
2976 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), getPointerTy())));
2978 // If we are tail calling and generating PIC/GOT style code load the
2979 // address of the callee into ECX. The value in ecx is used as target of
2980 // the tail jump. This is done to circumvent the ebx/callee-saved problem
2981 // for tail calls on PIC/GOT architectures. Normally we would just put the
2982 // address of GOT into ebx and then call target@PLT. But for tail calls
2983 // ebx would be restored (since ebx is callee saved) before jumping to the target@PLT.
2986 // Note: The actual moving to ECX is done further down.
2987 GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee);
2988 if (G && !G->getGlobal()->hasHiddenVisibility() &&
2989 !G->getGlobal()->hasProtectedVisibility())
2990 Callee = LowerGlobalAddress(Callee, DAG);
2991 else if (isa<ExternalSymbolSDNode>(Callee))
2992 Callee = LowerExternalSymbol(Callee, DAG);
2996 if (Is64Bit && isVarArg && !IsWin64 && !IsMustTail) {
2997 // From AMD64 ABI document:
2998 // For calls that may call functions that use varargs or stdargs
2999 // (prototype-less calls or calls to functions containing ellipsis (...) in
3000 // the declaration) %al is used as hidden argument to specify the number
3001 // of SSE registers used. The contents of %al do not need to match exactly
3002 // the number of registers, but must be an upper bound on the number of SSE
3003 // registers used and is in the range 0 - 8 inclusive.
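// Illustrative example (not from the original source): a variadic call that
// passes two doubles in XMM0 and XMM1 would set %al to 2; any value from 2 to
// 8 would also satisfy the ABI, since %al only has to be an upper bound.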
3005 // Count the number of XMM registers allocated.
3006 static const MCPhysReg XMMArgRegs[] = {
3007 X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,
3008 X86::XMM4, X86::XMM5, X86::XMM6, X86::XMM7
3010 unsigned NumXMMRegs = CCInfo.getFirstUnallocated(XMMArgRegs, 8);
3011 assert((Subtarget->hasSSE1() || !NumXMMRegs)
3012 && "SSE registers cannot be used when SSE is disabled");
3014 RegsToPass.push_back(std::make_pair(unsigned(X86::AL),
3015 DAG.getConstant(NumXMMRegs, MVT::i8)));
3018 if (isVarArg && IsMustTail) {
3019 const auto &Forwards = X86Info->getForwardedMustTailRegParms();
3020 for (const auto &F : Forwards) {
3021 SDValue Val = DAG.getCopyFromReg(Chain, dl, F.VReg, F.VT);
3022 RegsToPass.push_back(std::make_pair(unsigned(F.PReg), Val));
3026 // For tail calls lower the arguments to the 'real' stack slots. Sibcalls
3027 // don't need this because the eligibility check rejects calls that require
3028 // shuffling arguments passed in memory.
3029 if (!IsSibcall && isTailCall) {
3030 // Force all the incoming stack arguments to be loaded from the stack
3031 // before any new outgoing arguments are stored to the stack, because the
3032 // outgoing stack slots may alias the incoming argument stack slots, and
3033 // the alias isn't otherwise explicit. This is slightly more conservative
3034 // than necessary, because it means that each store effectively depends
3035 // on every argument instead of just those arguments it would clobber.
3036 SDValue ArgChain = DAG.getStackArgumentTokenFactor(Chain);
3038 SmallVector<SDValue, 8> MemOpChains2;
3041 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3042 CCValAssign &VA = ArgLocs[i];
3045 assert(VA.isMemLoc());
3046 SDValue Arg = OutVals[i];
3047 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3048 // Skip inalloca arguments. They don't require any work.
3049 if (Flags.isInAlloca())
3051 // Create frame index.
3052 int32_t Offset = VA.getLocMemOffset()+FPDiff;
3053 uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
3054 FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset, true);
3055 FIN = DAG.getFrameIndex(FI, getPointerTy());
3057 if (Flags.isByVal()) {
3058 // Copy relative to framepointer.
3059 SDValue Source = DAG.getIntPtrConstant(VA.getLocMemOffset());
3060 if (!StackPtr.getNode())
3061 StackPtr = DAG.getCopyFromReg(Chain, dl,
3062 RegInfo->getStackRegister(),
3064 Source = DAG.getNode(ISD::ADD, dl, getPointerTy(), StackPtr, Source);
3066 MemOpChains2.push_back(CreateCopyOfByValArgument(Source, FIN,
3070 // Store relative to framepointer.
3071 MemOpChains2.push_back(
3072 DAG.getStore(ArgChain, dl, Arg, FIN,
3073 MachinePointerInfo::getFixedStack(FI),
3078 if (!MemOpChains2.empty())
3079 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains2);
3081 // Store the return address to the appropriate stack slot.
3082 Chain = EmitTailCallStoreRetAddr(DAG, MF, Chain, RetAddrFrIdx,
3083 getPointerTy(), RegInfo->getSlotSize(),
3087 // Build a sequence of copy-to-reg nodes chained together with token chain
3088 // and flag operands which copy the outgoing args into registers.
3090 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
3091 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
3092 RegsToPass[i].second, InFlag);
3093 InFlag = Chain.getValue(1);
3096 if (DAG.getTarget().getCodeModel() == CodeModel::Large) {
3097 assert(Is64Bit && "Large code model is only legal in 64-bit mode.");
3098 // In the 64-bit large code model, we have to make all calls
3099 // through a register, since the call instruction's 32-bit
3100 // pc-relative offset may not be large enough to hold the whole address.
3102 } else if (Callee->getOpcode() == ISD::GlobalAddress) {
3103 // If the callee is a GlobalAddress node (quite common, every direct call
3104 // is) turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
3106 GlobalAddressSDNode* G = cast<GlobalAddressSDNode>(Callee);
3108 // We should use extra load for direct calls to dllimported functions in non-JIT mode.
3110 const GlobalValue *GV = G->getGlobal();
3111 if (!GV->hasDLLImportStorageClass()) {
3112 unsigned char OpFlags = 0;
3113 bool ExtraLoad = false;
3114 unsigned WrapperKind = ISD::DELETED_NODE;
3116 // On ELF targets, in both X86-64 and X86-32 mode, direct calls to
3117 // external symbols must go through the PLT in PIC mode. If the symbol
3118 // has hidden or protected visibility, or if it is static or local, then
3119 // we don't need to use the PLT - we can directly call it.
3120 if (Subtarget->isTargetELF() &&
3121 DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
3122 GV->hasDefaultVisibility() && !GV->hasLocalLinkage()) {
3123 OpFlags = X86II::MO_PLT;
3124 } else if (Subtarget->isPICStyleStubAny() &&
3125 (GV->isDeclaration() || GV->isWeakForLinker()) &&
3126 (!Subtarget->getTargetTriple().isMacOSX() ||
3127 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3128 // PC-relative references to external symbols should go through $stub,
3129 // unless we're building with the leopard linker or later, which
3130 // automatically synthesizes these stubs.
3131 OpFlags = X86II::MO_DARWIN_STUB;
3132 } else if (Subtarget->isPICStyleRIPRel() && isa<Function>(GV) &&
3133 cast<Function>(GV)->hasFnAttribute(Attribute::NonLazyBind)) {
3134 // If the function is marked as non-lazy, generate an indirect call
3135 // which loads from the GOT directly. This avoids runtime overhead
3136 // at the cost of eager binding (and one extra byte of encoding).
3137 OpFlags = X86II::MO_GOTPCREL;
3138 WrapperKind = X86ISD::WrapperRIP;
3142 Callee = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(),
3143 G->getOffset(), OpFlags);
3145 // Add a wrapper if needed.
3146 if (WrapperKind != ISD::DELETED_NODE)
3147 Callee = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Callee);
3148 // Add extra indirection if needed.
3150 Callee = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Callee,
3151 MachinePointerInfo::getGOT(),
3152 false, false, false, 0);
3154 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
3155 unsigned char OpFlags = 0;
3157 // On ELF targets, in either X86-64 or X86-32 mode, direct calls to
3158 // external symbols should go through the PLT.
3159 if (Subtarget->isTargetELF() &&
3160 DAG.getTarget().getRelocationModel() == Reloc::PIC_) {
3161 OpFlags = X86II::MO_PLT;
3162 } else if (Subtarget->isPICStyleStubAny() &&
3163 (!Subtarget->getTargetTriple().isMacOSX() ||
3164 Subtarget->getTargetTriple().isMacOSXVersionLT(10, 5))) {
3165 // PC-relative references to external symbols should go through $stub,
3166 // unless we're building with the leopard linker or later, which
3167 // automatically synthesizes these stubs.
3168 OpFlags = X86II::MO_DARWIN_STUB;
3171 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy(),
3173 } else if (Subtarget->isTarget64BitILP32() &&
3174 Callee->getValueType(0) == MVT::i32) {
3175 // Zero-extend the 32-bit Callee address into 64 bits according to the x32 ABI
3176 Callee = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, Callee);
3179 // Returns a chain & a flag for retval copy to use.
3180 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
3181 SmallVector<SDValue, 8> Ops;
3183 if (!IsSibcall && isTailCall) {
3184 Chain = DAG.getCALLSEQ_END(Chain,
3185 DAG.getIntPtrConstant(NumBytesToPop, true),
3186 DAG.getIntPtrConstant(0, true), InFlag, dl);
3187 InFlag = Chain.getValue(1);
3190 Ops.push_back(Chain);
3191 Ops.push_back(Callee);
3194 Ops.push_back(DAG.getConstant(FPDiff, MVT::i32));
3196 // Add argument registers to the end of the list so that they are known live into the call.
3198 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
3199 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
3200 RegsToPass[i].second.getValueType()));
3202 // Add a register mask operand representing the call-preserved registers.
3203 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
3204 const uint32_t *Mask = TRI->getCallPreservedMask(CallConv);
3205 assert(Mask && "Missing call preserved mask for calling convention");
3206 Ops.push_back(DAG.getRegisterMask(Mask));
3208 if (InFlag.getNode())
3209 Ops.push_back(InFlag);
3213 //// If this is the first return lowered for this function, add the regs
3214 //// to the liveout set for the function.
3215 // This isn't right, although it's probably harmless on x86; liveouts
3216 // should be computed from returns not tail calls. Consider a void
3217 // function making a tail call to a function returning int.
3218 return DAG.getNode(X86ISD::TC_RETURN, dl, NodeTys, Ops);
3221 Chain = DAG.getNode(X86ISD::CALL, dl, NodeTys, Ops);
3222 InFlag = Chain.getValue(1);
3224 // Create the CALLSEQ_END node.
3225 unsigned NumBytesForCalleeToPop;
3226 if (X86::isCalleePop(CallConv, Is64Bit, isVarArg,
3227 DAG.getTarget().Options.GuaranteedTailCallOpt))
3228 NumBytesForCalleeToPop = NumBytes; // Callee pops everything
3229 else if (!Is64Bit && !IsTailCallConvention(CallConv) &&
3230 !Subtarget->getTargetTriple().isOSMSVCRT() &&
3231 SR == StackStructReturn)
3232 // If this is a call to a struct-return function, the callee
3233 // pops the hidden struct pointer, so we have to push it back.
3234 // This is common for Darwin/X86, Linux & Mingw32 targets.
3235 // For MSVC Win32 targets, the caller pops the hidden struct pointer.
3236 NumBytesForCalleeToPop = 4;
3238 NumBytesForCalleeToPop = 0; // Callee pops nothing.
3240 // Returns a flag for retval copy to use.
3242 Chain = DAG.getCALLSEQ_END(Chain,
3243 DAG.getIntPtrConstant(NumBytesToPop, true),
3244 DAG.getIntPtrConstant(NumBytesForCalleeToPop,
3247 InFlag = Chain.getValue(1);
3250 // Handle result values, copying them out of physregs into vregs that we return.
3252 return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
3253 Ins, dl, DAG, InVals);
3256 //===----------------------------------------------------------------------===//
3257 // Fast Calling Convention (tail call) implementation
3258 //===----------------------------------------------------------------------===//
3260 // Like stdcall, the callee cleans up the arguments; the convention differs in
3261 // that ECX is reserved for storing the tail-called function's address. Only 2
3262 // registers are free for argument passing (inreg). Tail call optimization is
3263 // performed provided that:
3264 // * tailcallopt is enabled
3265 // * caller/callee are fastcc
3266 // On X86_64 architecture with GOT-style position independent code only local
3267 // (within module) calls are supported at the moment.
3268 // To keep the stack aligned according to the platform ABI, the function
3269 // GetAlignedArgumentStackSize ensures that the argument delta is always a
3270 // multiple of the stack alignment. (Dynamic linkers need this - darwin's dyld,
3271 // for example.) If a tail-called callee has more arguments than the caller, the
3272 // caller needs to make sure that there is room to move the RETADDR to. This is
3273 // achieved by reserving an area the size of the argument delta right after the
3274 // original RETADDR, but before the saved frame pointer or the spilled registers,
3275 // e.g. caller(arg1, arg2) calls callee(arg1, arg2,arg3,arg4)
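// A rough, illustrative sketch (not from the original source) of the caller's
// stack in that case, from higher to lower addresses:
//
//   incoming arg2
//   incoming arg1
//   RETADDR                <- original return address slot
//   [argument-delta area]  <- room reserved so RETADDR can be moved down
//   saved frame pointer / spilled callee-saved registers
//   locals ...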
3287 /// GetAlignedArgumentStackSize - Align the stack size, e.g. to 16n + 12 bytes,
3288 /// for a 16-byte alignment requirement.
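/// Illustrative example (not part of the original comment): with StackSize = 20,
/// a 16-byte stack alignment and an 8-byte slot size, the result is 24, i.e.
/// 16n + 8; pushing the 8-byte return address then restores 16-byte alignment.
/// (The 16n + 12 form above corresponds to a 4-byte slot size.)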
3290 X86TargetLowering::GetAlignedArgumentStackSize(unsigned StackSize,
3291 SelectionDAG& DAG) const {
3292 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3293 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
3294 unsigned StackAlignment = TFI.getStackAlignment();
3295 uint64_t AlignMask = StackAlignment - 1;
3296 int64_t Offset = StackSize;
3297 unsigned SlotSize = RegInfo->getSlotSize();
3298 if ( (Offset & AlignMask) <= (StackAlignment - SlotSize) ) {
3299 // Number smaller than 12 so just add the difference.
3300 Offset += ((StackAlignment - SlotSize) - (Offset & AlignMask));
3302 // Mask out the lower bits, add the stack alignment once plus the (StackAlignment - SlotSize) bytes.
3303 Offset = ((~AlignMask) & Offset) + StackAlignment +
3304 (StackAlignment-SlotSize);
3309 /// MatchingStackOffset - Return true if the given stack call argument is
3310 /// already available in the same position (relatively) of the caller's
3311 /// incoming argument stack.
3313 bool MatchingStackOffset(SDValue Arg, unsigned Offset, ISD::ArgFlagsTy Flags,
3314 MachineFrameInfo *MFI, const MachineRegisterInfo *MRI,
3315 const X86InstrInfo *TII) {
3316 unsigned Bytes = Arg.getValueType().getSizeInBits() / 8;
3318 if (Arg.getOpcode() == ISD::CopyFromReg) {
3319 unsigned VR = cast<RegisterSDNode>(Arg.getOperand(1))->getReg();
3320 if (!TargetRegisterInfo::isVirtualRegister(VR))
3322 MachineInstr *Def = MRI->getVRegDef(VR);
3325 if (!Flags.isByVal()) {
3326 if (!TII->isLoadFromStackSlot(Def, FI))
3329 unsigned Opcode = Def->getOpcode();
3330 if ((Opcode == X86::LEA32r || Opcode == X86::LEA64r ||
3331 Opcode == X86::LEA64_32r) &&
3332 Def->getOperand(1).isFI()) {
3333 FI = Def->getOperand(1).getIndex();
3334 Bytes = Flags.getByValSize();
3338 } else if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(Arg)) {
3339 if (Flags.isByVal())
3340 // ByVal argument is passed in as a pointer but it's now being
3341 // dereferenced. e.g.
3342 // define @foo(%struct.X* %A) {
3343 // tail call @bar(%struct.X* byval %A)
3346 SDValue Ptr = Ld->getBasePtr();
3347 FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr);
3350 FI = FINode->getIndex();
3351 } else if (Arg.getOpcode() == ISD::FrameIndex && Flags.isByVal()) {
3352 FrameIndexSDNode *FINode = cast<FrameIndexSDNode>(Arg);
3353 FI = FINode->getIndex();
3354 Bytes = Flags.getByValSize();
3358 assert(FI != INT_MAX);
3359 if (!MFI->isFixedObjectIndex(FI))
3361 return Offset == MFI->getObjectOffset(FI) && Bytes == MFI->getObjectSize(FI);
3364 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
3365 /// for tail call optimization. Targets which want to do tail call
3366 /// optimization should implement this function.
3368 X86TargetLowering::IsEligibleForTailCallOptimization(SDValue Callee,
3369 CallingConv::ID CalleeCC,
3371 bool isCalleeStructRet,
3372 bool isCallerStructRet,
3374 const SmallVectorImpl<ISD::OutputArg> &Outs,
3375 const SmallVectorImpl<SDValue> &OutVals,
3376 const SmallVectorImpl<ISD::InputArg> &Ins,
3377 SelectionDAG &DAG) const {
3378 if (!IsTailCallConvention(CalleeCC) && !IsCCallConvention(CalleeCC))
3381 // If -tailcallopt is specified, make fastcc functions tail-callable.
3382 const MachineFunction &MF = DAG.getMachineFunction();
3383 const Function *CallerF = MF.getFunction();
3385 // If the function return type is x86_fp80 and the callee return type is not,
3386 // then the FP_EXTEND of the call result is not a nop. It's not safe to
3387 // perform a tailcall optimization here.
3388 if (CallerF->getReturnType()->isX86_FP80Ty() && !RetTy->isX86_FP80Ty())
3391 CallingConv::ID CallerCC = CallerF->getCallingConv();
3392 bool CCMatch = CallerCC == CalleeCC;
3393 bool IsCalleeWin64 = Subtarget->isCallingConvWin64(CalleeCC);
3394 bool IsCallerWin64 = Subtarget->isCallingConvWin64(CallerCC);
3396 if (DAG.getTarget().Options.GuaranteedTailCallOpt) {
3397 if (IsTailCallConvention(CalleeCC) && CCMatch)
3402 // Look for obvious safe cases to perform tail call optimization that do not
3403 // require ABI changes. This is what gcc calls sibcall.
3405 // Can't do sibcall if stack needs to be dynamically re-aligned. PEI needs to
3406 // emit a special epilogue.
3407 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3408 if (RegInfo->needsStackRealignment(MF))
3411 // Also avoid sibcall optimization if either caller or callee uses struct
3412 // return semantics.
3413 if (isCalleeStructRet || isCallerStructRet)
3416 // An stdcall/thiscall caller is expected to clean up its arguments; the
3417 // callee isn't going to do that.
3418 // FIXME: this is more restrictive than needed. We could produce a tailcall
3419 // when the stack adjustment matches. For example, with a thiscall that takes
3420 // only one argument.
3421 if (!CCMatch && (CallerCC == CallingConv::X86_StdCall ||
3422 CallerCC == CallingConv::X86_ThisCall))
3425 // Do not sibcall optimize vararg calls unless all arguments are passed via registers.
3427 if (isVarArg && !Outs.empty()) {
3429 // Optimizing for varargs on Win64 is unlikely to be safe without
3430 // additional testing.
3431 if (IsCalleeWin64 || IsCallerWin64)
3434 SmallVector<CCValAssign, 16> ArgLocs;
3435 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3438 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3439 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i)
3440 if (!ArgLocs[i].isRegLoc())
3444 // If the call result is in ST0 / ST1, it needs to be popped off the x87
3445 // stack. Therefore, if it's not used by the call it is not safe to optimize
3446 // this into a sibcall.
3447 bool Unused = false;
3448 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
3455 SmallVector<CCValAssign, 16> RVLocs;
3456 CCState CCInfo(CalleeCC, false, DAG.getMachineFunction(), RVLocs,
3458 CCInfo.AnalyzeCallResult(Ins, RetCC_X86);
3459 for (unsigned i = 0, e = RVLocs.size(); i != e; ++i) {
3460 CCValAssign &VA = RVLocs[i];
3461 if (VA.getLocReg() == X86::FP0 || VA.getLocReg() == X86::FP1)
3466 // If the calling conventions do not match, then we'd better make sure the
3467 // results are returned in the same way as what the caller expects.
3469 SmallVector<CCValAssign, 16> RVLocs1;
3470 CCState CCInfo1(CalleeCC, false, DAG.getMachineFunction(), RVLocs1,
3472 CCInfo1.AnalyzeCallResult(Ins, RetCC_X86);
3474 SmallVector<CCValAssign, 16> RVLocs2;
3475 CCState CCInfo2(CallerCC, false, DAG.getMachineFunction(), RVLocs2,
3477 CCInfo2.AnalyzeCallResult(Ins, RetCC_X86);
3479 if (RVLocs1.size() != RVLocs2.size())
3481 for (unsigned i = 0, e = RVLocs1.size(); i != e; ++i) {
3482 if (RVLocs1[i].isRegLoc() != RVLocs2[i].isRegLoc())
3484 if (RVLocs1[i].getLocInfo() != RVLocs2[i].getLocInfo())
3486 if (RVLocs1[i].isRegLoc()) {
3487 if (RVLocs1[i].getLocReg() != RVLocs2[i].getLocReg())
3490 if (RVLocs1[i].getLocMemOffset() != RVLocs2[i].getLocMemOffset())
3496 // If the callee takes no arguments then go on to check the results of the call.
3498 if (!Outs.empty()) {
3499 // Check if stack adjustment is needed. For now, do not do this if any
3500 // argument is passed on the stack.
3501 SmallVector<CCValAssign, 16> ArgLocs;
3502 CCState CCInfo(CalleeCC, isVarArg, DAG.getMachineFunction(), ArgLocs,
3505 // Allocate shadow area for Win64
3507 CCInfo.AllocateStack(32, 8);
3509 CCInfo.AnalyzeCallOperands(Outs, CC_X86);
3510 if (CCInfo.getNextStackOffset()) {
3511 MachineFunction &MF = DAG.getMachineFunction();
3512 if (MF.getInfo<X86MachineFunctionInfo>()->getBytesToPopOnReturn())
3515 // Check if the arguments are already laid out in the right way as
3516 // the caller's fixed stack objects.
3517 MachineFrameInfo *MFI = MF.getFrameInfo();
3518 const MachineRegisterInfo *MRI = &MF.getRegInfo();
3519 const X86InstrInfo *TII = Subtarget->getInstrInfo();
3520 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3521 CCValAssign &VA = ArgLocs[i];
3522 SDValue Arg = OutVals[i];
3523 ISD::ArgFlagsTy Flags = Outs[i].Flags;
3524 if (VA.getLocInfo() == CCValAssign::Indirect)
3526 if (!VA.isRegLoc()) {
3527 if (!MatchingStackOffset(Arg, VA.getLocMemOffset(), Flags,
3534 // If the tailcall address may be in a register, then make sure it's
3535 // possible to register allocate for it. In 32-bit, the call address can
3536 // only target EAX, EDX, or ECX since the tail call must be scheduled after
3537 // callee-saved registers are restored. These happen to be the same
3538 // registers used to pass 'inreg' arguments so watch out for those.
3539 if (!Subtarget->is64Bit() &&
3540 ((!isa<GlobalAddressSDNode>(Callee) &&
3541 !isa<ExternalSymbolSDNode>(Callee)) ||
3542 DAG.getTarget().getRelocationModel() == Reloc::PIC_)) {
3543 unsigned NumInRegs = 0;
3544 // In PIC we need an extra register to formulate the address computation for the callee.
3546 unsigned MaxInRegs =
3547 (DAG.getTarget().getRelocationModel() == Reloc::PIC_) ? 2 : 3;
3549 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
3550 CCValAssign &VA = ArgLocs[i];
3553 unsigned Reg = VA.getLocReg();
3556 case X86::EAX: case X86::EDX: case X86::ECX:
3557 if (++NumInRegs == MaxInRegs)
3569 X86TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
3570 const TargetLibraryInfo *libInfo) const {
3571 return X86::createFastISel(funcInfo, libInfo);
3574 //===----------------------------------------------------------------------===//
3575 // Other Lowering Hooks
3576 //===----------------------------------------------------------------------===//
3578 static bool MayFoldLoad(SDValue Op) {
3579 return Op.hasOneUse() && ISD::isNormalLoad(Op.getNode());
3582 static bool MayFoldIntoStore(SDValue Op) {
3583 return Op.hasOneUse() && ISD::isNormalStore(*Op.getNode()->use_begin());
3586 static bool isTargetShuffle(unsigned Opcode) {
3588 default: return false;
3589 case X86ISD::BLENDI:
3590 case X86ISD::PSHUFB:
3591 case X86ISD::PSHUFD:
3592 case X86ISD::PSHUFHW:
3593 case X86ISD::PSHUFLW:
3595 case X86ISD::PALIGNR:
3596 case X86ISD::MOVLHPS:
3597 case X86ISD::MOVLHPD:
3598 case X86ISD::MOVHLPS:
3599 case X86ISD::MOVLPS:
3600 case X86ISD::MOVLPD:
3601 case X86ISD::MOVSHDUP:
3602 case X86ISD::MOVSLDUP:
3603 case X86ISD::MOVDDUP:
3606 case X86ISD::UNPCKL:
3607 case X86ISD::UNPCKH:
3608 case X86ISD::VPERMILPI:
3609 case X86ISD::VPERM2X128:
3610 case X86ISD::VPERMI:
3615 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3616 SDValue V1, SelectionDAG &DAG) {
3618 default: llvm_unreachable("Unknown x86 shuffle node");
3619 case X86ISD::MOVSHDUP:
3620 case X86ISD::MOVSLDUP:
3621 case X86ISD::MOVDDUP:
3622 return DAG.getNode(Opc, dl, VT, V1);
3626 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3627 SDValue V1, unsigned TargetMask,
3628 SelectionDAG &DAG) {
3630 default: llvm_unreachable("Unknown x86 shuffle node");
3631 case X86ISD::PSHUFD:
3632 case X86ISD::PSHUFHW:
3633 case X86ISD::PSHUFLW:
3634 case X86ISD::VPERMILPI:
3635 case X86ISD::VPERMI:
3636 return DAG.getNode(Opc, dl, VT, V1, DAG.getConstant(TargetMask, MVT::i8));
3640 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3641 SDValue V1, SDValue V2, unsigned TargetMask,
3642 SelectionDAG &DAG) {
3644 default: llvm_unreachable("Unknown x86 shuffle node");
3645 case X86ISD::PALIGNR:
3646 case X86ISD::VALIGN:
3648 case X86ISD::VPERM2X128:
3649 return DAG.getNode(Opc, dl, VT, V1, V2,
3650 DAG.getConstant(TargetMask, MVT::i8));
3654 static SDValue getTargetShuffleNode(unsigned Opc, SDLoc dl, EVT VT,
3655 SDValue V1, SDValue V2, SelectionDAG &DAG) {
3657 default: llvm_unreachable("Unknown x86 shuffle node");
3658 case X86ISD::MOVLHPS:
3659 case X86ISD::MOVLHPD:
3660 case X86ISD::MOVHLPS:
3661 case X86ISD::MOVLPS:
3662 case X86ISD::MOVLPD:
3665 case X86ISD::UNPCKL:
3666 case X86ISD::UNPCKH:
3667 return DAG.getNode(Opc, dl, VT, V1, V2);
3671 SDValue X86TargetLowering::getReturnAddressFrameIndex(SelectionDAG &DAG) const {
3672 MachineFunction &MF = DAG.getMachineFunction();
3673 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
3674 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
3675 int ReturnAddrIndex = FuncInfo->getRAIndex();
3677 if (ReturnAddrIndex == 0) {
3678 // Set up a frame object for the return address.
3679 unsigned SlotSize = RegInfo->getSlotSize();
3680 ReturnAddrIndex = MF.getFrameInfo()->CreateFixedObject(SlotSize,
3683 FuncInfo->setRAIndex(ReturnAddrIndex);
3686 return DAG.getFrameIndex(ReturnAddrIndex, getPointerTy());
3689 bool X86::isOffsetSuitableForCodeModel(int64_t Offset, CodeModel::Model M,
3690 bool hasSymbolicDisplacement) {
3691 // Offset should fit into 32 bit immediate field.
3692 if (!isInt<32>(Offset))
3695 // If we don't have a symbolic displacement - we don't have any extra restrictions.
3697 if (!hasSymbolicDisplacement)
3700 // FIXME: Some tweaks might be needed for medium code model.
3701 if (M != CodeModel::Small && M != CodeModel::Kernel)
3704 // For the small code model, we assume that the latest object is 16MB before
3705 // the end of the 31-bit boundary. We may also accept pretty large negative
3706 // constants, knowing that all objects are in the positive half of the address space.
3707 if (M == CodeModel::Small && Offset < 16*1024*1024)
3710 // For the kernel code model, we know that all objects reside in the negative
3711 // half of the 32-bit address space. We may not accept negative offsets, since
3712 // they may be just out of range, but we may accept pretty large positive ones.
3713 if (M == CodeModel::Kernel && Offset >= 0)
3719 /// isCalleePop - Determines whether the callee is required to pop its
3720 /// own arguments. Callee pop is necessary to support tail calls.
3721 bool X86::isCalleePop(CallingConv::ID CallingConv,
3722 bool is64Bit, bool IsVarArg, bool TailCallOpt) {
3723 switch (CallingConv) {
3726 case CallingConv::X86_StdCall:
3727 case CallingConv::X86_FastCall:
3728 case CallingConv::X86_ThisCall:
3730 case CallingConv::Fast:
3731 case CallingConv::GHC:
3732 case CallingConv::HiPE:
3739 /// \brief Return true if the condition is an unsigned comparison operation.
3740 static bool isX86CCUnsigned(unsigned X86CC) {
3742 default: llvm_unreachable("Invalid integer condition!");
3743 case X86::COND_E: return true;
3744 case X86::COND_G: return false;
3745 case X86::COND_GE: return false;
3746 case X86::COND_L: return false;
3747 case X86::COND_LE: return false;
3748 case X86::COND_NE: return true;
3749 case X86::COND_B: return true;
3750 case X86::COND_A: return true;
3751 case X86::COND_BE: return true;
3752 case X86::COND_AE: return true;
3754 llvm_unreachable("covered switch fell through?!");
3757 /// TranslateX86CC - Do a one-to-one translation of an ISD::CondCode to the X86
3758 /// specific condition code, returning the condition code and the LHS/RHS of the
3759 /// comparison to make.
3760 static unsigned TranslateX86CC(ISD::CondCode SetCCOpcode, bool isFP,
3761 SDValue &LHS, SDValue &RHS, SelectionDAG &DAG) {
3763 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS)) {
3764 if (SetCCOpcode == ISD::SETGT && RHSC->isAllOnesValue()) {
3765 // X > -1 -> X == 0, jump !sign.
3766 RHS = DAG.getConstant(0, RHS.getValueType());
3767 return X86::COND_NS;
3769 if (SetCCOpcode == ISD::SETLT && RHSC->isNullValue()) {
3770 // X < 0 -> X == 0, jump on sign.
3773 if (SetCCOpcode == ISD::SETLT && RHSC->getZExtValue() == 1) {
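// X < 1   -> X <= 0 (the RHS is rewritten to 0 below).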
3775 RHS = DAG.getConstant(0, RHS.getValueType());
3776 return X86::COND_LE;
3780 switch (SetCCOpcode) {
3781 default: llvm_unreachable("Invalid integer condition!");
3782 case ISD::SETEQ: return X86::COND_E;
3783 case ISD::SETGT: return X86::COND_G;
3784 case ISD::SETGE: return X86::COND_GE;
3785 case ISD::SETLT: return X86::COND_L;
3786 case ISD::SETLE: return X86::COND_LE;
3787 case ISD::SETNE: return X86::COND_NE;
3788 case ISD::SETULT: return X86::COND_B;
3789 case ISD::SETUGT: return X86::COND_A;
3790 case ISD::SETULE: return X86::COND_BE;
3791 case ISD::SETUGE: return X86::COND_AE;
3795 // First determine if it is required or is profitable to flip the operands.
3797 // If LHS is a foldable load, but RHS is not, flip the condition.
3798 if (ISD::isNON_EXTLoad(LHS.getNode()) &&
3799 !ISD::isNON_EXTLoad(RHS.getNode())) {
3800 SetCCOpcode = getSetCCSwappedOperands(SetCCOpcode);
3801 std::swap(LHS, RHS);
3804 switch (SetCCOpcode) {
3810 std::swap(LHS, RHS);
3814 // On a floating point condition, the flags are set as follows:
3815 //  ZF | PF | CF |  op
3816 //   0 |  0 |  0 | X > Y
3817 //   0 |  0 |  1 | X < Y
3818 //   1 |  0 |  0 | X == Y
3819 //   1 |  1 |  1 | unordered
3820 switch (SetCCOpcode) {
3821 default: llvm_unreachable("Condcode should be pre-legalized away");
3823 case ISD::SETEQ: return X86::COND_E;
3824 case ISD::SETOLT: // flipped
3826 case ISD::SETGT: return X86::COND_A;
3827 case ISD::SETOLE: // flipped
3829 case ISD::SETGE: return X86::COND_AE;
3830 case ISD::SETUGT: // flipped
3832 case ISD::SETLT: return X86::COND_B;
3833 case ISD::SETUGE: // flipped
3835 case ISD::SETLE: return X86::COND_BE;
3837 case ISD::SETNE: return X86::COND_NE;
3838 case ISD::SETUO: return X86::COND_P;
3839 case ISD::SETO: return X86::COND_NP;
3841 case ISD::SETUNE: return X86::COND_INVALID;
3845 /// hasFPCMov - Is there a floating point cmov for the specific X86 condition
3846 /// code? The current x86 ISA includes the following FP cmov instructions:
3847 /// fcmovb, fcmovbe, fcmove, fcmovu, fcmovae, fcmova, fcmovne, fcmovnu.
3848 static bool hasFPCMov(unsigned X86CC) {
3864 /// isFPImmLegal - Returns true if the target can instruction select the
3865 /// specified FP immediate natively. If false, the legalizer will
3866 /// materialize the FP immediate as a load from a constant pool.
3867 bool X86TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
3868 for (unsigned i = 0, e = LegalFPImmediates.size(); i != e; ++i) {
3869 if (Imm.bitwiseIsEqual(LegalFPImmediates[i]))
3875 bool X86TargetLowering::shouldReduceLoadWidth(SDNode *Load,
3876 ISD::LoadExtType ExtTy,
3878 // "ELF Handling for Thread-Local Storage" specifies that R_X86_64_GOTTPOFF
3879 // relocation targets a movq or addq instruction: don't let the load shrink.
3880 SDValue BasePtr = cast<LoadSDNode>(Load)->getBasePtr();
3881 if (BasePtr.getOpcode() == X86ISD::WrapperRIP)
3882 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(BasePtr.getOperand(0)))
3883 return GA->getTargetFlags() != X86II::MO_GOTTPOFF;
3887 /// \brief Returns true if it is beneficial to convert a load of a constant
3888 /// to just the constant itself.
3889 bool X86TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm,
3891 assert(Ty->isIntegerTy());
3893 unsigned BitSize = Ty->getPrimitiveSizeInBits();
3894 if (BitSize == 0 || BitSize > 64)
3899 bool X86TargetLowering::isExtractSubvectorCheap(EVT ResVT,
3900 unsigned Index) const {
3901 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT))
3904 return (Index == 0 || Index == ResVT.getVectorNumElements());
3907 bool X86TargetLowering::isCheapToSpeculateCttz() const {
3908 // Speculate cttz only if we can directly use TZCNT.
3909 return Subtarget->hasBMI();
3912 bool X86TargetLowering::isCheapToSpeculateCtlz() const {
3913 // Speculate ctlz only if we can directly use LZCNT.
3914 return Subtarget->hasLZCNT();
3917 /// isUndefOrInRange - Return true if Val is undef or if its value falls within
3918 /// the specified range (L, H].
3919 static bool isUndefOrInRange(int Val, int Low, int Hi) {
3920 return (Val < 0) || (Val >= Low && Val < Hi);
3923 /// isUndefOrEqual - Val is either less than zero (undef) or equal to the
3924 /// specified value.
3925 static bool isUndefOrEqual(int Val, int CmpVal) {
3926 return (Val < 0 || Val == CmpVal);
3929 /// isSequentialOrUndefInRange - Return true if every element in Mask, beginning
3930 /// from position Pos and ending in Pos+Size, falls within the specified
3931 /// sequential range [Low, Low+Size), or is undef.
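/// For example, Mask = <4, -1, 6, 7> with Pos = 0, Size = 4 and Low = 4
/// returns true.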
3932 static bool isSequentialOrUndefInRange(ArrayRef<int> Mask,
3933 unsigned Pos, unsigned Size, int Low) {
3934 for (unsigned i = Pos, e = Pos+Size; i != e; ++i, ++Low)
3935 if (!isUndefOrEqual(Mask[i], Low))
3940 /// isPSHUFDMask - Return true if the node specifies a shuffle of elements that
3941 /// is suitable for input to PSHUFD. That is, it doesn't reference the other
3942 /// operand - by default it will match against the first operand.
3943 static bool isPSHUFDMask(ArrayRef<int> Mask, MVT VT,
3944 bool TestSecondOperand = false) {
3945 if (VT != MVT::v4f32 && VT != MVT::v4i32 &&
3946 VT != MVT::v2f64 && VT != MVT::v2i64)
3949 unsigned NumElems = VT.getVectorNumElements();
3950 unsigned Lo = TestSecondOperand ? NumElems : 0;
3951 unsigned Hi = Lo + NumElems;
3953 for (unsigned i = 0; i < NumElems; ++i)
3954 if (!isUndefOrInRange(Mask[i], (int)Lo, (int)Hi))
3960 /// isPSHUFHWMask - Return true if the node specifies a shuffle of elements that
3961 /// is suitable for input to PSHUFHW.
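/// An illustrative example (not from the original comment): for v8i16 the mask
/// <0, 1, 2, 3, 7, 7, 5, 4> matches, since the low quadword is kept in order
/// and only the high four elements are permuted within the high quadword.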
3962 static bool isPSHUFHWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3963 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3966 // Lower quadword copied in order or undef.
3967 if (!isSequentialOrUndefInRange(Mask, 0, 4, 0))
3970 // Upper quadword shuffled.
3971 for (unsigned i = 4; i != 8; ++i)
3972 if (!isUndefOrInRange(Mask[i], 4, 8))
3975 if (VT == MVT::v16i16) {
3976 // Lower quadword copied in order or undef.
3977 if (!isSequentialOrUndefInRange(Mask, 8, 4, 8))
3980 // Upper quadword shuffled.
3981 for (unsigned i = 12; i != 16; ++i)
3982 if (!isUndefOrInRange(Mask[i], 12, 16))
3989 /// isPSHUFLWMask - Return true if the node specifies a shuffle of elements that
3990 /// is suitable for input to PSHUFLW.
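/// An illustrative example (not from the original comment): for v8i16 the mask
/// <3, 2, 1, 0, 4, 5, 6, 7> matches, since the high quadword is kept in order
/// and only the low four elements are permuted within the low quadword.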
3991 static bool isPSHUFLWMask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
3992 if (VT != MVT::v8i16 && (!HasInt256 || VT != MVT::v16i16))
3995 // Upper quadword copied in order.
3996 if (!isSequentialOrUndefInRange(Mask, 4, 4, 4))
3999 // Lower quadword shuffled.
4000 for (unsigned i = 0; i != 4; ++i)
4001 if (!isUndefOrInRange(Mask[i], 0, 4))
4004 if (VT == MVT::v16i16) {
4005 // Upper quadword copied in order.
4006 if (!isSequentialOrUndefInRange(Mask, 12, 4, 12))
4009 // Lower quadword shuffled.
4010 for (unsigned i = 8; i != 12; ++i)
4011 if (!isUndefOrInRange(Mask[i], 8, 12))
4018 /// \brief Return true if the mask specifies a shuffle of elements that is
4019 /// suitable for input to intralane (palignr) or interlane (valign) vector shuffles.
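/// An illustrative example (not from the original comment): for v8i16 the mask
/// <1, 2, 3, 4, 5, 6, 7, 8> is accepted by the intralane (palignr) check, since
/// it is a contiguous run that starts in the first source and spills one
/// element into the second.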
4021 static bool isAlignrMask(ArrayRef<int> Mask, MVT VT, bool InterLane) {
4022 unsigned NumElts = VT.getVectorNumElements();
4023 unsigned NumLanes = InterLane ? 1: VT.getSizeInBits()/128;
4024 unsigned NumLaneElts = NumElts/NumLanes;
4026 // Do not handle 64-bit element shuffles with palignr.
4027 if (NumLaneElts == 2)
4030 for (unsigned l = 0; l != NumElts; l+=NumLaneElts) {
4032 for (i = 0; i != NumLaneElts; ++i) {
4037 // Lane is all undef, go to next lane
4038 if (i == NumLaneElts)
4041 int Start = Mask[i+l];
4043 // Make sure it's in this lane in one of the sources
4044 if (!isUndefOrInRange(Start, l, l+NumLaneElts) &&
4045 !isUndefOrInRange(Start, l+NumElts, l+NumElts+NumLaneElts))
4048 // If not lane 0, then we must match lane 0
4049 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Start, Mask[i]+l))
4052 // Correct second source to be contiguous with first source
4053 if (Start >= (int)NumElts)
4054 Start -= NumElts - NumLaneElts;
4056 // Make sure we're shifting in the right direction.
4057 if (Start <= (int)(i+l))
4062 // Check the rest of the elements to see if they are consecutive.
4063 for (++i; i != NumLaneElts; ++i) {
4064 int Idx = Mask[i+l];
4066 // Make sure it's in this lane
4067 if (!isUndefOrInRange(Idx, l, l+NumLaneElts) &&
4068 !isUndefOrInRange(Idx, l+NumElts, l+NumElts+NumLaneElts))
4071 // If not lane 0, then we must match lane 0
4072 if (l != 0 && Mask[i] >= 0 && !isUndefOrEqual(Idx, Mask[i]+l))
4075 if (Idx >= (int)NumElts)
4076 Idx -= NumElts - NumLaneElts;
4078 if (!isUndefOrEqual(Idx, Start+i))
4087 /// \brief Return true if the node specifies a shuffle of elements that is
4088 /// suitable for input to PALIGNR.
4089 static bool isPALIGNRMask(ArrayRef<int> Mask, MVT VT,
4090 const X86Subtarget *Subtarget) {
4091 if ((VT.is128BitVector() && !Subtarget->hasSSSE3()) ||
4092 (VT.is256BitVector() && !Subtarget->hasInt256()) ||
4093 VT.is512BitVector())
4094 // FIXME: Add AVX512BW.
4097 return isAlignrMask(Mask, VT, false);
4100 /// \brief Return true if the node specifies a shuffle of elements that is
4101 /// suitable for input to VALIGN.
4102 static bool isVALIGNMask(ArrayRef<int> Mask, MVT VT,
4103 const X86Subtarget *Subtarget) {
4104 // FIXME: Add AVX512VL.
4105 if (!VT.is512BitVector() || !Subtarget->hasAVX512())
4107 return isAlignrMask(Mask, VT, true);
4110 /// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
4111 /// the two vector operands have swapped position.
4112 static void CommuteVectorShuffleMask(SmallVectorImpl<int> &Mask,
4113 unsigned NumElems) {
4114 for (unsigned i = 0; i != NumElems; ++i) {
4118 else if (idx < (int)NumElems)
4119 Mask[i] = idx + NumElems;
4121 Mask[i] = idx - NumElems;
4125 /// isSHUFPMask - Return true if the specified VECTOR_SHUFFLE operand
4126 /// specifies a shuffle of elements that is suitable for input to 128/256-bit
4127 /// SHUFPS and SHUFPD. If Commuted is true, then it checks for sources to be
4128 /// in the reverse order of what x86 shuffles want.
4129 static bool isSHUFPMask(ArrayRef<int> Mask, MVT VT, bool Commuted = false) {
4131 unsigned NumElems = VT.getVectorNumElements();
4132 unsigned NumLanes = VT.getSizeInBits()/128;
4133 unsigned NumLaneElems = NumElems/NumLanes;
4135 if (NumLaneElems != 2 && NumLaneElems != 4)
4138 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4139 bool symmetricMaskRequired =
4140 (VT.getSizeInBits() >= 256) && (EltSize == 32);
4142 // VSHUFPSY divides the resulting vector into 4 chunks.
4143 // The sources are also split into 4 chunks, and each destination
4144 // chunk must come from a different source chunk.
4146 // SRC1 => X7 X6 X5 X4 X3 X2 X1 X0
4147 // SRC2 => Y7 Y6 Y5 Y4 Y3 Y2 Y1 Y0
4149 // DST => Y7..Y4, Y7..Y4, X7..X4, X7..X4,
4150 // Y3..Y0, Y3..Y0, X3..X0, X3..X0
4152 // VSHUFPDY divides the resulting vector into 4 chunks.
4153 // The sources are also split into 4 chunks, and each destination
4154 // chunk must come from a different source chunk.
4156 // SRC1 => X3 X2 X1 X0
4157 // SRC2 => Y3 Y2 Y1 Y0
4159 // DST => Y3..Y2, X3..X2, Y1..Y0, X1..X0
4161 SmallVector<int, 4> MaskVal(NumLaneElems, -1);
4162 unsigned HalfLaneElems = NumLaneElems/2;
4163 for (unsigned l = 0; l != NumElems; l += NumLaneElems) {
4164 for (unsigned i = 0; i != NumLaneElems; ++i) {
4165 int Idx = Mask[i+l];
4166 unsigned RngStart = l + ((Commuted == (i<HalfLaneElems)) ? NumElems : 0);
4167 if (!isUndefOrInRange(Idx, RngStart, RngStart+NumLaneElems))
4169 // For VSHUFPSY, the mask of the second half must be the same as the
4170 // first but with the appropriate offsets. This works in the same way as
4171 // VPERMILPS works with masks.
4172 if (!symmetricMaskRequired || Idx < 0)
4174 if (MaskVal[i] < 0) {
4175 MaskVal[i] = Idx - l;
4178 if ((signed)(Idx - l) != MaskVal[i])
4186 /// isMOVHLPSMask - Return true if the specified VECTOR_SHUFFLE operand
4187 /// specifies a shuffle of elements that is suitable for input to MOVHLPS.
4188 static bool isMOVHLPSMask(ArrayRef<int> Mask, MVT VT) {
4189 if (!VT.is128BitVector())
4192 unsigned NumElems = VT.getVectorNumElements();
4197 // Expect Mask[0] == 6, Mask[1] == 7, Mask[2] == 2, Mask[3] == 3
4198 return isUndefOrEqual(Mask[0], 6) &&
4199 isUndefOrEqual(Mask[1], 7) &&
4200 isUndefOrEqual(Mask[2], 2) &&
4201 isUndefOrEqual(Mask[3], 3);
4204 /// isMOVHLPS_v_undef_Mask - Special case of isMOVHLPSMask for canonical form
4205 /// of vector_shuffle v, v, <2, 3, 2, 3>, i.e. vector_shuffle v, undef, <2, 3, 2, 3>.
4207 static bool isMOVHLPS_v_undef_Mask(ArrayRef<int> Mask, MVT VT) {
4208 if (!VT.is128BitVector())
4211 unsigned NumElems = VT.getVectorNumElements();
4216 return isUndefOrEqual(Mask[0], 2) &&
4217 isUndefOrEqual(Mask[1], 3) &&
4218 isUndefOrEqual(Mask[2], 2) &&
4219 isUndefOrEqual(Mask[3], 3);
4222 /// isMOVLPMask - Return true if the specified VECTOR_SHUFFLE operand
4223 /// specifies a shuffle of elements that is suitable for input to MOVLP{S|D}.
4224 static bool isMOVLPMask(ArrayRef<int> Mask, MVT VT) {
4225 if (!VT.is128BitVector())
4228 unsigned NumElems = VT.getVectorNumElements();
4230 if (NumElems != 2 && NumElems != 4)
4233 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4234 if (!isUndefOrEqual(Mask[i], i + NumElems))
4237 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
4238 if (!isUndefOrEqual(Mask[i], i))
4244 /// isMOVLHPSMask - Return true if the specified VECTOR_SHUFFLE operand
4245 /// specifies a shuffle of elements that is suitable for input to MOVLHPS.
4246 static bool isMOVLHPSMask(ArrayRef<int> Mask, MVT VT) {
4247 if (!VT.is128BitVector())
4250 unsigned NumElems = VT.getVectorNumElements();
4252 if (NumElems != 2 && NumElems != 4)
4255 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4256 if (!isUndefOrEqual(Mask[i], i))
4259 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
4260 if (!isUndefOrEqual(Mask[i + e], i + NumElems))
4266 /// isINSERTPSMask - Return true if the specified VECTOR_SHUFFLE operand
4267 /// specifies a shuffle of elements that is suitable for input to INSERTPS.
4268 /// i.e. if all but one element come from the same vector.
4269 static bool isINSERTPSMask(ArrayRef<int> Mask, MVT VT) {
4270 // TODO: Deal with AVX's VINSERTPS
4271 if (!VT.is128BitVector() || (VT != MVT::v4f32 && VT != MVT::v4i32))
4274 unsigned CorrectPosV1 = 0;
4275 unsigned CorrectPosV2 = 0;
4276 for (int i = 0, e = (int)VT.getVectorNumElements(); i != e; ++i) {
4277 if (Mask[i] == -1) {
4285 else if (Mask[i] == i + 4)
4289 if (CorrectPosV1 == 3 || CorrectPosV2 == 3)
4290 // We have 3 elements (undefs count as elements from any vector) from one
4291 // vector, and one from another.
4298 // Some special combinations that can be optimized.
4301 SDValue Compact8x32ShuffleNode(ShuffleVectorSDNode *SVOp,
4302 SelectionDAG &DAG) {
4303 MVT VT = SVOp->getSimpleValueType(0);
4306 if (VT != MVT::v8i32 && VT != MVT::v8f32)
4309 ArrayRef<int> Mask = SVOp->getMask();
4311 // These are the special masks that may be optimized.
4312 static const int MaskToOptimizeEven[] = {0, 8, 2, 10, 4, 12, 6, 14};
4313 static const int MaskToOptimizeOdd[] = {1, 9, 3, 11, 5, 13, 7, 15};
4314 bool MatchEvenMask = true;
4315 bool MatchOddMask = true;
4316 for (int i=0; i<8; ++i) {
4317 if (!isUndefOrEqual(Mask[i], MaskToOptimizeEven[i]))
4318 MatchEvenMask = false;
4319 if (!isUndefOrEqual(Mask[i], MaskToOptimizeOdd[i]))
4320 MatchOddMask = false;
4323 if (!MatchEvenMask && !MatchOddMask)
4326 SDValue UndefNode = DAG.getNode(ISD::UNDEF, dl, VT);
4328 SDValue Op0 = SVOp->getOperand(0);
4329 SDValue Op1 = SVOp->getOperand(1);
4331 if (MatchEvenMask) {
4332 // Shift the second operand right by 32 bits.
4333 static const int ShiftRightMask[] = {-1, 0, -1, 2, -1, 4, -1, 6 };
4334 Op1 = DAG.getVectorShuffle(VT, dl, Op1, UndefNode, ShiftRightMask);
4336 // Shift the first operand left by 32 bits.
4337 static const int ShiftLeftMask[] = {1, -1, 3, -1, 5, -1, 7, -1 };
4338 Op0 = DAG.getVectorShuffle(VT, dl, Op0, UndefNode, ShiftLeftMask);
4340 static const int BlendMask[] = {0, 9, 2, 11, 4, 13, 6, 15};
4341 return DAG.getVectorShuffle(VT, dl, Op0, Op1, BlendMask);
4344 /// isUNPCKLMask - Return true if the specified VECTOR_SHUFFLE operand
4345 /// specifies a shuffle of elements that is suitable for input to UNPCKL.
4346 static bool isUNPCKLMask(ArrayRef<int> Mask, MVT VT,
4347 bool HasInt256, bool V2IsSplat = false) {
4349 assert(VT.getSizeInBits() >= 128 &&
4350 "Unsupported vector type for unpckl");
4352 unsigned NumElts = VT.getVectorNumElements();
4353 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4354 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4357 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4358 "Unsupported vector type for unpckh");
4360 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4361 unsigned NumLanes = VT.getSizeInBits()/128;
4362 unsigned NumLaneElts = NumElts/NumLanes;
4364 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4365 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4366 int BitI = Mask[l+i];
4367 int BitI1 = Mask[l+i+1];
4368 if (!isUndefOrEqual(BitI, j))
4371 if (!isUndefOrEqual(BitI1, NumElts))
4374 if (!isUndefOrEqual(BitI1, j + NumElts))
4383 /// isUNPCKHMask - Return true if the specified VECTOR_SHUFFLE operand
4384 /// specifies a shuffle of elements that is suitable for input to UNPCKH.
4385 static bool isUNPCKHMask(ArrayRef<int> Mask, MVT VT,
4386 bool HasInt256, bool V2IsSplat = false) {
4387 assert(VT.getSizeInBits() >= 128 &&
4388 "Unsupported vector type for unpckh");
4390 unsigned NumElts = VT.getVectorNumElements();
4391 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4392 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4395 assert((!VT.is512BitVector() || VT.getScalarType().getSizeInBits() >= 32) &&
4396 "Unsupported vector type for unpckh");
4398 // AVX defines UNPCK* to operate independently on 128-bit lanes.
4399 unsigned NumLanes = VT.getSizeInBits()/128;
4400 unsigned NumLaneElts = NumElts/NumLanes;
4402 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4403 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4404 int BitI = Mask[l+i];
4405 int BitI1 = Mask[l+i+1];
4406 if (!isUndefOrEqual(BitI, j))
4409 if (isUndefOrEqual(BitI1, NumElts))
4412 if (!isUndefOrEqual(BitI1, j+NumElts))
4420 /// isUNPCKL_v_undef_Mask - Special case of isUNPCKLMask for canonical form
4421 /// of vector_shuffle v, v, <0, 4, 1, 5>, i.e. vector_shuffle v, undef, <0, 0, 1, 1>.
4423 static bool isUNPCKL_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4424 unsigned NumElts = VT.getVectorNumElements();
4425 bool Is256BitVec = VT.is256BitVector();
4427 if (VT.is512BitVector())
4429 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4430 "Unsupported vector type for unpckh");
4432 if (Is256BitVec && NumElts != 4 && NumElts != 8 &&
4433 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4436 // For 256-bit i64/f64, use MOVDDUPY instead, so reject the matching pattern
4437 // FIXME: Need a better way to get rid of this, there's no latency difference
4438 // between UNPCKLPD and MOVDDUP, the latter should always be checked first and
4439 // the former later. We should also remove the "_undef" special mask.
4440 if (NumElts == 4 && Is256BitVec)
4443 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4444 // independently on 128-bit lanes.
4445 unsigned NumLanes = VT.getSizeInBits()/128;
4446 unsigned NumLaneElts = NumElts/NumLanes;
4448 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4449 for (unsigned i = 0, j = l; i != NumLaneElts; i += 2, ++j) {
4450 int BitI = Mask[l+i];
4451 int BitI1 = Mask[l+i+1];
4453 if (!isUndefOrEqual(BitI, j))
4455 if (!isUndefOrEqual(BitI1, j))
4463 /// isUNPCKH_v_undef_Mask - Special case of isUNPCKHMask for canonical form
4464 /// of vector_shuffle v, v, <2, 6, 3, 7>, i.e. vector_shuffle v, undef, <2, 2, 3, 3>.
4466 static bool isUNPCKH_v_undef_Mask(ArrayRef<int> Mask, MVT VT, bool HasInt256) {
4467 unsigned NumElts = VT.getVectorNumElements();
4469 if (VT.is512BitVector())
4472 assert((VT.is128BitVector() || VT.is256BitVector()) &&
4473 "Unsupported vector type for unpckh");
4475 if (VT.is256BitVector() && NumElts != 4 && NumElts != 8 &&
4476 (!HasInt256 || (NumElts != 16 && NumElts != 32)))
4479 // Handle 128 and 256-bit vector lengths. AVX defines UNPCK* to operate
4480 // independently on 128-bit lanes.
4481 unsigned NumLanes = VT.getSizeInBits()/128;
4482 unsigned NumLaneElts = NumElts/NumLanes;
4484 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
4485 for (unsigned i = 0, j = l+NumLaneElts/2; i != NumLaneElts; i += 2, ++j) {
4486 int BitI = Mask[l+i];
4487 int BitI1 = Mask[l+i+1];
4488 if (!isUndefOrEqual(BitI, j))
4490 if (!isUndefOrEqual(BitI1, j))
4497 // Match for INSERTI64x4/INSERTF64x4 instructions (src0[0], src1[0]) or
4498 // (src1[0], src0[1]); these manipulate the 256-bit sub-vectors of a 512-bit vector.
4499 static bool isINSERT64x4Mask(ArrayRef<int> Mask, MVT VT, unsigned int *Imm) {
4500 if (!VT.is512BitVector())
4503 unsigned NumElts = VT.getVectorNumElements();
4504 unsigned HalfSize = NumElts/2;
4505 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, 0)) {
4506 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, NumElts)) {
4511 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, NumElts)) {
4512 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, HalfSize)) {
4520 /// isMOVLMask - Return true if the specified VECTOR_SHUFFLE operand
4521 /// specifies a shuffle of elements that is suitable for input to MOVSS,
4522 /// MOVSD, and MOVD, i.e. setting the lowest element.
4523 static bool isMOVLMask(ArrayRef<int> Mask, EVT VT) {
4524 if (VT.getVectorElementType().getSizeInBits() < 32)
4526 if (!VT.is128BitVector())
4529 unsigned NumElts = VT.getVectorNumElements();
4531 if (!isUndefOrEqual(Mask[0], NumElts))
4534 for (unsigned i = 1; i != NumElts; ++i)
4535 if (!isUndefOrEqual(Mask[i], i))
4541 /// isVPERM2X128Mask - Match 256-bit shuffles where the elements are considered
4542 /// as permutations between 128-bit chunks or halves. As an example, this shuffle:
4544 /// vector_shuffle <4, 5, 6, 7, 12, 13, 14, 15>
4545 /// The first half comes from the second half of V1 and the second half from
4546 /// the second half of V2.
4547 static bool isVPERM2X128Mask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4548 if (!HasFp256 || !VT.is256BitVector())
4551 // The shuffle result is divided into half A and half B. In total the two
4552 // sources have 4 halves, namely: C, D, E, F. The final values of A and
4553 // B must come from C, D, E or F.
4554 unsigned HalfSize = VT.getVectorNumElements()/2;
4555 bool MatchA = false, MatchB = false;
4557 // Check if A comes from one of C, D, E, F.
4558 for (unsigned Half = 0; Half != 4; ++Half) {
4559 if (isSequentialOrUndefInRange(Mask, 0, HalfSize, Half*HalfSize)) {
4565 // Check if B comes from one of C, D, E, F.
4566 for (unsigned Half = 0; Half != 4; ++Half) {
4567 if (isSequentialOrUndefInRange(Mask, HalfSize, HalfSize, Half*HalfSize)) {
4573 return MatchA && MatchB;
4576 /// getShuffleVPERM2X128Immediate - Return the appropriate immediate to shuffle
4577 /// the specified VECTOR_MASK mask with VPERM2F128/VPERM2I128 instructions.
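/// An illustrative example (not from the original comment): for a v8i32 mask
/// <4, 5, 6, 7, 12, 13, 14, 15>, FstHalf = 4/4 = 1 and SndHalf = 12/4 = 3, so
/// the returned immediate is 1 | (3 << 4) = 0x31.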
4578 static unsigned getShuffleVPERM2X128Immediate(ShuffleVectorSDNode *SVOp) {
4579 MVT VT = SVOp->getSimpleValueType(0);
4581 unsigned HalfSize = VT.getVectorNumElements()/2;
4583 unsigned FstHalf = 0, SndHalf = 0;
4584 for (unsigned i = 0; i < HalfSize; ++i) {
4585 if (SVOp->getMaskElt(i) > 0) {
4586 FstHalf = SVOp->getMaskElt(i)/HalfSize;
4590 for (unsigned i = HalfSize; i < HalfSize*2; ++i) {
4591 if (SVOp->getMaskElt(i) > 0) {
4592 SndHalf = SVOp->getMaskElt(i)/HalfSize;
4597 return (FstHalf | (SndHalf << 4));
4600 // Symmetric in-lane mask. Each lane has 4 elements (for imm8)
4601 static bool isPermImmMask(ArrayRef<int> Mask, MVT VT, unsigned& Imm8) {
4602 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4606 unsigned NumElts = VT.getVectorNumElements();
4608 if (VT.is128BitVector() || (VT.is256BitVector() && EltSize == 64)) {
4609 for (unsigned i = 0; i != NumElts; ++i) {
4612 Imm8 |= Mask[i] << (i*2);
4617 unsigned LaneSize = 4;
4618 SmallVector<int, 4> MaskVal(LaneSize, -1);
4620 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4621 for (unsigned i = 0; i != LaneSize; ++i) {
4622 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4626 if (MaskVal[i] < 0) {
4627 MaskVal[i] = Mask[i+l] - l;
4628 Imm8 |= MaskVal[i] << (i*2);
4631 if (Mask[i+l] != (signed)(MaskVal[i]+l))
4638 /// isVPERMILPMask - Return true if the specified VECTOR_SHUFFLE operand
4639 /// specifies a shuffle of elements that is suitable for input to VPERMILPD*.
4640 /// Note that VPERMIL mask matching is different depending on whether the
4641 /// underlying element type is 32 or 64 bits wide. For VPERMILPS the high half of the
4642 /// mask should point to the same elements as the low half, but within the higher half of the source.
4643 /// In VPERMILPD the two lanes could be shuffled independently of each other
4644 /// with the same restriction that lanes can't be crossed. Also handles PSHUFDY.
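/// For example (illustrative): <1, 0, 3, 2, 5, 4, 7, 6> is a valid v8f32 VPERMILPS
/// mask because both 128-bit lanes use the same in-lane pattern <1, 0, 3, 2>.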
4645 static bool isVPERMILPMask(ArrayRef<int> Mask, MVT VT) {
4646 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
4647 if (VT.getSizeInBits() < 256 || EltSize < 32)
4649 bool symmetricMaskRequired = (EltSize == 32);
4650 unsigned NumElts = VT.getVectorNumElements();
4652 unsigned NumLanes = VT.getSizeInBits()/128;
4653 unsigned LaneSize = NumElts/NumLanes;
4654 // 2 or 4 elements in one lane
4656 SmallVector<int, 4> ExpectedMaskVal(LaneSize, -1);
4657 for (unsigned l = 0; l != NumElts; l += LaneSize) {
4658 for (unsigned i = 0; i != LaneSize; ++i) {
4659 if (!isUndefOrInRange(Mask[i+l], l, l+LaneSize))
4661 if (symmetricMaskRequired) {
4662 if (ExpectedMaskVal[i] < 0 && Mask[i+l] >= 0) {
4663 ExpectedMaskVal[i] = Mask[i+l] - l;
4666 if (!isUndefOrEqual(Mask[i+l], ExpectedMaskVal[i]+l))
4674 /// isCommutedMOVLMask - Returns true if the shuffle mask is the reverse of what
4675 /// x86 movss wants. X86 movss requires the lowest element to be the lowest
4676 /// element of vector 2 and the other elements to come from vector 1 in order.
4677 static bool isCommutedMOVLMask(ArrayRef<int> Mask, MVT VT,
4678 bool V2IsSplat = false, bool V2IsUndef = false) {
4679 if (!VT.is128BitVector())
4682 unsigned NumOps = VT.getVectorNumElements();
4683 if (NumOps != 2 && NumOps != 4 && NumOps != 8 && NumOps != 16)
4686 if (!isUndefOrEqual(Mask[0], 0))
4689 for (unsigned i = 1; i != NumOps; ++i)
4690 if (!(isUndefOrEqual(Mask[i], i+NumOps) ||
4691 (V2IsUndef && isUndefOrInRange(Mask[i], NumOps, NumOps*2)) ||
4692 (V2IsSplat && isUndefOrEqual(Mask[i], NumOps))))
4698 /// isMOVSHDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4699 /// specifies a shuffle of elements that is suitable for input to MOVSHDUP.
4700 /// Masks to match: <1, 1, 3, 3> or <1, 1, 3, 3, 5, 5, 7, 7>
4701 static bool isMOVSHDUPMask(ArrayRef<int> Mask, MVT VT,
4702 const X86Subtarget *Subtarget) {
4703 if (!Subtarget->hasSSE3())
4706 unsigned NumElems = VT.getVectorNumElements();
4708 if ((VT.is128BitVector() && NumElems != 4) ||
4709 (VT.is256BitVector() && NumElems != 8) ||
4710 (VT.is512BitVector() && NumElems != 16))
4713 // "i+1" is the value the indexed mask element must have
4714 for (unsigned i = 0; i != NumElems; i += 2)
4715 if (!isUndefOrEqual(Mask[i], i+1) ||
4716 !isUndefOrEqual(Mask[i+1], i+1))
4722 /// isMOVSLDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4723 /// specifies a shuffle of elements that is suitable for input to MOVSLDUP.
4724 /// Masks to match: <0, 0, 2, 2> or <0, 0, 2, 2, 4, 4, 6, 6>
4725 static bool isMOVSLDUPMask(ArrayRef<int> Mask, MVT VT,
4726 const X86Subtarget *Subtarget) {
4727 if (!Subtarget->hasSSE3())
4730 unsigned NumElems = VT.getVectorNumElements();
4732 if ((VT.is128BitVector() && NumElems != 4) ||
4733 (VT.is256BitVector() && NumElems != 8) ||
4734 (VT.is512BitVector() && NumElems != 16))
4737 // "i" is the value the indexed mask element must have
4738 for (unsigned i = 0; i != NumElems; i += 2)
4739 if (!isUndefOrEqual(Mask[i], i) ||
4740 !isUndefOrEqual(Mask[i+1], i))
4746 /// isMOVDDUPYMask - Return true if the specified VECTOR_SHUFFLE operand
4747 /// specifies a shuffle of elements that is suitable for input to 256-bit
4748 /// version of MOVDDUP.
4749 static bool isMOVDDUPYMask(ArrayRef<int> Mask, MVT VT, bool HasFp256) {
4750 if (!HasFp256 || !VT.is256BitVector())
4753 unsigned NumElts = VT.getVectorNumElements();
4757 for (unsigned i = 0; i != NumElts/2; ++i)
4758 if (!isUndefOrEqual(Mask[i], 0))
4760 for (unsigned i = NumElts/2; i != NumElts; ++i)
4761 if (!isUndefOrEqual(Mask[i], NumElts/2))
4766 /// isMOVDDUPMask - Return true if the specified VECTOR_SHUFFLE operand
4767 /// specifies a shuffle of elements that is suitable for input to 128-bit
4768 /// version of MOVDDUP.
4769 static bool isMOVDDUPMask(ArrayRef<int> Mask, MVT VT) {
4770 if (!VT.is128BitVector())
4773 unsigned e = VT.getVectorNumElements() / 2;
4774 for (unsigned i = 0; i != e; ++i)
4775 if (!isUndefOrEqual(Mask[i], i))
4777 for (unsigned i = 0; i != e; ++i)
4778 if (!isUndefOrEqual(Mask[e+i], i))
4783 /// isVEXTRACTIndex - Return true if the specified
4784 /// EXTRACT_SUBVECTOR operand specifies a vector extract that is
4785 /// suitable for instructions that extract 128- or 256-bit vectors
4786 static bool isVEXTRACTIndex(SDNode *N, unsigned vecWidth) {
4787 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4788 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4791 // The index should be aligned on a vecWidth-bit boundary.
4793 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4795 MVT VT = N->getSimpleValueType(0);
4796 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4797 bool Result = (Index * ElSize) % vecWidth == 0;
4802 /// isVINSERTIndex - Return true if the specified INSERT_SUBVECTOR
4803 /// operand specifies a subvector insert that is suitable for input to
4804 /// insertion of 128 or 256-bit subvectors
4805 static bool isVINSERTIndex(SDNode *N, unsigned vecWidth) {
4806 assert((vecWidth == 128 || vecWidth == 256) && "Unexpected vector width");
4807 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4809 // The index should be aligned on a vecWidth-bit boundary.
4811 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4813 MVT VT = N->getSimpleValueType(0);
4814 unsigned ElSize = VT.getVectorElementType().getSizeInBits();
4815 bool Result = (Index * ElSize) % vecWidth == 0;
4820 bool X86::isVINSERT128Index(SDNode *N) {
4821 return isVINSERTIndex(N, 128);
4824 bool X86::isVINSERT256Index(SDNode *N) {
4825 return isVINSERTIndex(N, 256);
4828 bool X86::isVEXTRACT128Index(SDNode *N) {
4829 return isVEXTRACTIndex(N, 128);
4832 bool X86::isVEXTRACT256Index(SDNode *N) {
4833 return isVEXTRACTIndex(N, 256);
4836 /// getShuffleSHUFImmediate - Return the appropriate immediate to shuffle
4837 /// the specified VECTOR_SHUFFLE mask with PSHUF* and SHUFP* instructions.
4838 /// Handles 128-bit and 256-bit.
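/// For example (illustrative): the v4f32 mask <2, 1, 0, 3> encodes as
/// 0b11000110 == 0xC6, two bits per destination element with element 0 in the
/// low bits.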
4839 static unsigned getShuffleSHUFImmediate(ShuffleVectorSDNode *N) {
4840 MVT VT = N->getSimpleValueType(0);
4842 assert((VT.getSizeInBits() >= 128) &&
4843 "Unsupported vector type for PSHUF/SHUFP");
4845 // Handle 128 and 256-bit vector lengths. AVX defines PSHUF/SHUFP to operate
4846 // independently on 128-bit lanes.
4847 unsigned NumElts = VT.getVectorNumElements();
4848 unsigned NumLanes = VT.getSizeInBits()/128;
4849 unsigned NumLaneElts = NumElts/NumLanes;
4851 assert((NumLaneElts == 2 || NumLaneElts == 4 || NumLaneElts == 8) &&
4852 "Only supports 2, 4 or 8 elements per lane");
4854 unsigned Shift = (NumLaneElts >= 4) ? 1 : 0;
4856 for (unsigned i = 0; i != NumElts; ++i) {
4857 int Elt = N->getMaskElt(i);
4858 if (Elt < 0) continue;
4859 Elt &= NumLaneElts - 1;
4860 unsigned ShAmt = (i << Shift) % 8;
4861 Mask |= Elt << ShAmt;
4867 /// getShufflePSHUFHWImmediate - Return the appropriate immediate to shuffle
4868 /// the specified VECTOR_SHUFFLE mask with the PSHUFHW instruction.
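/// For example (illustrative): the v8i16 mask <0, 1, 2, 3, 7, 6, 5, 4> leaves the
/// low quadword alone and reverses the high four words, encoding to 0x1B.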
4869 static unsigned getShufflePSHUFHWImmediate(ShuffleVectorSDNode *N) {
4870 MVT VT = N->getSimpleValueType(0);
4872 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4873 "Unsupported vector type for PSHUFHW");
4875 unsigned NumElts = VT.getVectorNumElements();
4878 for (unsigned l = 0; l != NumElts; l += 8) {
4879 // 8 nodes per lane, but we only care about the last 4.
4880 for (unsigned i = 0; i < 4; ++i) {
4881 int Elt = N->getMaskElt(l+i+4);
4882 if (Elt < 0) continue;
4883 Elt &= 0x3; // only 2-bits.
4884 Mask |= Elt << (i * 2);
4891 /// getShufflePSHUFLWImmediate - Return the appropriate immediate to shuffle
4892 /// the specified VECTOR_SHUFFLE mask with the PSHUFLW instruction.
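/// For example (illustrative): the v8i16 mask <1, 0, 3, 2, 4, 5, 6, 7> swaps each
/// pair of words in the low quadword, encoding to 0xB1.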
4893 static unsigned getShufflePSHUFLWImmediate(ShuffleVectorSDNode *N) {
4894 MVT VT = N->getSimpleValueType(0);
4896 assert((VT == MVT::v8i16 || VT == MVT::v16i16) &&
4897 "Unsupported vector type for PSHUFHW");
4899 unsigned NumElts = VT.getVectorNumElements();
4902 for (unsigned l = 0; l != NumElts; l += 8) {
4903 // 8 nodes per lane, but we only care about the first 4.
4904 for (unsigned i = 0; i < 4; ++i) {
4905 int Elt = N->getMaskElt(l+i);
4906 if (Elt < 0) continue;
4907 Elt &= 0x3; // only 2-bits
4908 Mask |= Elt << (i * 2);
4915 /// \brief Return the appropriate immediate to shuffle the specified
4916 /// VECTOR_SHUFFLE mask with the PALIGNR (if InterLane is false) or with
4917 /// VALIGN (if InterLane is true) instructions.
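/// For example (illustrative): with InterLane false, a v8i16 mask
/// <3, 4, 5, 6, 7, 8, 9, 10> starts at element 3, so the immediate is
/// (3 - 0) * 2 == 6 bytes for PALIGNR.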
4918 static unsigned getShuffleAlignrImmediate(ShuffleVectorSDNode *SVOp,
4920 MVT VT = SVOp->getSimpleValueType(0);
4921 unsigned EltSize = InterLane ? 1 :
4922 VT.getVectorElementType().getSizeInBits() >> 3;
4924 unsigned NumElts = VT.getVectorNumElements();
4925 unsigned NumLanes = VT.is512BitVector() ? 1 : VT.getSizeInBits()/128;
4926 unsigned NumLaneElts = NumElts/NumLanes;
4930 for (i = 0; i != NumElts; ++i) {
4931 Val = SVOp->getMaskElt(i);
4935 if (Val >= (int)NumElts)
4936 Val -= NumElts - NumLaneElts;
4938 assert(Val - i > 0 && "PALIGNR imm should be positive");
4939 return (Val - i) * EltSize;
4942 /// \brief Return the appropriate immediate to shuffle the specified
4943 /// VECTOR_SHUFFLE mask with the PALIGNR instruction.
4944 static unsigned getShufflePALIGNRImmediate(ShuffleVectorSDNode *SVOp) {
4945 return getShuffleAlignrImmediate(SVOp, false);
4948 /// \brief Return the appropriate immediate to shuffle the specified
4949 /// VECTOR_SHUFFLE mask with the VALIGN instruction.
4950 static unsigned getShuffleVALIGNImmediate(ShuffleVectorSDNode *SVOp) {
4951 return getShuffleAlignrImmediate(SVOp, true);
4955 static unsigned getExtractVEXTRACTImmediate(SDNode *N, unsigned vecWidth) {
4956 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4957 if (!isa<ConstantSDNode>(N->getOperand(1).getNode()))
4958 llvm_unreachable("Illegal extract subvector for VEXTRACT");
4961 cast<ConstantSDNode>(N->getOperand(1).getNode())->getZExtValue();
4963 MVT VecVT = N->getOperand(0).getSimpleValueType();
4964 MVT ElVT = VecVT.getVectorElementType();
4966 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4967 return Index / NumElemsPerChunk;
4970 static unsigned getInsertVINSERTImmediate(SDNode *N, unsigned vecWidth) {
4971 assert((vecWidth == 128 || vecWidth == 256) && "Unsupported vector width");
4972 if (!isa<ConstantSDNode>(N->getOperand(2).getNode()))
4973 llvm_unreachable("Illegal insert subvector for VINSERT");
4976 cast<ConstantSDNode>(N->getOperand(2).getNode())->getZExtValue();
4978 MVT VecVT = N->getSimpleValueType(0);
4979 MVT ElVT = VecVT.getVectorElementType();
4981 unsigned NumElemsPerChunk = vecWidth / ElVT.getSizeInBits();
4982 return Index / NumElemsPerChunk;
4985 /// getExtractVEXTRACT128Immediate - Return the appropriate immediate
4986 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF128
4987 /// and VEXTRACTI128 instructions.
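/// For example (illustrative): extracting the subvector at element index 4 of a
/// v8f32 gives 4 / (128 / 32) == 1, i.e. the upper 128-bit half.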
4988 unsigned X86::getExtractVEXTRACT128Immediate(SDNode *N) {
4989 return getExtractVEXTRACTImmediate(N, 128);
4992 /// getExtractVEXTRACT256Immediate - Return the appropriate immediate
4993 /// to extract the specified EXTRACT_SUBVECTOR index with VEXTRACTF64x4
4994 /// and VEXTRACTI64x4 instructions.
4995 unsigned X86::getExtractVEXTRACT256Immediate(SDNode *N) {
4996 return getExtractVEXTRACTImmediate(N, 256);
4999 /// getInsertVINSERT128Immediate - Return the appropriate immediate
5000 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF128
5001 /// and VINSERTI128 instructions.
5002 unsigned X86::getInsertVINSERT128Immediate(SDNode *N) {
5003 return getInsertVINSERTImmediate(N, 128);
5006 /// getInsertVINSERT256Immediate - Return the appropriate immediate
5007 /// to insert at the specified INSERT_SUBVECTOR index with VINSERTF64x4
5008 /// and VINSERTI64x4 instructions.
5009 unsigned X86::getInsertVINSERT256Immediate(SDNode *N) {
5010 return getInsertVINSERTImmediate(N, 256);
5013 /// isZero - Returns true if Elt is a constant integer zero
5014 static bool isZero(SDValue V) {
5015 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
5016 return C && C->isNullValue();
5019 /// isZeroNode - Returns true if Elt is a constant zero or a floating point constant +0.0.
5021 bool X86::isZeroNode(SDValue Elt) {
5024 if (ConstantFPSDNode *CFP = dyn_cast<ConstantFPSDNode>(Elt))
5025 return CFP->getValueAPF().isPosZero();
5029 /// ShouldXformToMOVHLPS - Return true if the node should be transformed to
5030 /// match movhlps. The lower half elements should come from the upper half of
5031 /// V1 (and in order), and the upper half elements should come from the upper
5032 /// half of V2 (and in order).
5033 static bool ShouldXformToMOVHLPS(ArrayRef<int> Mask, MVT VT) {
5034 if (!VT.is128BitVector())
5036 if (VT.getVectorNumElements() != 4)
5038 for (unsigned i = 0, e = 2; i != e; ++i)
5039 if (!isUndefOrEqual(Mask[i], i+2))
5041 for (unsigned i = 2; i != 4; ++i)
5042 if (!isUndefOrEqual(Mask[i], i+4))
5047 /// isScalarLoadToVector - Returns true if the node is a scalar load that
5048 /// is promoted to a vector. It also returns the LoadSDNode by reference if requested.
5050 static bool isScalarLoadToVector(SDNode *N, LoadSDNode **LD = nullptr) {
5051 if (N->getOpcode() != ISD::SCALAR_TO_VECTOR)
5053 N = N->getOperand(0).getNode();
5054 if (!ISD::isNON_EXTLoad(N))
5057 *LD = cast<LoadSDNode>(N);
5061 // Test whether the given value is a vector value which will be legalized into a load.
5063 static bool WillBeConstantPoolLoad(SDNode *N) {
5064 if (N->getOpcode() != ISD::BUILD_VECTOR)
5067 // Check for any non-constant elements.
5068 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
5069 switch (N->getOperand(i).getNode()->getOpcode()) {
5071 case ISD::ConstantFP:
5078 // Vectors of all-zeros and all-ones are materialized with special
5079 // instructions rather than being loaded.
5080 return !ISD::isBuildVectorAllZeros(N) &&
5081 !ISD::isBuildVectorAllOnes(N);
5084 /// ShouldXformToMOVLP{S|D} - Return true if the node should be transformed to
5085 /// match movlp{s|d}. The lower half elements should come from lower half of
5086 /// V1 (and in order), and the upper half elements should come from the upper
5087 /// half of V2 (and in order). And since V1 will become the source of the
5088 /// MOVLP, it must be either a vector load or a scalar load to vector.
5089 static bool ShouldXformToMOVLP(SDNode *V1, SDNode *V2,
5090 ArrayRef<int> Mask, MVT VT) {
5091 if (!VT.is128BitVector())
5094 if (!ISD::isNON_EXTLoad(V1) && !isScalarLoadToVector(V1))
5096 // If V2 is a vector load, don't do this transformation. We will try to use
5097 // a load-folding shufps op instead.
5098 if (ISD::isNON_EXTLoad(V2) || WillBeConstantPoolLoad(V2))
5101 unsigned NumElems = VT.getVectorNumElements();
5103 if (NumElems != 2 && NumElems != 4)
5105 for (unsigned i = 0, e = NumElems/2; i != e; ++i)
5106 if (!isUndefOrEqual(Mask[i], i))
5108 for (unsigned i = NumElems/2, e = NumElems; i != e; ++i)
5109 if (!isUndefOrEqual(Mask[i], i+NumElems))
5114 /// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
5115 /// to a zero vector.
5116 /// FIXME: move to dag combiner / method on ShuffleVectorSDNode
5117 static bool isZeroShuffle(ShuffleVectorSDNode *N) {
5118 SDValue V1 = N->getOperand(0);
5119 SDValue V2 = N->getOperand(1);
5120 unsigned NumElems = N->getValueType(0).getVectorNumElements();
5121 for (unsigned i = 0; i != NumElems; ++i) {
5122 int Idx = N->getMaskElt(i);
5123 if (Idx >= (int)NumElems) {
5124 unsigned Opc = V2.getOpcode();
5125 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V2.getNode()))
5127 if (Opc != ISD::BUILD_VECTOR ||
5128 !X86::isZeroNode(V2.getOperand(Idx-NumElems)))
5130 } else if (Idx >= 0) {
5131 unsigned Opc = V1.getOpcode();
5132 if (Opc == ISD::UNDEF || ISD::isBuildVectorAllZeros(V1.getNode()))
5134 if (Opc != ISD::BUILD_VECTOR ||
5135 !X86::isZeroNode(V1.getOperand(Idx)))
5142 /// getZeroVector - Returns a vector of specified type with all zero elements.
5144 static SDValue getZeroVector(EVT VT, const X86Subtarget *Subtarget,
5145 SelectionDAG &DAG, SDLoc dl) {
5146 assert(VT.isVector() && "Expected a vector type");
5148 // Always build SSE zero vectors as <4 x i32> bitcasted
5149 // to their dest type. This ensures they get CSE'd.
5151 if (VT.is128BitVector()) { // SSE
5152 if (Subtarget->hasSSE2()) { // SSE2
5153 SDValue Cst = DAG.getConstant(0, MVT::i32);
5154 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5156 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5157 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4f32, Cst, Cst, Cst, Cst);
5159 } else if (VT.is256BitVector()) { // AVX
5160 if (Subtarget->hasInt256()) { // AVX2
5161 SDValue Cst = DAG.getConstant(0, MVT::i32);
5162 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5163 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5165 // 256-bit logic and arithmetic instructions in AVX are all
5166 // floating-point, no support for integer ops. Emit fp zeroed vectors.
5167 SDValue Cst = DAG.getConstantFP(+0.0, MVT::f32);
5168 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5169 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8f32, Ops);
5171 } else if (VT.is512BitVector()) { // AVX-512
5172 SDValue Cst = DAG.getConstant(0, MVT::i32);
5173 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst,
5174 Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5175 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v16i32, Ops);
5176 } else if (VT.getScalarType() == MVT::i1) {
5177 assert(VT.getVectorNumElements() <= 16 && "Unexpected vector type");
5178 SDValue Cst = DAG.getConstant(0, MVT::i1);
5179 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
5180 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
5182 llvm_unreachable("Unexpected vector type");
5184 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5187 /// getOnesVector - Returns a vector of specified type with all bits set.
5188 /// Always build ones vectors as <4 x i32> or <8 x i32>. For 256-bit types with
5189 /// no AVX2 support, use two <4 x i32> inserted in a <8 x i32> appropriately.
5190 /// Then bitcast to their original type, ensuring they get CSE'd.
5191 static SDValue getOnesVector(MVT VT, bool HasInt256, SelectionDAG &DAG,
5193 assert(VT.isVector() && "Expected a vector type");
5195 SDValue Cst = DAG.getConstant(~0U, MVT::i32);
5197 if (VT.is256BitVector()) {
5198 if (HasInt256) { // AVX2
5199 SDValue Ops[] = { Cst, Cst, Cst, Cst, Cst, Cst, Cst, Cst };
5200 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v8i32, Ops);
5202 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5203 Vec = Concat128BitVectors(Vec, Vec, MVT::v8i32, 8, DAG, dl);
5205 } else if (VT.is128BitVector()) {
5206 Vec = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32, Cst, Cst, Cst, Cst);
5208 llvm_unreachable("Unexpected vector type");
5210 return DAG.getNode(ISD::BITCAST, dl, VT, Vec);
5213 /// NormalizeMask - V2 is a splat, modify the mask (if needed) so all elements
5214 /// that point to V2 point to its first element.
5215 static void NormalizeMask(SmallVectorImpl<int> &Mask, unsigned NumElems) {
5216 for (unsigned i = 0; i != NumElems; ++i) {
5217 if (Mask[i] > (int)NumElems) {
5223 /// getMOVL - Returns a vector_shuffle node for a movs{s|d} or movd
5224 /// operation of the specified width.
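/// For example (illustrative): for a 4-element type this builds the mask
/// <4, 1, 2, 3>, taking element 0 from V2 and the remaining elements from V1.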
5225 static SDValue getMOVL(SelectionDAG &DAG, SDLoc dl, EVT VT, SDValue V1,
5227 unsigned NumElems = VT.getVectorNumElements();
5228 SmallVector<int, 8> Mask;
5229 Mask.push_back(NumElems);
5230 for (unsigned i = 1; i != NumElems; ++i)
5232 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5235 /// getUnpackl - Returns a vector_shuffle node for an unpackl operation.
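/// For example (illustrative): for v4i32 this builds the interleaving mask
/// <0, 4, 1, 5>, the pattern matched by PUNPCKLDQ.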
5236 static SDValue getUnpackl(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5238 unsigned NumElems = VT.getVectorNumElements();
5239 SmallVector<int, 8> Mask;
5240 for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
5242 Mask.push_back(i + NumElems);
5244 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5247 /// getUnpackh - Returns a vector_shuffle node for an unpackh operation.
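/// For example (illustrative): for v4i32 this builds the interleaving mask
/// <2, 6, 3, 7>, the pattern matched by PUNPCKHDQ.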
5248 static SDValue getUnpackh(SelectionDAG &DAG, SDLoc dl, MVT VT, SDValue V1,
5250 unsigned NumElems = VT.getVectorNumElements();
5251 SmallVector<int, 8> Mask;
5252 for (unsigned i = 0, Half = NumElems/2; i != Half; ++i) {
5253 Mask.push_back(i + Half);
5254 Mask.push_back(i + NumElems + Half);
5256 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask[0]);
5259 // PromoteSplati8i16 - All i16 and i8 vector types can't be used directly by
5260 // a generic shuffle instruction because the target has no such instructions.
5261 // Generate shuffles which repeat i16 and i8 several times until they can be
5262 // represented by v4f32 and then be manipulated by target supported shuffles.
5263 static SDValue PromoteSplati8i16(SDValue V, SelectionDAG &DAG, int &EltNo) {
5264 MVT VT = V.getSimpleValueType();
5265 int NumElems = VT.getVectorNumElements();
5268 while (NumElems > 4) {
5269 if (EltNo < NumElems/2) {
5270 V = getUnpackl(DAG, dl, VT, V, V);
5272 V = getUnpackh(DAG, dl, VT, V, V);
5273 EltNo -= NumElems/2;
5280 /// getLegalSplat - Generate a legal splat with supported x86 shuffles
5281 static SDValue getLegalSplat(SelectionDAG &DAG, SDValue V, int EltNo) {
5282 MVT VT = V.getSimpleValueType();
5285 if (VT.is128BitVector()) {
5286 V = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V);
5287 int SplatMask[4] = { EltNo, EltNo, EltNo, EltNo };
5288 V = DAG.getVectorShuffle(MVT::v4f32, dl, V, DAG.getUNDEF(MVT::v4f32),
5290 } else if (VT.is256BitVector()) {
5291 // To use VPERMILPS to splat scalars, the second half of indices must
5292 // refer to the higher part, which is a duplication of the lower one,
5293 // because VPERMILPS can only handle in-lane permutations.
5294 int SplatMask[8] = { EltNo, EltNo, EltNo, EltNo,
5295 EltNo+4, EltNo+4, EltNo+4, EltNo+4 };
5297 V = DAG.getNode(ISD::BITCAST, dl, MVT::v8f32, V);
5298 V = DAG.getVectorShuffle(MVT::v8f32, dl, V, DAG.getUNDEF(MVT::v8f32),
5301 llvm_unreachable("Vector size not supported");
5303 return DAG.getNode(ISD::BITCAST, dl, VT, V);
5306 /// PromoteSplat - Splat is promoted to target supported vector shuffles.
5307 static SDValue PromoteSplat(ShuffleVectorSDNode *SV, SelectionDAG &DAG) {
5308 MVT SrcVT = SV->getSimpleValueType(0);
5309 SDValue V1 = SV->getOperand(0);
5312 int EltNo = SV->getSplatIndex();
5313 int NumElems = SrcVT.getVectorNumElements();
5314 bool Is256BitVec = SrcVT.is256BitVector();
5316 assert(((SrcVT.is128BitVector() && NumElems > 4) || Is256BitVec) &&
5317 "Unknown how to promote splat for type");
5319 // Extract the 128-bit part containing the splat element and update
5320 // the splat element index when it refers to the higher register.
5322 V1 = Extract128BitVector(V1, EltNo, DAG, dl);
5323 if (EltNo >= NumElems/2)
5324 EltNo -= NumElems/2;
5327 // All i16 and i8 vector types can't be used directly by a generic shuffle
5328 // instruction because the target has no such instruction. Generate shuffles
5329 // which repeat i16 and i8 several times until they fit in i32, and then can
5330 // be manipulated by target supported shuffles.
5331 MVT EltVT = SrcVT.getVectorElementType();
5332 if (EltVT == MVT::i8 || EltVT == MVT::i16)
5333 V1 = PromoteSplati8i16(V1, DAG, EltNo);
5335 // Recreate the 256-bit vector and place the same 128-bit vector
5336 // into the low and high part. This is necessary because we want
5337 // to use VPERM* to shuffle the vectors
5339 V1 = DAG.getNode(ISD::CONCAT_VECTORS, dl, SrcVT, V1, V1);
5342 return getLegalSplat(DAG, V1, EltNo);
5345 /// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
5346 /// vector against a zero or undef vector. This produces a shuffle where the low
5347 /// element of V2 is swizzled into the zero/undef vector, landing at element
5348 /// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
5349 static SDValue getShuffleVectorZeroOrUndef(SDValue V2, unsigned Idx,
5351 const X86Subtarget *Subtarget,
5352 SelectionDAG &DAG) {
5353 MVT VT = V2.getSimpleValueType();
5355 ? getZeroVector(VT, Subtarget, DAG, SDLoc(V2)) : DAG.getUNDEF(VT);
5356 unsigned NumElems = VT.getVectorNumElements();
5357 SmallVector<int, 16> MaskVec;
5358 for (unsigned i = 0; i != NumElems; ++i)
5359 // If this is the insertion idx, put the low elt of V2 here.
5360 MaskVec.push_back(i == Idx ? NumElems : i);
5361 return DAG.getVectorShuffle(VT, SDLoc(V2), V1, V2, &MaskVec[0]);
5364 /// getTargetShuffleMask - Calculates the shuffle mask corresponding to the
5365 /// target specific opcode. Returns true if the Mask could be calculated. Sets
5366 /// IsUnary to true if it only uses one source. Note that this will set IsUnary for
5367 /// shuffles which use a single input multiple times, and in those cases it will
5368 /// adjust the mask to only have indices within that single input.
5369 static bool getTargetShuffleMask(SDNode *N, MVT VT,
5370 SmallVectorImpl<int> &Mask, bool &IsUnary) {
5371 unsigned NumElems = VT.getVectorNumElements();
5375 bool IsFakeUnary = false;
5376 switch(N->getOpcode()) {
5377 case X86ISD::BLENDI:
5378 ImmN = N->getOperand(N->getNumOperands()-1);
5379 DecodeBLENDMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5382 ImmN = N->getOperand(N->getNumOperands()-1);
5383 DecodeSHUFPMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5384 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5386 case X86ISD::UNPCKH:
5387 DecodeUNPCKHMask(VT, Mask);
5388 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5390 case X86ISD::UNPCKL:
5391 DecodeUNPCKLMask(VT, Mask);
5392 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5394 case X86ISD::MOVHLPS:
5395 DecodeMOVHLPSMask(NumElems, Mask);
5396 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5398 case X86ISD::MOVLHPS:
5399 DecodeMOVLHPSMask(NumElems, Mask);
5400 IsUnary = IsFakeUnary = N->getOperand(0) == N->getOperand(1);
5402 case X86ISD::PALIGNR:
5403 ImmN = N->getOperand(N->getNumOperands()-1);
5404 DecodePALIGNRMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5406 case X86ISD::PSHUFD:
5407 case X86ISD::VPERMILPI:
5408 ImmN = N->getOperand(N->getNumOperands()-1);
5409 DecodePSHUFMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5412 case X86ISD::PSHUFHW:
5413 ImmN = N->getOperand(N->getNumOperands()-1);
5414 DecodePSHUFHWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5417 case X86ISD::PSHUFLW:
5418 ImmN = N->getOperand(N->getNumOperands()-1);
5419 DecodePSHUFLWMask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5422 case X86ISD::PSHUFB: {
5424 SDValue MaskNode = N->getOperand(1);
5425 while (MaskNode->getOpcode() == ISD::BITCAST)
5426 MaskNode = MaskNode->getOperand(0);
5428 if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
5429 // If we have a build-vector, then things are easy.
5430 EVT VT = MaskNode.getValueType();
5431 assert(VT.isVector() &&
5432 "Can't produce a non-vector with a build_vector!");
5433 if (!VT.isInteger())
5436 int NumBytesPerElement = VT.getVectorElementType().getSizeInBits() / 8;
5438 SmallVector<uint64_t, 32> RawMask;
5439 for (int i = 0, e = MaskNode->getNumOperands(); i < e; ++i) {
5440 SDValue Op = MaskNode->getOperand(i);
5441 if (Op->getOpcode() == ISD::UNDEF) {
5442 RawMask.push_back((uint64_t)SM_SentinelUndef);
5445 auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
5448 APInt MaskElement = CN->getAPIntValue();
5450 // We now have to decode the element which could be any integer size and
5451 // extract each byte of it.
5452 for (int j = 0; j < NumBytesPerElement; ++j) {
5453 // Note that this is x86 and so always little endian: the low byte is
5454 // the first byte of the mask.
5455 RawMask.push_back(MaskElement.getLoBits(8).getZExtValue());
5456 MaskElement = MaskElement.lshr(8);
5459 DecodePSHUFBMask(RawMask, Mask);
5463 auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
5467 SDValue Ptr = MaskLoad->getBasePtr();
5468 if (Ptr->getOpcode() == X86ISD::Wrapper)
5469 Ptr = Ptr->getOperand(0);
5471 auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
5472 if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
5475 if (auto *C = dyn_cast<Constant>(MaskCP->getConstVal())) {
5476 DecodePSHUFBMask(C, Mask);
5484 case X86ISD::VPERMI:
5485 ImmN = N->getOperand(N->getNumOperands()-1);
5486 DecodeVPERMMask(cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5491 DecodeScalarMoveMask(VT, /* IsLoad */ false, Mask);
5493 case X86ISD::VPERM2X128:
5494 ImmN = N->getOperand(N->getNumOperands()-1);
5495 DecodeVPERM2X128Mask(VT, cast<ConstantSDNode>(ImmN)->getZExtValue(), Mask);
5496 if (Mask.empty()) return false;
5498 case X86ISD::MOVSLDUP:
5499 DecodeMOVSLDUPMask(VT, Mask);
5502 case X86ISD::MOVSHDUP:
5503 DecodeMOVSHDUPMask(VT, Mask);
5506 case X86ISD::MOVDDUP:
5507 DecodeMOVDDUPMask(VT, Mask);
5510 case X86ISD::MOVLHPD:
5511 case X86ISD::MOVLPD:
5512 case X86ISD::MOVLPS:
5513 // Not yet implemented
5515 default: llvm_unreachable("unknown target shuffle node");
5518 // If we have a fake unary shuffle, the shuffle mask is spread across two
5519 // inputs that are actually the same node. Re-map the mask to always point
5520 // into the first input.
5523 if (M >= (int)Mask.size())
5529 /// getShuffleScalarElt - Returns the scalar element that will make up the ith
5530 /// element of the result of the vector shuffle.
5531 static SDValue getShuffleScalarElt(SDNode *N, unsigned Index, SelectionDAG &DAG,
5534 return SDValue(); // Limit search depth.
5536 SDValue V = SDValue(N, 0);
5537 EVT VT = V.getValueType();
5538 unsigned Opcode = V.getOpcode();
5540 // Recurse into ISD::VECTOR_SHUFFLE node to find scalars.
5541 if (const ShuffleVectorSDNode *SV = dyn_cast<ShuffleVectorSDNode>(N)) {
5542 int Elt = SV->getMaskElt(Index);
5545 return DAG.getUNDEF(VT.getVectorElementType());
5547 unsigned NumElems = VT.getVectorNumElements();
5548 SDValue NewV = (Elt < (int)NumElems) ? SV->getOperand(0)
5549 : SV->getOperand(1);
5550 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG, Depth+1);
5553 // Recurse into target specific vector shuffles to find scalars.
5554 if (isTargetShuffle(Opcode)) {
5555 MVT ShufVT = V.getSimpleValueType();
5556 unsigned NumElems = ShufVT.getVectorNumElements();
5557 SmallVector<int, 16> ShuffleMask;
5560 if (!getTargetShuffleMask(N, ShufVT, ShuffleMask, IsUnary))
5563 int Elt = ShuffleMask[Index];
5565 return DAG.getUNDEF(ShufVT.getVectorElementType());
5567 SDValue NewV = (Elt < (int)NumElems) ? N->getOperand(0)
5569 return getShuffleScalarElt(NewV.getNode(), Elt % NumElems, DAG,
5573 // Actual nodes that may contain scalar elements
5574 if (Opcode == ISD::BITCAST) {
5575 V = V.getOperand(0);
5576 EVT SrcVT = V.getValueType();
5577 unsigned NumElems = VT.getVectorNumElements();
5579 if (!SrcVT.isVector() || SrcVT.getVectorNumElements() != NumElems)
5583 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR)
5584 return (Index == 0) ? V.getOperand(0)
5585 : DAG.getUNDEF(VT.getVectorElementType());
5587 if (V.getOpcode() == ISD::BUILD_VECTOR)
5588 return V.getOperand(Index);
5593 /// getNumOfConsecutiveZeros - Return the number of elements of a vector
5594 /// shuffle operation which come consecutively from a zero. The
5595 /// search can start in two different directions, from left or right.
5596 /// We count undefs as zeros until PreferredNum is reached.
5597 static unsigned getNumOfConsecutiveZeros(ShuffleVectorSDNode *SVOp,
5598 unsigned NumElems, bool ZerosFromLeft,
5600 unsigned PreferredNum = -1U) {
5601 unsigned NumZeros = 0;
5602 for (unsigned i = 0; i != NumElems; ++i) {
5603 unsigned Index = ZerosFromLeft ? i : NumElems - i - 1;
5604 SDValue Elt = getShuffleScalarElt(SVOp, Index, DAG, 0);
5608 if (X86::isZeroNode(Elt))
5610 else if (Elt.getOpcode() == ISD::UNDEF) // Undef as zero up to PreferredNum.
5611 NumZeros = std::min(NumZeros + 1, PreferredNum);
5619 /// isShuffleMaskConsecutive - Check if the shuffle mask indices [MaskI, MaskE)
5620 /// correspond consecutively to elements from one of the vector operands,
5621 /// starting from its index OpIdx. Also tell OpNum which source vector operand.
5623 bool isShuffleMaskConsecutive(ShuffleVectorSDNode *SVOp,
5624 unsigned MaskI, unsigned MaskE, unsigned OpIdx,
5625 unsigned NumElems, unsigned &OpNum) {
5626 bool SeenV1 = false;
5627 bool SeenV2 = false;
5629 for (unsigned i = MaskI; i != MaskE; ++i, ++OpIdx) {
5630 int Idx = SVOp->getMaskElt(i);
5631 // Ignore undef indices
5635 if (Idx < (int)NumElems)
5640 // Only accept consecutive elements from the same vector
5641 if ((Idx % NumElems != OpIdx) || (SeenV1 && SeenV2))
5645 OpNum = SeenV1 ? 0 : 1;
5649 /// isVectorShiftRight - Returns true if the shuffle can be implemented as a
5650 /// logical right shift of a vector.
5651 static bool isVectorShiftRight(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5652 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5654 SVOp->getSimpleValueType(0).getVectorNumElements();
5655 unsigned NumZeros = getNumOfConsecutiveZeros(
5656 SVOp, NumElems, false /* check zeros from right */, DAG,
5657 SVOp->getMaskElt(0));
5663 // Considering the elements in the mask that are not consecutive zeros,
5664 // check if they consecutively come from only one of the source vectors.
5666 // V1 = {X, A, B, C} 0
5668 // vector_shuffle V1, V2 <1, 2, 3, X>
5670 if (!isShuffleMaskConsecutive(SVOp,
5671 0, // Mask Start Index
5672 NumElems-NumZeros, // Mask End Index(exclusive)
5673 NumZeros, // Where to start looking in the src vector
5674 NumElems, // Number of elements in vector
5675 OpSrc)) // Which source operand ?
5680 ShVal = SVOp->getOperand(OpSrc);
5684 /// isVectorShiftLeft - Returns true if the shuffle can be implemented as a
5685 /// logical left shift of a vector.
5686 static bool isVectorShiftLeft(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5687 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5689 SVOp->getSimpleValueType(0).getVectorNumElements();
5690 unsigned NumZeros = getNumOfConsecutiveZeros(
5691 SVOp, NumElems, true /* check zeros from left */, DAG,
5692 NumElems - SVOp->getMaskElt(NumElems - 1) - 1);
5698 // Considering the elements in the mask that are not consecutive zeros,
5699 // check if they consecutively come from only one of the source vectors.
5701 // 0 { A, B, X, X } = V2
5703 // vector_shuffle V1, V2 <X, X, 4, 5>
5705 if (!isShuffleMaskConsecutive(SVOp,
5706 NumZeros, // Mask Start Index
5707 NumElems, // Mask End Index(exclusive)
5708 0, // Where to start looking in the src vector
5709 NumElems, // Number of elements in vector
5710 OpSrc)) // Which source operand ?
5715 ShVal = SVOp->getOperand(OpSrc);
5719 /// isVectorShift - Returns true if the shuffle can be implemented as a
5720 /// logical left or right shift of a vector.
5721 static bool isVectorShift(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG,
5722 bool &isLeft, SDValue &ShVal, unsigned &ShAmt) {
5723 // Although the logic below support any bitwidth size, there are no
5724 // shift instructions which handle more than 128-bit vectors.
5725 if (!SVOp->getSimpleValueType(0).is128BitVector())
5728 if (isVectorShiftLeft(SVOp, DAG, isLeft, ShVal, ShAmt) ||
5729 isVectorShiftRight(SVOp, DAG, isLeft, ShVal, ShAmt))
5735 /// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
5737 static SDValue LowerBuildVectorv16i8(SDValue Op, unsigned NonZeros,
5738 unsigned NumNonZero, unsigned NumZero,
5740 const X86Subtarget* Subtarget,
5741 const TargetLowering &TLI) {
5748 for (unsigned i = 0; i < 16; ++i) {
5749 bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
5750 if (ThisIsNonZero && First) {
5752 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5754 V = DAG.getUNDEF(MVT::v8i16);
5759 SDValue ThisElt, LastElt;
5760 bool LastIsNonZero = (NonZeros & (1 << (i-1))) != 0;
5761 if (LastIsNonZero) {
5762 LastElt = DAG.getNode(ISD::ZERO_EXTEND, dl,
5763 MVT::i16, Op.getOperand(i-1));
5765 if (ThisIsNonZero) {
5766 ThisElt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i16, Op.getOperand(i));
5767 ThisElt = DAG.getNode(ISD::SHL, dl, MVT::i16,
5768 ThisElt, DAG.getConstant(8, MVT::i8));
5770 ThisElt = DAG.getNode(ISD::OR, dl, MVT::i16, ThisElt, LastElt);
5774 if (ThisElt.getNode())
5775 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, V, ThisElt,
5776 DAG.getIntPtrConstant(i/2));
5780 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, V);
5783 /// LowerBuildVectorv8i16 - Custom lower build_vector of v8i16.
5785 static SDValue LowerBuildVectorv8i16(SDValue Op, unsigned NonZeros,
5786 unsigned NumNonZero, unsigned NumZero,
5788 const X86Subtarget* Subtarget,
5789 const TargetLowering &TLI) {
5796 for (unsigned i = 0; i < 8; ++i) {
5797 bool isNonZero = (NonZeros & (1 << i)) != 0;
5801 V = getZeroVector(MVT::v8i16, Subtarget, DAG, dl);
5803 V = DAG.getUNDEF(MVT::v8i16);
5806 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl,
5807 MVT::v8i16, V, Op.getOperand(i),
5808 DAG.getIntPtrConstant(i));
5815 /// LowerBuildVectorv4x32 - Custom lower build_vector of v4i32 or v4f32.
5816 static SDValue LowerBuildVectorv4x32(SDValue Op, SelectionDAG &DAG,
5817 const X86Subtarget *Subtarget,
5818 const TargetLowering &TLI) {
5819 // Find all zeroable elements.
5821 for (int i=0; i < 4; ++i) {
5822 SDValue Elt = Op->getOperand(i);
5823 Zeroable[i] = (Elt.getOpcode() == ISD::UNDEF || X86::isZeroNode(Elt));
5825 assert(std::count_if(&Zeroable[0], &Zeroable[4],
5826 [](bool M) { return !M; }) > 1 &&
5827 "We expect at least two non-zero elements!");
5829 // We only know how to deal with build_vector nodes where elements are either
5830 // zeroable or extract_vector_elt with constant index.
5831 SDValue FirstNonZero;
5832 unsigned FirstNonZeroIdx;
5833 for (unsigned i=0; i < 4; ++i) {
5836 SDValue Elt = Op->getOperand(i);
5837 if (Elt.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
5838 !isa<ConstantSDNode>(Elt.getOperand(1)))
5840 // Make sure that this node is extracting from a 128-bit vector.
5841 MVT VT = Elt.getOperand(0).getSimpleValueType();
5842 if (!VT.is128BitVector())
5844 if (!FirstNonZero.getNode()) {
5846 FirstNonZeroIdx = i;
5850 assert(FirstNonZero.getNode() && "Unexpected build vector of all zeros!");
5851 SDValue V1 = FirstNonZero.getOperand(0);
5852 MVT VT = V1.getSimpleValueType();
5854 // See if this build_vector can be lowered as a blend with zero.
5856 unsigned EltMaskIdx, EltIdx;
5858 for (EltIdx = 0; EltIdx < 4; ++EltIdx) {
5859 if (Zeroable[EltIdx]) {
5860 // The zero vector will be on the right hand side.
5861 Mask[EltIdx] = EltIdx+4;
5865 Elt = Op->getOperand(EltIdx);
5866 // By construction, Elt is an EXTRACT_VECTOR_ELT with constant index.
5867 EltMaskIdx = cast<ConstantSDNode>(Elt.getOperand(1))->getZExtValue();
5868 if (Elt.getOperand(0) != V1 || EltMaskIdx != EltIdx)
5870 Mask[EltIdx] = EltIdx;
5874 // Let the shuffle legalizer deal with blend operations.
5875 SDValue VZero = getZeroVector(VT, Subtarget, DAG, SDLoc(Op));
5876 if (V1.getSimpleValueType() != VT)
5877 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), VT, V1);
5878 return DAG.getVectorShuffle(VT, SDLoc(V1), V1, VZero, &Mask[0]);
5881 // See if we can lower this build_vector to an INSERTPS.
5882 if (!Subtarget->hasSSE41())
5885 SDValue V2 = Elt.getOperand(0);
5886 if (Elt == FirstNonZero && EltIdx == FirstNonZeroIdx)
5889 bool CanFold = true;
5890 for (unsigned i = EltIdx + 1; i < 4 && CanFold; ++i) {
5894 SDValue Current = Op->getOperand(i);
5895 SDValue SrcVector = Current->getOperand(0);
5898 CanFold = SrcVector == V1 &&
5899 cast<ConstantSDNode>(Current.getOperand(1))->getZExtValue() == i;
5905 assert(V1.getNode() && "Expected at least two non-zero elements!");
5906 if (V1.getSimpleValueType() != MVT::v4f32)
5907 V1 = DAG.getNode(ISD::BITCAST, SDLoc(V1), MVT::v4f32, V1);
5908 if (V2.getSimpleValueType() != MVT::v4f32)
5909 V2 = DAG.getNode(ISD::BITCAST, SDLoc(V2), MVT::v4f32, V2);
5911 // Ok, we can emit an INSERTPS instruction.
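// The INSERTPS immediate layout (SSE4.1 encoding): bits [7:6] select the source
// element, bits [5:4] select the destination position, and bits [3:0] are a zero
// mask applied to the destination elements.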
5913 for (int i = 0; i < 4; ++i)
5917 unsigned InsertPSMask = EltMaskIdx << 6 | EltIdx << 4 | ZMask;
5918 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
5919 SDValue Result = DAG.getNode(X86ISD::INSERTPS, SDLoc(Op), MVT::v4f32, V1, V2,
5920 DAG.getIntPtrConstant(InsertPSMask));
5921 return DAG.getNode(ISD::BITCAST, SDLoc(Op), VT, Result);
5924 /// Return a vector logical shift node.
5925 static SDValue getVShift(bool isLeft, EVT VT, SDValue SrcOp,
5926 unsigned NumBits, SelectionDAG &DAG,
5927 const TargetLowering &TLI, SDLoc dl) {
5928 assert(VT.is128BitVector() && "Unknown type for VShift");
5929 MVT ShVT = MVT::v2i64;
5930 unsigned Opc = isLeft ? X86ISD::VSHLDQ : X86ISD::VSRLDQ;
5931 SrcOp = DAG.getNode(ISD::BITCAST, dl, ShVT, SrcOp);
5932 MVT ScalarShiftTy = TLI.getScalarShiftAmountTy(SrcOp.getValueType());
5933 SDValue ShiftVal = DAG.getConstant(NumBits, ScalarShiftTy);
5934 return DAG.getNode(ISD::BITCAST, dl, VT,
5935 DAG.getNode(Opc, dl, ShVT, SrcOp, ShiftVal));
5939 LowerAsSplatVectorLoad(SDValue SrcOp, MVT VT, SDLoc dl, SelectionDAG &DAG) {
5941 // Check if the scalar load can be widened into a vector load. And if
5942 // the address is "base + cst" see if the cst can be "absorbed" into
5943 // the shuffle mask.
5944 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(SrcOp)) {
5945 SDValue Ptr = LD->getBasePtr();
5946 if (!ISD::isNormalLoad(LD) || LD->isVolatile())
5948 EVT PVT = LD->getValueType(0);
5949 if (PVT != MVT::i32 && PVT != MVT::f32)
5954 if (FrameIndexSDNode *FINode = dyn_cast<FrameIndexSDNode>(Ptr)) {
5955 FI = FINode->getIndex();
5957 } else if (DAG.isBaseWithConstantOffset(Ptr) &&
5958 isa<FrameIndexSDNode>(Ptr.getOperand(0))) {
5959 FI = cast<FrameIndexSDNode>(Ptr.getOperand(0))->getIndex();
5960 Offset = Ptr.getConstantOperandVal(1);
5961 Ptr = Ptr.getOperand(0);
5966 // FIXME: 256-bit vector instructions don't require a strict alignment,
5967 // improve this code to support it better.
5968 unsigned RequiredAlign = VT.getSizeInBits()/8;
5969 SDValue Chain = LD->getChain();
5970 // Make sure the stack object alignment is at least 16 or 32.
5971 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
5972 if (DAG.InferPtrAlignment(Ptr) < RequiredAlign) {
5973 if (MFI->isFixedObjectIndex(FI)) {
5974 // Can't change the alignment. FIXME: It's possible to compute
5975 // the exact stack offset and reference FI + adjust offset instead,
5976 // if someone *really* cares about this; that's the way to implement it.
5979 MFI->setObjectAlignment(FI, RequiredAlign);
5983 // (Offset % 16 or 32) must be a multiple of 4. The address is then
5984 // Ptr + (Offset & ~15).
5987 if ((Offset % RequiredAlign) & 3)
5989 int64_t StartOffset = Offset & ~(RequiredAlign-1);
5991 Ptr = DAG.getNode(ISD::ADD, SDLoc(Ptr), Ptr.getValueType(),
5992 Ptr,DAG.getConstant(StartOffset, Ptr.getValueType()));
5994 int EltNo = (Offset - StartOffset) >> 2;
5995 unsigned NumElems = VT.getVectorNumElements();
5997 EVT NVT = EVT::getVectorVT(*DAG.getContext(), PVT, NumElems);
5998 SDValue V1 = DAG.getLoad(NVT, dl, Chain, Ptr,
5999 LD->getPointerInfo().getWithOffset(StartOffset),
6000 false, false, false, 0);
6002 SmallVector<int, 8> Mask;
6003 for (unsigned i = 0; i != NumElems; ++i)
6004 Mask.push_back(EltNo);
6006 return DAG.getVectorShuffle(NVT, dl, V1, DAG.getUNDEF(NVT), &Mask[0]);
6012 /// Given the initializing elements 'Elts' of a vector of type 'VT', see if the
6013 /// elements can be replaced by a single large load which has the same value as
6014 /// a build_vector or insert_subvector whose loaded operands are 'Elts'.
6016 /// Example: <load i32 *a, load i32 *a+4, undef, undef> -> zextload a
6018 /// FIXME: we'd also like to handle the case where the last elements are zero
6019 /// rather than undef via VZEXT_LOAD, but we do not detect that case today.
6020 /// There's even a handy isZeroNode for that purpose.
6021 static SDValue EltsFromConsecutiveLoads(EVT VT, ArrayRef<SDValue> Elts,
6022 SDLoc &DL, SelectionDAG &DAG,
6023 bool isAfterLegalize) {
6024 unsigned NumElems = Elts.size();
6026 LoadSDNode *LDBase = nullptr;
6027 unsigned LastLoadedElt = -1U;
6029 // For each element in the initializer, see if we've found a load or an undef.
6030 // If we don't find an initial load element, or later load elements are
6031 // non-consecutive, bail out.
6032 for (unsigned i = 0; i < NumElems; ++i) {
6033 SDValue Elt = Elts[i];
6034 // Look through a bitcast.
6035 if (Elt.getNode() && Elt.getOpcode() == ISD::BITCAST)
6036 Elt = Elt.getOperand(0);
6037 if (!Elt.getNode() ||
6038 (Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.getNode())))
6041 if (Elt.getNode()->getOpcode() == ISD::UNDEF)
6043 LDBase = cast<LoadSDNode>(Elt.getNode());
6047 if (Elt.getOpcode() == ISD::UNDEF)
6050 LoadSDNode *LD = cast<LoadSDNode>(Elt);
6051 EVT LdVT = Elt.getValueType();
6052 // Each loaded element must be the correct fractional portion of the
6053 // requested vector load.
6054 if (LdVT.getSizeInBits() != VT.getSizeInBits() / NumElems)
6056 if (!DAG.isConsecutiveLoad(LD, LDBase, LdVT.getSizeInBits() / 8, i))
6061 // If we have found an entire vector of loads and undefs, then return a large
6062 // load of the entire vector width starting at the base pointer. If we found
6063 // consecutive loads for the low half, generate a vzext_load node.
6064 if (LastLoadedElt == NumElems - 1) {
6065 assert(LDBase && "Did not find base load for merging consecutive loads");
6066 EVT EltVT = LDBase->getValueType(0);
6067 // Ensure that the input vector size for the merged loads matches the
6068 // cumulative size of the input elements.
6069 if (VT.getSizeInBits() != EltVT.getSizeInBits() * NumElems)
6072 if (isAfterLegalize &&
6073 !DAG.getTargetLoweringInfo().isOperationLegal(ISD::LOAD, VT))
6076 SDValue NewLd = SDValue();
6078 NewLd = DAG.getLoad(VT, DL, LDBase->getChain(), LDBase->getBasePtr(),
6079 LDBase->getPointerInfo(), LDBase->isVolatile(),
6080 LDBase->isNonTemporal(), LDBase->isInvariant(),
6081 LDBase->getAlignment());
6083 if (LDBase->hasAnyUseOfValue(1)) {
6084 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6086 SDValue(NewLd.getNode(), 1));
6087 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6088 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6089 SDValue(NewLd.getNode(), 1));
6095 // TODO: The code below fires only for loading the low v2i32 / v2f32
6096 // of a v4i32 / v4f32. It's probably worth generalizing.
6097 EVT EltVT = VT.getVectorElementType();
6098 if (NumElems == 4 && LastLoadedElt == 1 && (EltVT.getSizeInBits() == 32) &&
6099 DAG.getTargetLoweringInfo().isTypeLegal(MVT::v2i64)) {
6100 SDVTList Tys = DAG.getVTList(MVT::v2i64, MVT::Other);
6101 SDValue Ops[] = { LDBase->getChain(), LDBase->getBasePtr() };
6103 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, DL, Tys, Ops, MVT::i64,
6104 LDBase->getPointerInfo(),
6105 LDBase->getAlignment(),
6106 false/*isVolatile*/, true/*ReadMem*/,
6109 // Make sure the newly-created LOAD is in the same position as LDBase in
6110 // terms of dependency. We create a TokenFactor for LDBase and ResNode, and
6111 // update uses of LDBase's output chain to use the TokenFactor.
6112 if (LDBase->hasAnyUseOfValue(1)) {
6113 SDValue NewChain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
6114 SDValue(LDBase, 1), SDValue(ResNode.getNode(), 1));
6115 DAG.ReplaceAllUsesOfValueWith(SDValue(LDBase, 1), NewChain);
6116 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(LDBase, 1),
6117 SDValue(ResNode.getNode(), 1));
6120 return DAG.getNode(ISD::BITCAST, DL, VT, ResNode);
6125 /// LowerVectorBroadcast - Attempt to use the vbroadcast instruction
6126 /// to generate a splat value for the following cases:
6127 /// 1. A splat BUILD_VECTOR which uses a single scalar load, or a constant.
6128 /// 2. A splat shuffle which uses a scalar_to_vector node which comes from
6129 /// a scalar load, or a constant.
6130 /// The VBROADCAST node is returned when a pattern is found,
6131 /// or SDValue() otherwise.
6132 static SDValue LowerVectorBroadcast(SDValue Op, const X86Subtarget* Subtarget,
6133 SelectionDAG &DAG) {
6134 // VBROADCAST requires AVX.
6135 // TODO: Splats could be generated for non-AVX CPUs using SSE
6136 // instructions, but there's less potential gain for only 128-bit vectors.
6137 if (!Subtarget->hasAVX())
6140 MVT VT = Op.getSimpleValueType();
6143 assert((VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) &&
6144 "Unsupported vector type for broadcast.");
6149 switch (Op.getOpcode()) {
6151 // Unknown pattern found.
6154 case ISD::BUILD_VECTOR: {
6155 auto *BVOp = cast<BuildVectorSDNode>(Op.getNode());
6156 BitVector UndefElements;
6157 SDValue Splat = BVOp->getSplatValue(&UndefElements);
6159 // We need a splat of a single value to use broadcast, and it doesn't
6160 // make any sense if the value is only in one element of the vector.
6161 if (!Splat || (VT.getVectorNumElements() - UndefElements.count()) <= 1)
6165 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6166 Ld.getOpcode() == ISD::ConstantFP);
6168 // Make sure that all of the users of a non-constant load are from the
6169 // BUILD_VECTOR node.
6170 if (!ConstSplatVal && !BVOp->isOnlyUserOf(Ld.getNode()))
6175 case ISD::VECTOR_SHUFFLE: {
6176 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
6178 // Shuffles must have a splat mask where the first element is
6180 if ((!SVOp->isSplat()) || SVOp->getMaskElt(0) != 0)
6183 SDValue Sc = Op.getOperand(0);
6184 if (Sc.getOpcode() != ISD::SCALAR_TO_VECTOR &&
6185 Sc.getOpcode() != ISD::BUILD_VECTOR) {
6187 if (!Subtarget->hasInt256())
6190 // Use the register form of the broadcast instruction available on AVX2.
6191 if (VT.getSizeInBits() >= 256)
6192 Sc = Extract128BitVector(Sc, 0, DAG, dl);
6193 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Sc);
6196 Ld = Sc.getOperand(0);
6197 ConstSplatVal = (Ld.getOpcode() == ISD::Constant ||
6198 Ld.getOpcode() == ISD::ConstantFP);
6200 // The scalar_to_vector node and the suspected
6201 // load node must have exactly one user.
6202 // Constants may have multiple users.
6204 // AVX-512 has register version of the broadcast
6205 bool hasRegVer = Subtarget->hasAVX512() && VT.is512BitVector() &&
6206 Ld.getValueType().getSizeInBits() >= 32;
6207 if (!ConstSplatVal && ((!Sc.hasOneUse() || !Ld.hasOneUse()) &&
6214 unsigned ScalarSize = Ld.getValueType().getSizeInBits();
6215 bool IsGE256 = (VT.getSizeInBits() >= 256);
6217 // When optimizing for size, generate up to 5 extra bytes for a broadcast
6218 // instruction to save 8 or more bytes of constant pool data.
6219 // TODO: If multiple splats are generated to load the same constant,
6220 // it may be detrimental to overall size. There needs to be a way to detect
6221 // that condition to know if this is truly a size win.
6222 const Function *F = DAG.getMachineFunction().getFunction();
6223 bool OptForSize = F->hasFnAttribute(Attribute::OptimizeForSize);
6225 // Handle broadcasting a single constant scalar from the constant pool
6227 // On Sandybridge (no AVX2), it is still better to load a constant vector
6228 // from the constant pool and not to broadcast it from a scalar.
6229 // But override that restriction when optimizing for size.
6230 // TODO: Check if splatting is recommended for other AVX-capable CPUs.
6231 if (ConstSplatVal && (Subtarget->hasAVX2() || OptForSize)) {
6232 EVT CVT = Ld.getValueType();
6233 assert(!CVT.isVector() && "Must not broadcast a vector type");
6235 // Splat f32, i32, v4f64, v4i64 in all cases with AVX2.
6236 // For size optimization, also splat v2f64 and v2i64, and for size opt
6237 // with AVX2, also splat i8 and i16.
6238 // With pattern matching, the VBROADCAST node may become a VMOVDDUP.
6239 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6240 (OptForSize && (ScalarSize == 64 || Subtarget->hasAVX2()))) {
6241 const Constant *C = nullptr;
6242 if (ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Ld))
6243 C = CI->getConstantIntValue();
6244 else if (ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(Ld))
6245 C = CF->getConstantFPValue();
6247 assert(C && "Invalid constant type");
6249 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6250 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
6251 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
6252 Ld = DAG.getLoad(CVT, dl, DAG.getEntryNode(), CP,
6253 MachinePointerInfo::getConstantPool(),
6254 false, false, false, Alignment);
6256 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6260 bool IsLoad = ISD::isNormalLoad(Ld.getNode());
6262 // Handle AVX2 in-register broadcasts.
6263 if (!IsLoad && Subtarget->hasInt256() &&
6264 (ScalarSize == 32 || (IsGE256 && ScalarSize == 64)))
6265 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6267 // The scalar source must be a normal load.
6271 if (ScalarSize == 32 || (IsGE256 && ScalarSize == 64) ||
6272 (Subtarget->hasVLX() && ScalarSize == 64))
6273 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6275 // The integer check is needed for the 64-bit into 128-bit case, so that it
6276 // doesn't match double, since there is no vbroadcastsd xmm.
6277 if (Subtarget->hasInt256() && Ld.getValueType().isInteger()) {
6278 if (ScalarSize == 8 || ScalarSize == 16 || ScalarSize == 64)
6279 return DAG.getNode(X86ISD::VBROADCAST, dl, VT, Ld);
6282 // Unsupported broadcast.
6286 /// \brief For an EXTRACT_VECTOR_ELT with a constant index return the real
6287 /// underlying vector and index.
6289 /// Modifies \p ExtractedFromVec to the real vector and returns the real
6291 static int getUnderlyingExtractedFromVec(SDValue &ExtractedFromVec,
6293 int Idx = cast<ConstantSDNode>(ExtIdx)->getZExtValue();
6294 if (!isa<ShuffleVectorSDNode>(ExtractedFromVec))
6297 // For 256-bit vectors, LowerEXTRACT_VECTOR_ELT_SSE4 may have already
6299 // (extract_vector_elt (v8f32 %vreg1), Constant<6>)
6301 // (extract_vector_elt (vector_shuffle<2,u,u,u>
6302 // (extract_subvector (v8f32 %vreg0), Constant<4>),
6305 // In this case the vector is the extract_subvector expression and the index
6306 // is 2, as specified by the shuffle.
6307 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(ExtractedFromVec);
6308 SDValue ShuffleVec = SVOp->getOperand(0);
6309 MVT ShuffleVecVT = ShuffleVec.getSimpleValueType();
6310 assert(ShuffleVecVT.getVectorElementType() ==
6311 ExtractedFromVec.getSimpleValueType().getVectorElementType());
6313 int ShuffleIdx = SVOp->getMaskElt(Idx);
6314 if (isUndefOrInRange(ShuffleIdx, 0, ShuffleVecVT.getVectorNumElements())) {
6315 ExtractedFromVec = ShuffleVec;
6321 static SDValue buildFromShuffleMostly(SDValue Op, SelectionDAG &DAG) {
6322 MVT VT = Op.getSimpleValueType();
6324 // Skip if insert_vec_elt is not supported.
6325 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
6326 if (!TLI.isOperationLegalOrCustom(ISD::INSERT_VECTOR_ELT, VT))
6330 unsigned NumElems = Op.getNumOperands();
6334 SmallVector<unsigned, 4> InsertIndices;
6335 SmallVector<int, 8> Mask(NumElems, -1);
6337 for (unsigned i = 0; i != NumElems; ++i) {
6338 unsigned Opc = Op.getOperand(i).getOpcode();
6340 if (Opc == ISD::UNDEF)
6343 if (Opc != ISD::EXTRACT_VECTOR_ELT) {
6344 // Quit if more than 1 element needs inserting.
6345 if (InsertIndices.size() > 1)
6348 InsertIndices.push_back(i);
6352 SDValue ExtractedFromVec = Op.getOperand(i).getOperand(0);
6353 SDValue ExtIdx = Op.getOperand(i).getOperand(1);
6354 // Quit if non-constant index.
6355 if (!isa<ConstantSDNode>(ExtIdx))
6357 int Idx = getUnderlyingExtractedFromVec(ExtractedFromVec, ExtIdx);
6359 // Quit if extracted from vector of different type.
6360 if (ExtractedFromVec.getValueType() != VT)
6363 if (!VecIn1.getNode())
6364 VecIn1 = ExtractedFromVec;
6365 else if (VecIn1 != ExtractedFromVec) {
6366 if (!VecIn2.getNode())
6367 VecIn2 = ExtractedFromVec;
6368 else if (VecIn2 != ExtractedFromVec)
6369 // Quit if more than 2 vectors to shuffle
6373 if (ExtractedFromVec == VecIn1)
6375 else if (ExtractedFromVec == VecIn2)
6376 Mask[i] = Idx + NumElems;
6379 if (!VecIn1.getNode())
6382 VecIn2 = VecIn2.getNode() ? VecIn2 : DAG.getUNDEF(VT);
6383 SDValue NV = DAG.getVectorShuffle(VT, DL, VecIn1, VecIn2, &Mask[0]);
6384 for (unsigned i = 0, e = InsertIndices.size(); i != e; ++i) {
6385 unsigned Idx = InsertIndices[i];
6386 NV = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, NV, Op.getOperand(Idx),
6387 DAG.getIntPtrConstant(Idx));
6393 // Lower BUILD_VECTOR operation for v8i1 and v16i1 types.
6395 X86TargetLowering::LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const {
6397 MVT VT = Op.getSimpleValueType();
6398 assert((VT.getVectorElementType() == MVT::i1) && (VT.getSizeInBits() <= 16) &&
6399 "Unexpected type in LowerBUILD_VECTORvXi1!");
6402 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6403 SDValue Cst = DAG.getTargetConstant(0, MVT::i1);
6404 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6405 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6408 if (ISD::isBuildVectorAllOnes(Op.getNode())) {
6409 SDValue Cst = DAG.getTargetConstant(1, MVT::i1);
6410 SmallVector<SDValue, 16> Ops(VT.getVectorNumElements(), Cst);
6411 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ops);
6414 bool AllContants = true;
6415 uint64_t Immediate = 0;
6416 int NonConstIdx = -1;
6417 bool IsSplat = true;
6418 unsigned NumNonConsts = 0;
6419 unsigned NumConsts = 0;
6420 for (unsigned idx = 0, e = Op.getNumOperands(); idx < e; ++idx) {
6421 SDValue In = Op.getOperand(idx);
6422 if (In.getOpcode() == ISD::UNDEF)
6424 if (!isa<ConstantSDNode>(In)) {
6425 AllContants = false;
6430 if (cast<ConstantSDNode>(In)->getZExtValue())
6431 Immediate |= (1ULL << idx);
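// For example, a v8i1 build_vector of the constants <1, 0, 1, 1, 0, 0, 0, 0>
// sets bits 0, 2 and 3 here, giving Immediate == 0x0D.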
6433 if (In != Op.getOperand(0))
6438 SDValue FullMask = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1,
6439 DAG.getConstant(Immediate, MVT::i16));
6440 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, FullMask,
6441 DAG.getIntPtrConstant(0));
6444 if (NumNonConsts == 1 && NonConstIdx != 0) {
6447 SDValue VecAsImm = DAG.getConstant(Immediate,
6448 MVT::getIntegerVT(VT.getSizeInBits()));
6449 DstVec = DAG.getNode(ISD::BITCAST, dl, VT, VecAsImm);
6452 DstVec = DAG.getUNDEF(VT);
6453 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, DstVec,
6454 Op.getOperand(NonConstIdx),
6455 DAG.getIntPtrConstant(NonConstIdx));
6457 if (!IsSplat && (NonConstIdx != 0))
6458 llvm_unreachable("Unsupported BUILD_VECTOR operation");
6459 MVT SelectVT = (VT == MVT::v16i1)? MVT::i16 : MVT::i8;
6462 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6463 DAG.getConstant(-1, SelectVT),
6464 DAG.getConstant(0, SelectVT));
6466 Select = DAG.getNode(ISD::SELECT, dl, SelectVT, Op.getOperand(0),
6467 DAG.getConstant((Immediate | 1), SelectVT),
6468 DAG.getConstant(Immediate, SelectVT));
6469 return DAG.getNode(ISD::BITCAST, dl, VT, Select);
6472 /// \brief Return true if \p N implements a horizontal binop and place the
6473 /// operands for the horizontal binop into V0 and V1.
6475 /// This is a helper function of PerformBUILD_VECTORCombine.
6476 /// This function checks that the build_vector \p N in input implements a
6477 /// horizontal operation. Parameter \p Opcode defines the kind of horizontal
6478 /// operation to match.
6479 /// For example, if \p Opcode is equal to ISD::ADD, then this function
6480 /// checks if \p N implements a horizontal arithmetic add; if instead \p Opcode
6481 /// is equal to ISD::SUB, then this function checks if this is a horizontal
6484 /// This function only analyzes elements of \p N whose indices are
6485 /// in range [BaseIdx, LastIdx).
6486 static bool isHorizontalBinOp(const BuildVectorSDNode *N, unsigned Opcode,
6488 unsigned BaseIdx, unsigned LastIdx,
6489 SDValue &V0, SDValue &V1) {
6490 EVT VT = N->getValueType(0);
6492 assert(BaseIdx * 2 <= LastIdx && "Invalid Indices in input!");
6493 assert(VT.isVector() && VT.getVectorNumElements() >= LastIdx &&
6494 "Invalid Vector in input!");
6496 bool IsCommutable = (Opcode == ISD::ADD || Opcode == ISD::FADD);
6497 bool CanFold = true;
6498 unsigned ExpectedVExtractIdx = BaseIdx;
6499 unsigned NumElts = LastIdx - BaseIdx;
6500 V0 = DAG.getUNDEF(VT);
6501 V1 = DAG.getUNDEF(VT);
6503 // Check if N implements a horizontal binop.
6504 for (unsigned i = 0, e = NumElts; i != e && CanFold; ++i) {
6505 SDValue Op = N->getOperand(i + BaseIdx);
6508 if (Op->getOpcode() == ISD::UNDEF) {
6509 // Update the expected vector extract index.
6510 if (i * 2 == NumElts)
6511 ExpectedVExtractIdx = BaseIdx;
6512 ExpectedVExtractIdx += 2;
6516 CanFold = Op->getOpcode() == Opcode && Op->hasOneUse();
6521 SDValue Op0 = Op.getOperand(0);
6522 SDValue Op1 = Op.getOperand(1);
6524 // Try to match the following pattern:
6525 // (BINOP (extract_vector_elt A, I), (extract_vector_elt A, I+1))
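// For example, for a v4f32 build_vector with Opcode == ISD::FADD, element 0 is
// expected to be (fadd (extract_vector_elt A, 0), (extract_vector_elt A, 1)),
// element 1 to use indices 2 and 3 of the same source, and the second half of
// the build_vector repeats the pattern against a possibly different source.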
6526 CanFold = (Op0.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6527 Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
6528 Op0.getOperand(0) == Op1.getOperand(0) &&
6529 isa<ConstantSDNode>(Op0.getOperand(1)) &&
6530 isa<ConstantSDNode>(Op1.getOperand(1)));
6534 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6535 unsigned I1 = cast<ConstantSDNode>(Op1.getOperand(1))->getZExtValue();
6537 if (i * 2 < NumElts) {
6538 if (V0.getOpcode() == ISD::UNDEF)
6539 V0 = Op0.getOperand(0);
6541 if (V1.getOpcode() == ISD::UNDEF)
6542 V1 = Op0.getOperand(0);
6543 if (i * 2 == NumElts)
6544 ExpectedVExtractIdx = BaseIdx;
6547 SDValue Expected = (i * 2 < NumElts) ? V0 : V1;
6548 if (I0 == ExpectedVExtractIdx)
6549 CanFold = I1 == I0 + 1 && Op0.getOperand(0) == Expected;
6550 else if (IsCommutable && I1 == ExpectedVExtractIdx) {
6551 // Try to match the following dag sequence:
6552 // (BINOP (extract_vector_elt A, I+1), (extract_vector_elt A, I))
6553 CanFold = I0 == I1 + 1 && Op1.getOperand(0) == Expected;
6557 ExpectedVExtractIdx += 2;
6563 /// \brief Emit a sequence of two 128-bit horizontal add/sub followed by
6564 /// a concat_vector.
6566 /// This is a helper function of PerformBUILD_VECTORCombine.
6567 /// This function expects two 256-bit vectors called V0 and V1.
6568 /// At first, each vector is split into two separate 128-bit vectors.
6569 /// Then, the resulting 128-bit vectors are used to implement two
6570 /// horizontal binary operations.
6572 /// The kind of horizontal binary operation is defined by \p X86Opcode.
6574 /// \p Mode specifies how the 128-bit parts of V0 and V1 are passed to
6575 /// the two new horizontal binops.
6576 /// When Mode is set, the first horizontal binop dag node takes as input
6577 /// the lower 128-bit of V0 and the upper 128-bit of V0. The second
6578 /// horizontal binop dag node takes as input the lower 128-bit of V1
6579 /// and the upper 128-bit of V1.
6581 /// HADD V0_LO, V0_HI
6582 /// HADD V1_LO, V1_HI
6584 /// Otherwise, the first horizontal binop dag node takes as input the lower
6585 /// 128-bit of V0 and the lower 128-bit of V1, and the second horizontal binop
6586 /// dag node takes the upper 128-bit of V0 and the upper 128-bit of V1.
6588 /// HADD V0_LO, V1_LO
6589 /// HADD V0_HI, V1_HI
6591 /// If \p isUndefLO is set, then the algorithm propagates UNDEF to the lower
6592 /// 128-bits of the result. If \p isUndefHI is set, then UNDEF is propagated to
6593 /// the upper 128-bits of the result.
6594 static SDValue ExpandHorizontalBinOp(const SDValue &V0, const SDValue &V1,
6595 SDLoc DL, SelectionDAG &DAG,
6596 unsigned X86Opcode, bool Mode,
6597 bool isUndefLO, bool isUndefHI) {
6598 EVT VT = V0.getValueType();
6599 assert(VT.is256BitVector() && VT == V1.getValueType() &&
6600 "Invalid nodes in input!");
6602 unsigned NumElts = VT.getVectorNumElements();
6603 SDValue V0_LO = Extract128BitVector(V0, 0, DAG, DL);
6604 SDValue V0_HI = Extract128BitVector(V0, NumElts/2, DAG, DL);
6605 SDValue V1_LO = Extract128BitVector(V1, 0, DAG, DL);
6606 SDValue V1_HI = Extract128BitVector(V1, NumElts/2, DAG, DL);
6607 EVT NewVT = V0_LO.getValueType();
6609 SDValue LO = DAG.getUNDEF(NewVT);
6610 SDValue HI = DAG.getUNDEF(NewVT);
6613 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6614 if (!isUndefLO && V0->getOpcode() != ISD::UNDEF)
6615 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V0_HI);
6616 if (!isUndefHI && V1->getOpcode() != ISD::UNDEF)
6617 HI = DAG.getNode(X86Opcode, DL, NewVT, V1_LO, V1_HI);
6619 // Don't emit a horizontal binop if the result is expected to be UNDEF.
6620 if (!isUndefLO && (V0_LO->getOpcode() != ISD::UNDEF ||
6621 V1_LO->getOpcode() != ISD::UNDEF))
6622 LO = DAG.getNode(X86Opcode, DL, NewVT, V0_LO, V1_LO);
6624 if (!isUndefHI && (V0_HI->getOpcode() != ISD::UNDEF ||
6625 V1_HI->getOpcode() != ISD::UNDEF))
6626 HI = DAG.getNode(X86Opcode, DL, NewVT, V0_HI, V1_HI);
6629 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LO, HI);
6632 /// \brief Try to fold a build_vector that performs an 'addsub' into the
6633 /// sequence of 'vadd + vsub + blendi'.
6634 static SDValue matchAddSub(const BuildVectorSDNode *BV, SelectionDAG &DAG,
6635 const X86Subtarget *Subtarget) {
6637 EVT VT = BV->getValueType(0);
6638 unsigned NumElts = VT.getVectorNumElements();
6639 SDValue InVec0 = DAG.getUNDEF(VT);
6640 SDValue InVec1 = DAG.getUNDEF(VT);
6642 assert((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v4f32 ||
6643 VT == MVT::v2f64) && "build_vector with an invalid type found!");
6645 // Odd-numbered elements in the input build vector are obtained from
6646 // adding two integer/float elements.
6647 // Even-numbered elements in the input build vector are obtained from
6648 // subtracting two integer/float elements.
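// For illustration, a v4f32 build_vector of
//   (fsub (extract A, 0), (extract B, 0)), (fadd (extract A, 1), (extract B, 1)),
//   (fsub (extract A, 2), (extract B, 2)), (fadd (extract A, 3), (extract B, 3))
// is the shape this loop recognizes and folds into (X86ISD::ADDSUB A, B).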
6649 unsigned ExpectedOpcode = ISD::FSUB;
6650 unsigned NextExpectedOpcode = ISD::FADD;
6651 bool AddFound = false;
6652 bool SubFound = false;
6654 for (unsigned i = 0, e = NumElts; i != e; ++i) {
6655 SDValue Op = BV->getOperand(i);
6657 // Skip 'undef' values.
6658 unsigned Opcode = Op.getOpcode();
6659 if (Opcode == ISD::UNDEF) {
6660 std::swap(ExpectedOpcode, NextExpectedOpcode);
6664 // Early exit if we found an unexpected opcode.
6665 if (Opcode != ExpectedOpcode)
6668 SDValue Op0 = Op.getOperand(0);
6669 SDValue Op1 = Op.getOperand(1);
6671 // Try to match the following pattern:
6672 // (BINOP (extract_vector_elt A, i), (extract_vector_elt B, i))
6673 // Early exit if we cannot match that sequence.
6674 if (Op0.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6675 Op1.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
6676 !isa<ConstantSDNode>(Op0.getOperand(1)) ||
6677 !isa<ConstantSDNode>(Op1.getOperand(1)) ||
6678 Op0.getOperand(1) != Op1.getOperand(1))
6681 unsigned I0 = cast<ConstantSDNode>(Op0.getOperand(1))->getZExtValue();
6685 // We found a valid add/sub node. Update the information accordingly.
6691 // Update InVec0 and InVec1.
6692 if (InVec0.getOpcode() == ISD::UNDEF)
6693 InVec0 = Op0.getOperand(0);
6694 if (InVec1.getOpcode() == ISD::UNDEF)
6695 InVec1 = Op1.getOperand(0);
6697 // Make sure that the operands of each add/sub node always
6698 // come from the same pair of vectors.
6699 if (InVec0 != Op0.getOperand(0)) {
6700 if (ExpectedOpcode == ISD::FSUB)
6703 // FADD is commutable. Try to commute the operands
6704 // and then test again.
6705 std::swap(Op0, Op1);
6706 if (InVec0 != Op0.getOperand(0))
6710 if (InVec1 != Op1.getOperand(0))
6713 // Update the pair of expected opcodes.
6714 std::swap(ExpectedOpcode, NextExpectedOpcode);
6717 // Don't try to fold this build_vector into an ADDSUB if the inputs are undef.
6718 if (AddFound && SubFound && InVec0.getOpcode() != ISD::UNDEF &&
6719 InVec1.getOpcode() != ISD::UNDEF)
6720 return DAG.getNode(X86ISD::ADDSUB, DL, VT, InVec0, InVec1);
6725 static SDValue PerformBUILD_VECTORCombine(SDNode *N, SelectionDAG &DAG,
6726 const X86Subtarget *Subtarget) {
6728 EVT VT = N->getValueType(0);
6729 unsigned NumElts = VT.getVectorNumElements();
6730 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(N);
6731 SDValue InVec0, InVec1;
6733 // Try to match an ADDSUB.
6734 if ((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
6735 (Subtarget->hasAVX() && (VT == MVT::v8f32 || VT == MVT::v4f64))) {
6736 SDValue Value = matchAddSub(BV, DAG, Subtarget);
6737 if (Value.getNode())
6741 // Try to match horizontal ADD/SUB.
6742 unsigned NumUndefsLO = 0;
6743 unsigned NumUndefsHI = 0;
6744 unsigned Half = NumElts/2;
6746 // Count the number of UNDEF operands in the input build_vector.
6747 for (unsigned i = 0, e = Half; i != e; ++i)
6748 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6751 for (unsigned i = Half, e = NumElts; i != e; ++i)
6752 if (BV->getOperand(i)->getOpcode() == ISD::UNDEF)
6755 // Early exit if this is either a build_vector of all UNDEFs, or if all the
6756 // operands but one are UNDEF.
6757 if (NumUndefsLO + NumUndefsHI + 1 >= NumElts)
6760 if ((VT == MVT::v4f32 || VT == MVT::v2f64) && Subtarget->hasSSE3()) {
6761 // Try to match an SSE3 float HADD/HSUB.
6762 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6763 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6765 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6766 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6767 } else if ((VT == MVT::v4i32 || VT == MVT::v8i16) && Subtarget->hasSSSE3()) {
6768 // Try to match an SSSE3 integer HADD/HSUB.
6769 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6770 return DAG.getNode(X86ISD::HADD, DL, VT, InVec0, InVec1);
6772 if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6773 return DAG.getNode(X86ISD::HSUB, DL, VT, InVec0, InVec1);
6776 if (!Subtarget->hasAVX())
6779 if ((VT == MVT::v8f32 || VT == MVT::v4f64)) {
6780 // Try to match an AVX horizontal add/sub of packed single/double
6781 // precision floating point values from 256-bit vectors.
6782 SDValue InVec2, InVec3;
6783 if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, Half, InVec0, InVec1) &&
6784 isHorizontalBinOp(BV, ISD::FADD, DAG, Half, NumElts, InVec2, InVec3) &&
6785 ((InVec0.getOpcode() == ISD::UNDEF ||
6786 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6787 ((InVec1.getOpcode() == ISD::UNDEF ||
6788 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6789 return DAG.getNode(X86ISD::FHADD, DL, VT, InVec0, InVec1);
6791 if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, Half, InVec0, InVec1) &&
6792 isHorizontalBinOp(BV, ISD::FSUB, DAG, Half, NumElts, InVec2, InVec3) &&
6793 ((InVec0.getOpcode() == ISD::UNDEF ||
6794 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6795 ((InVec1.getOpcode() == ISD::UNDEF ||
6796 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6797 return DAG.getNode(X86ISD::FHSUB, DL, VT, InVec0, InVec1);
6798 } else if (VT == MVT::v8i32 || VT == MVT::v16i16) {
6799 // Try to match an AVX2 horizontal add/sub of signed integers.
6800 SDValue InVec2, InVec3;
6802 bool CanFold = true;
6804 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, Half, InVec0, InVec1) &&
6805 isHorizontalBinOp(BV, ISD::ADD, DAG, Half, NumElts, InVec2, InVec3) &&
6806 ((InVec0.getOpcode() == ISD::UNDEF ||
6807 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6808 ((InVec1.getOpcode() == ISD::UNDEF ||
6809 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6810 X86Opcode = X86ISD::HADD;
6811 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, Half, InVec0, InVec1) &&
6812 isHorizontalBinOp(BV, ISD::SUB, DAG, Half, NumElts, InVec2, InVec3) &&
6813 ((InVec0.getOpcode() == ISD::UNDEF ||
6814 InVec2.getOpcode() == ISD::UNDEF) || InVec0 == InVec2) &&
6815 ((InVec1.getOpcode() == ISD::UNDEF ||
6816 InVec3.getOpcode() == ISD::UNDEF) || InVec1 == InVec3))
6817 X86Opcode = X86ISD::HSUB;
6822 // Fold this build_vector into a single horizontal add/sub.
6823 // Do this only if the target has AVX2.
6824 if (Subtarget->hasAVX2())
6825 return DAG.getNode(X86Opcode, DL, VT, InVec0, InVec1);
6827 // Do not try to expand this build_vector into a pair of horizontal
6828 // add/sub if we can emit a pair of scalar add/sub.
6829 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6832 // Convert this build_vector into a pair of horizontal binop followed by
6834 bool isUndefLO = NumUndefsLO == Half;
6835 bool isUndefHI = NumUndefsHI == Half;
6836 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, false,
6837 isUndefLO, isUndefHI);
6841 if ((VT == MVT::v8f32 || VT == MVT::v4f64 || VT == MVT::v8i32 ||
6842 VT == MVT::v16i16) && Subtarget->hasAVX()) {
6844 if (isHorizontalBinOp(BV, ISD::ADD, DAG, 0, NumElts, InVec0, InVec1))
6845 X86Opcode = X86ISD::HADD;
6846 else if (isHorizontalBinOp(BV, ISD::SUB, DAG, 0, NumElts, InVec0, InVec1))
6847 X86Opcode = X86ISD::HSUB;
6848 else if (isHorizontalBinOp(BV, ISD::FADD, DAG, 0, NumElts, InVec0, InVec1))
6849 X86Opcode = X86ISD::FHADD;
6850 else if (isHorizontalBinOp(BV, ISD::FSUB, DAG, 0, NumElts, InVec0, InVec1))
6851 X86Opcode = X86ISD::FHSUB;
6855 // Don't try to expand this build_vector into a pair of horizontal add/sub
6856 // if we can simply emit a pair of scalar add/sub.
6857 if (NumUndefsLO + 1 == Half || NumUndefsHI + 1 == Half)
6860 // Convert this build_vector into two horizontal add/sub followed by
6862 bool isUndefLO = NumUndefsLO == Half;
6863 bool isUndefHI = NumUndefsHI == Half;
6864 return ExpandHorizontalBinOp(InVec0, InVec1, DL, DAG, X86Opcode, true,
6865 isUndefLO, isUndefHI);
6872 X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
6875 MVT VT = Op.getSimpleValueType();
6876 MVT ExtVT = VT.getVectorElementType();
6877 unsigned NumElems = Op.getNumOperands();
6879 // Generate vectors for predicate vectors.
6880 if (VT.getScalarType() == MVT::i1 && Subtarget->hasAVX512())
6881 return LowerBUILD_VECTORvXi1(Op, DAG);
6883 // Vectors containing all zeros can be matched by pxor and xorps later
6884 if (ISD::isBuildVectorAllZeros(Op.getNode())) {
6885 // Canonicalize this to <4 x i32> to 1) ensure the zero vectors are CSE'd
6886 // and 2) ensure that i64 scalars are eliminated on x86-32 hosts.
6887 if (VT == MVT::v4i32 || VT == MVT::v8i32 || VT == MVT::v16i32)
6890 return getZeroVector(VT, Subtarget, DAG, dl);
6893 // Vectors containing all ones can be matched by pcmpeqd on 128-bit width
6894 // vectors or broken into v4i32 operations on 256-bit vectors. AVX2 can use
6895 // vpcmpeqd on 256-bit vectors.
6896 if (Subtarget->hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {
6897 if (VT == MVT::v4i32 || (VT == MVT::v8i32 && Subtarget->hasInt256()))
6900 if (!VT.is512BitVector())
6901 return getOnesVector(VT, Subtarget->hasInt256(), DAG, dl);
6904 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
6905 if (Broadcast.getNode())
6908 unsigned EVTBits = ExtVT.getSizeInBits();
6910 unsigned NumZero = 0;
6911 unsigned NumNonZero = 0;
6912 unsigned NonZeros = 0;
6913 bool IsAllConstants = true;
6914 SmallSet<SDValue, 8> Values;
6915 for (unsigned i = 0; i < NumElems; ++i) {
6916 SDValue Elt = Op.getOperand(i);
6917 if (Elt.getOpcode() == ISD::UNDEF)
6920 if (Elt.getOpcode() != ISD::Constant &&
6921 Elt.getOpcode() != ISD::ConstantFP)
6922 IsAllConstants = false;
6923 if (X86::isZeroNode(Elt))
6926 NonZeros |= (1 << i);
6931 // All undef vector. Return an UNDEF. All zero vectors were handled above.
6932 if (NumNonZero == 0)
6933 return DAG.getUNDEF(VT);
6935 // Special case for single non-zero, non-undef, element.
6936 if (NumNonZero == 1) {
6937 unsigned Idx = countTrailingZeros(NonZeros);
6938 SDValue Item = Op.getOperand(Idx);
6940 // If this is an insertion of an i64 value on x86-32, and if the top bits of
6941 // the value are obviously zero, truncate the value to i32 and do the
6942 // insertion that way. Only do this if the value is non-constant or if the
6943 // value is a constant being inserted into element 0. It is cheaper to do
6944 // a constant pool load than it is to do a movd + shuffle.
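// For example, on a 32-bit target (v2i64 build_vector X, 0), where the upper
// 32 bits of X are known zero, is handled below as a truncate of X to i32, a
// scalar_to_vector into v4i32, a zero-extending shuffle, and a bitcast back to
// v2i64.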
6945 if (ExtVT == MVT::i64 && !Subtarget->is64Bit() &&
6946 (!IsAllConstants || Idx == 0)) {
6947 if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
6949 assert(VT == MVT::v2i64 && "Expected an SSE value type!");
6950 EVT VecVT = MVT::v4i32;
6951 unsigned VecElts = 4;
6953 // Truncate the value (which may itself be a constant) to i32, and
6954 // convert it to a vector with movd (S2V+shuffle to zero extend).
6955 Item = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, Item);
6956 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Item);
6958 // If using the new shuffle lowering, just directly insert this.
6959 if (ExperimentalVectorShuffleLowering)
6961 ISD::BITCAST, dl, VT,
6962 getShuffleVectorZeroOrUndef(Item, Idx * 2, true, Subtarget, DAG));
6964 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
6966 // Now we have our 32-bit value zero extended in the low element of
6967 // a vector. If Idx != 0, swizzle it into place.
6969 SmallVector<int, 4> Mask;
6970 Mask.push_back(Idx);
6971 for (unsigned i = 1; i != VecElts; ++i)
6973 Item = DAG.getVectorShuffle(VecVT, dl, Item, DAG.getUNDEF(VecVT),
6976 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
6980 // If we have a constant or non-constant insertion into the low element of
6981 // a vector, we can do this with SCALAR_TO_VECTOR + shuffle of zero into
6982 // the rest of the elements. This will be matched as movd/movq/movss/movsd
6983 // depending on what the source datatype is.
6986 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6988 if (ExtVT == MVT::i32 || ExtVT == MVT::f32 || ExtVT == MVT::f64 ||
6989 (ExtVT == MVT::i64 && Subtarget->is64Bit())) {
6990 if (VT.is256BitVector() || VT.is512BitVector()) {
6991 SDValue ZeroVec = getZeroVector(VT, Subtarget, DAG, dl);
6992 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, ZeroVec,
6993 Item, DAG.getIntPtrConstant(0));
6995 assert(VT.is128BitVector() && "Expected an SSE value type!");
6996 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
6997 // Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
6998 return getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7001 if (ExtVT == MVT::i16 || ExtVT == MVT::i8) {
7002 Item = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Item);
7003 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, Item);
7004 if (VT.is256BitVector()) {
7005 SDValue ZeroVec = getZeroVector(MVT::v8i32, Subtarget, DAG, dl);
7006 Item = Insert128BitVector(ZeroVec, Item, 0, DAG, dl);
7008 assert(VT.is128BitVector() && "Expected an SSE value type!");
7009 Item = getShuffleVectorZeroOrUndef(Item, 0, true, Subtarget, DAG);
7011 return DAG.getNode(ISD::BITCAST, dl, VT, Item);
7015 // Is it a vector logical left shift?
7016 if (NumElems == 2 && Idx == 1 &&
7017 X86::isZeroNode(Op.getOperand(0)) &&
7018 !X86::isZeroNode(Op.getOperand(1))) {
7019 unsigned NumBits = VT.getSizeInBits();
7020 return getVShift(true, VT,
7021 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
7022 VT, Op.getOperand(1)),
7023 NumBits/2, DAG, *this, dl);
7026 if (IsAllConstants) // Otherwise, it's better to do a constpool load.
7029 // Otherwise, if this is a vector with i32 or f32 elements, and the element
7030 // is a non-constant being inserted into an element other than the low one,
7031 // we can't use a constant pool load. Instead, use SCALAR_TO_VECTOR (aka
7032 // movd/movss) to move this into the low element, then shuffle it into
7034 if (EVTBits == 32) {
7035 Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Item);
7037 // If using the new shuffle lowering, just directly insert this.
7038 if (ExperimentalVectorShuffleLowering)
7039 return getShuffleVectorZeroOrUndef(Item, Idx, NumZero > 0, Subtarget, DAG);
7041 // Turn it into a shuffle of zero and zero-extended scalar to vector.
7042 Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, Subtarget, DAG);
7043 SmallVector<int, 8> MaskVec;
7044 for (unsigned i = 0; i != NumElems; ++i)
7045 MaskVec.push_back(i == Idx ? 0 : 1);
7046 return DAG.getVectorShuffle(VT, dl, Item, DAG.getUNDEF(VT), &MaskVec[0]);
7050 // Splat is obviously ok. Let legalizer expand it to a shuffle.
7051 if (Values.size() == 1) {
7052 if (EVTBits == 32) {
7053 // Instead of a shuffle like this:
7054 // shuffle (scalar_to_vector (load (ptr + 4))), undef, <0, 0, 0, 0>
7055 // Check if it's possible to issue this instead.
7056 // shuffle (vload ptr)), undef, <1, 1, 1, 1>
7057 unsigned Idx = countTrailingZeros(NonZeros);
7058 SDValue Item = Op.getOperand(Idx);
7059 if (Op.getNode()->isOnlyUserOf(Item.getNode()))
7060 return LowerAsSplatVectorLoad(Item, VT, dl, DAG);
7065 // A vector full of immediates; various special cases are already
7066 // handled, so this is best done with a single constant-pool load.
7070 // For AVX-length vectors, see if we can use a vector load to get all of the
7071 // elements, otherwise build the individual 128-bit pieces and use
7072 // shuffles to put them in place.
7073 if (VT.is256BitVector() || VT.is512BitVector()) {
7074 SmallVector<SDValue, 64> V;
7075 for (unsigned i = 0; i != NumElems; ++i)
7076 V.push_back(Op.getOperand(i));
7078 // Check for a build vector of consecutive loads.
7079 if (SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false))
7082 EVT HVT = EVT::getVectorVT(*DAG.getContext(), ExtVT, NumElems/2);
7084 // Build both the lower and upper subvector.
7085 SDValue Lower = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7086 makeArrayRef(&V[0], NumElems/2));
7087 SDValue Upper = DAG.getNode(ISD::BUILD_VECTOR, dl, HVT,
7088 makeArrayRef(&V[NumElems / 2], NumElems/2));
7090 // Recreate the wider vector with the lower and upper part.
7091 if (VT.is256BitVector())
7092 return Concat128BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7093 return Concat256BitVectors(Lower, Upper, VT, NumElems, DAG, dl);
7096 // Let legalizer expand 2-wide build_vectors.
7097 if (EVTBits == 64) {
7098 if (NumNonZero == 1) {
7099 // One half is zero or undef.
7100 unsigned Idx = countTrailingZeros(NonZeros);
7101 SDValue V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT,
7102 Op.getOperand(Idx));
7103 return getShuffleVectorZeroOrUndef(V2, Idx, true, Subtarget, DAG);
7108 // If element VT is < 32 bits, convert it to inserts into a zero vector.
7109 if (EVTBits == 8 && NumElems == 16) {
7110 SDValue V = LowerBuildVectorv16i8(Op, NonZeros,NumNonZero,NumZero, DAG,
7112 if (V.getNode()) return V;
7115 if (EVTBits == 16 && NumElems == 8) {
7116 SDValue V = LowerBuildVectorv8i16(Op, NonZeros,NumNonZero,NumZero, DAG,
7118 if (V.getNode()) return V;
7121 // If element VT is == 32 bits and has 4 elems, try to generate an INSERTPS
7122 if (EVTBits == 32 && NumElems == 4) {
7123 SDValue V = LowerBuildVectorv4x32(Op, DAG, Subtarget, *this);
7128 // If element VT is == 32 bits, turn it into a number of shuffles.
7129 SmallVector<SDValue, 8> V(NumElems);
7130 if (NumElems == 4 && NumZero > 0) {
7131 for (unsigned i = 0; i < 4; ++i) {
7132 bool isZero = !(NonZeros & (1 << i));
7134 V[i] = getZeroVector(VT, Subtarget, DAG, dl);
7136 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7139 for (unsigned i = 0; i < 2; ++i) {
7140 switch ((NonZeros & (0x3 << i*2)) >> (i*2)) {
7143 V[i] = V[i*2]; // Must be a zero vector.
7146 V[i] = getMOVL(DAG, dl, VT, V[i*2+1], V[i*2]);
7149 V[i] = getMOVL(DAG, dl, VT, V[i*2], V[i*2+1]);
7152 V[i] = getUnpackl(DAG, dl, VT, V[i*2], V[i*2+1]);
7157 bool Reverse1 = (NonZeros & 0x3) == 2;
7158 bool Reverse2 = ((NonZeros & (0x3 << 2)) >> 2) == 2;
7162 static_cast<int>(Reverse2 ? NumElems+1 : NumElems),
7163 static_cast<int>(Reverse2 ? NumElems : NumElems+1)
7165 return DAG.getVectorShuffle(VT, dl, V[0], V[1], &MaskVec[0]);
7168 if (Values.size() > 1 && VT.is128BitVector()) {
7169 // Check for a build vector of consecutive loads.
7170 for (unsigned i = 0; i < NumElems; ++i)
7171 V[i] = Op.getOperand(i);
7173 // Check for elements which are consecutive loads.
7174 SDValue LD = EltsFromConsecutiveLoads(VT, V, dl, DAG, false);
7178 // Check for a build vector from mostly shuffle plus few inserting.
7179 SDValue Sh = buildFromShuffleMostly(Op, DAG);
7183 // For SSE 4.1, use insertps to put the high elements into the low element.
7184 if (Subtarget->hasSSE41()) {
7186 if (Op.getOperand(0).getOpcode() != ISD::UNDEF)
7187 Result = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(0));
7189 Result = DAG.getUNDEF(VT);
7191 for (unsigned i = 1; i < NumElems; ++i) {
7192 if (Op.getOperand(i).getOpcode() == ISD::UNDEF) continue;
7193 Result = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Result,
7194 Op.getOperand(i), DAG.getIntPtrConstant(i));
7199 // Otherwise, expand into a number of unpckl*, start by extending each of
7200 // our (non-undef) elements to the full vector width with the element in the
7201 // bottom slot of the vector (which generates no code for SSE).
7202 for (unsigned i = 0; i < NumElems; ++i) {
7203 if (Op.getOperand(i).getOpcode() != ISD::UNDEF)
7204 V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op.getOperand(i));
7206 V[i] = DAG.getUNDEF(VT);
7209 // Next, we iteratively mix elements, e.g. for v4f32:
7210 // Step 1: unpcklps 0, 2 ==> X: <?, ?, 2, 0>
7211 // : unpcklps 1, 3 ==> Y: <?, ?, 3, 1>
7212 // Step 2: unpcklps X, Y ==> <3, 2, 1, 0>
7213 unsigned EltStride = NumElems >> 1;
7214 while (EltStride != 0) {
7215 for (unsigned i = 0; i < EltStride; ++i) {
7216 // If V[i+EltStride] is undef and this is the first round of mixing,
7217 // then it is safe to just drop this shuffle: V[i] is already in the
7218 // right place, the one element (since it's the first round) being
7219 // inserted as undef can be dropped. This isn't safe for successive
7220 // rounds because they will permute elements within both vectors.
7221 if (V[i+EltStride].getOpcode() == ISD::UNDEF &&
7222 EltStride == NumElems/2)
7225 V[i] = getUnpackl(DAG, dl, VT, V[i], V[i + EltStride]);
7234 // LowerAVXCONCAT_VECTORS - 256-bit AVX can use the vinsertf128 instruction
7235 // to create 256-bit vectors from two other 128-bit ones.
7236 static SDValue LowerAVXCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7238 MVT ResVT = Op.getSimpleValueType();
7240 assert((ResVT.is256BitVector() ||
7241 ResVT.is512BitVector()) && "Value type must be 256-/512-bit wide");
7243 SDValue V1 = Op.getOperand(0);
7244 SDValue V2 = Op.getOperand(1);
7245 unsigned NumElems = ResVT.getVectorNumElements();
7246 if (ResVT.is256BitVector())
7247 return Concat128BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7249 if (Op.getNumOperands() == 4) {
7250 MVT HalfVT = MVT::getVectorVT(ResVT.getScalarType(),
7251 ResVT.getVectorNumElements()/2);
7252 SDValue V3 = Op.getOperand(2);
7253 SDValue V4 = Op.getOperand(3);
7254 return Concat256BitVectors(Concat128BitVectors(V1, V2, HalfVT, NumElems/2, DAG, dl),
7255 Concat128BitVectors(V3, V4, HalfVT, NumElems/2, DAG, dl), ResVT, NumElems, DAG, dl);
7257 return Concat256BitVectors(V1, V2, ResVT, NumElems, DAG, dl);
7260 static SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) {
7261 MVT LLVM_ATTRIBUTE_UNUSED VT = Op.getSimpleValueType();
7262 assert((VT.is256BitVector() && Op.getNumOperands() == 2) ||
7263 (VT.is512BitVector() && (Op.getNumOperands() == 2 ||
7264 Op.getNumOperands() == 4)));
7266 // AVX can use the vinsertf128 instruction to create 256-bit vectors
7267 // from two other 128-bit ones.
7269 // 512-bit vector may contain 2 256-bit vectors or 4 128-bit vectors
7270 return LowerAVXCONCAT_VECTORS(Op, DAG);
7274 //===----------------------------------------------------------------------===//
7275 // Vector shuffle lowering
7277 // This is an experimental code path for lowering vector shuffles on x86. It is
7278 // designed to handle arbitrary vector shuffles and blends, gracefully
7279 // degrading performance as necessary. It works hard to recognize idiomatic
7280 // shuffles and lower them to optimal instruction patterns without leaving
7281 // a framework that allows reasonably efficient handling of all vector shuffle
7283 //===----------------------------------------------------------------------===//
7285 /// \brief Tiny helper function to identify a no-op mask.
7287 /// This is a somewhat boring predicate function. It checks whether the mask
7288 /// array input, which is assumed to be a single-input shuffle mask of the kind
7289 /// used by the X86 shuffle instructions (not a fully general
7290 /// ShuffleVectorSDNode mask) requires any shuffles to occur. Both undef and an
7291 /// in-place shuffle are 'no-op's.
7292 static bool isNoopShuffleMask(ArrayRef<int> Mask) {
7293 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7294 if (Mask[i] != -1 && Mask[i] != i)
7299 /// \brief Helper function to classify a mask as a single-input mask.
7301 /// This isn't a generic single-input test because in the vector shuffle
7302 /// lowering we canonicalize single inputs to be the first input operand. This
7303 /// means we can more quickly test for a single input by only checking whether
7304 /// an input from the second operand exists. We also assume that the size of
7305 /// the mask corresponds to the size of the input vectors, which isn't true in the
7306 /// fully general case.
7307 static bool isSingleInputShuffleMask(ArrayRef<int> Mask) {
7309 if (M >= (int)Mask.size())
7314 /// \brief Test whether there are elements crossing 128-bit lanes in this
7317 /// X86 divides up its shuffles into in-lane and cross-lane shuffle operations
7318 /// and we routinely test for these.
7319 static bool is128BitLaneCrossingShuffleMask(MVT VT, ArrayRef<int> Mask) {
7320 int LaneSize = 128 / VT.getScalarSizeInBits();
7321 int Size = Mask.size();
7322 for (int i = 0; i < Size; ++i)
7323 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
7328 /// \brief Test whether a shuffle mask is equivalent within each 128-bit lane.
7330 /// This checks a shuffle mask to see if it is performing the same
7331 /// 128-bit lane-relative shuffle in each 128-bit lane. This trivially implies
7332 /// that it is also not lane-crossing. It may however involve a blend from the
7333 /// same lane of a second vector.
7335 /// The specific repeated shuffle mask is populated in \p RepeatedMask, as it is
7336 /// non-trivial to compute in the face of undef lanes. The representation is
7337 /// *not* suitable for use with existing 128-bit shuffles as it will contain
7338 /// entries from both V1 and V2 inputs to the wider mask.
7340 is128BitLaneRepeatedShuffleMask(MVT VT, ArrayRef<int> Mask,
7341 SmallVectorImpl<int> &RepeatedMask) {
7342 int LaneSize = 128 / VT.getScalarSizeInBits();
7343 RepeatedMask.resize(LaneSize, -1);
7344 int Size = Mask.size();
7345 for (int i = 0; i < Size; ++i) {
7348 if ((Mask[i] % Size) / LaneSize != i / LaneSize)
7349 // This entry crosses lanes, so there is no way to model this shuffle.
7352 // Ok, handle the in-lane shuffles by detecting if and when they repeat.
7353 if (RepeatedMask[i % LaneSize] == -1)
7354 // This is the first non-undef entry in this slot of a 128-bit lane.
7355 RepeatedMask[i % LaneSize] =
7356 Mask[i] < Size ? Mask[i] % LaneSize : Mask[i] % LaneSize + Size;
7357 else if (RepeatedMask[i % LaneSize] + (i / LaneSize) * LaneSize != Mask[i])
7358 // Found a mismatch with the repeated mask.
7364 // Hide this symbol with an anonymous namespace instead of 'static' so that MSVC
7365 // 2013 will allow us to use it as a non-type template parameter.
7368 /// \brief Implementation of the \c isShuffleEquivalent variadic functor.
7370 /// See its documentation for details.
7371 bool isShuffleEquivalentImpl(SDValue V1, SDValue V2, ArrayRef<int> Mask,
7372 ArrayRef<const int *> Args) {
7373 if (Mask.size() != Args.size())
7376 // If the values are build vectors, we can look through them to find
7377 // equivalent inputs that make the shuffles equivalent.
7378 auto *BV1 = dyn_cast<BuildVectorSDNode>(V1);
7379 auto *BV2 = dyn_cast<BuildVectorSDNode>(V2);
7381 for (int i = 0, e = Mask.size(); i < e; ++i) {
7382 assert(*Args[i] >= 0 && "Arguments must be positive integers!");
7383 if (Mask[i] != -1 && Mask[i] != *Args[i]) {
7384 auto *MaskBV = Mask[i] < e ? BV1 : BV2;
7385 auto *ArgsBV = *Args[i] < e ? BV1 : BV2;
7386 if (!MaskBV || !ArgsBV ||
7387 MaskBV->getOperand(Mask[i] % e) != ArgsBV->getOperand(*Args[i] % e))
7396 /// \brief Checks whether a shuffle mask is equivalent to an explicit list of
7399 /// This is a fast way to test a shuffle mask against a fixed pattern:
7401 /// if (isShuffleEquivalent(Mask, 3, 2, 1, 0)) { ... }
7403 /// It returns true if the mask is exactly as wide as the argument list, and
7404 /// each element of the mask is either -1 (signifying undef) or the value given
7405 /// in the argument.
7406 static const VariadicFunction3<bool, SDValue, SDValue, ArrayRef<int>, int,
7407 isShuffleEquivalentImpl> isShuffleEquivalent =
7410 /// \brief Get a 4-lane 8-bit shuffle immediate for a mask.
7412 /// This helper function produces an 8-bit shuffle immediate corresponding to
7413 /// the ubiquitous shuffle encoding scheme used in x86 instructions for
7414 /// shuffling 4 lanes. It can be used with most of the PSHUF instructions for
7417 /// NB: We rely heavily on "undef" masks preserving the input lane.
7418 static SDValue getV4X86ShuffleImm8ForMask(ArrayRef<int> Mask,
7419 SelectionDAG &DAG) {
7420 assert(Mask.size() == 4 && "Only 4-lane shuffle masks");
7421 assert(Mask[0] >= -1 && Mask[0] < 4 && "Out of bound mask element!");
7422 assert(Mask[1] >= -1 && Mask[1] < 4 && "Out of bound mask element!");
7423 assert(Mask[2] >= -1 && Mask[2] < 4 && "Out of bound mask element!");
7424 assert(Mask[3] >= -1 && Mask[3] < 4 && "Out of bound mask element!");
7427 Imm |= (Mask[0] == -1 ? 0 : Mask[0]) << 0;
7428 Imm |= (Mask[1] == -1 ? 1 : Mask[1]) << 2;
7429 Imm |= (Mask[2] == -1 ? 2 : Mask[2]) << 4;
7430 Imm |= (Mask[3] == -1 ? 3 : Mask[3]) << 6;
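// For example, the mask <3, 2, 1, 0> packs to 0b00011011 (0x1B), the familiar
// lane-reversal immediate, while -1 entries simply keep their identity lane.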
7431 return DAG.getConstant(Imm, MVT::i8);
7434 /// \brief Try to emit a blend instruction for a shuffle.
7436 /// This doesn't do any checks for the availability of instructions for blending
7437 /// these values. It relies on the availability of the X86ISD::BLENDI pattern to
7438 /// be matched in the backend with the type given. What it does check for is
7439 /// that the shuffle mask is in fact a blend.
7440 static SDValue lowerVectorShuffleAsBlend(SDLoc DL, MVT VT, SDValue V1,
7441 SDValue V2, ArrayRef<int> Mask,
7442 const X86Subtarget *Subtarget,
7443 SelectionDAG &DAG) {
7445 unsigned BlendMask = 0;
7446 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7447 if (Mask[i] >= Size) {
7448 if (Mask[i] != i + Size)
7449 return SDValue(); // Shuffled V2 input!
7450 BlendMask |= 1u << i;
7453 if (Mask[i] >= 0 && Mask[i] != i)
7454 return SDValue(); // Shuffled V1 input!
7456 switch (VT.SimpleTy) {
7461 return DAG.getNode(X86ISD::BLENDI, DL, VT, V1, V2,
7462 DAG.getConstant(BlendMask, MVT::i8));
7466 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7470 // If we have AVX2 it is faster to use VPBLENDD when the shuffle fits into
7471 // that instruction.
7472 if (Subtarget->hasAVX2()) {
7473 // Scale the blend by the number of 32-bit dwords per element.
7474 int Scale = VT.getScalarSizeInBits() / 32;
7476 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7477 if (Mask[i] >= Size)
7478 for (int j = 0; j < Scale; ++j)
7479 BlendMask |= 1u << (i * Scale + j);
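// For example, a v4i64 mask <0, 5, 2, 7> takes elements 1 and 3 from V2; with
// Scale == 2 this sets dword bits 2, 3, 6 and 7, i.e. a VPBLENDD immediate of
// 0b11001100.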
7481 MVT BlendVT = VT.getSizeInBits() > 128 ? MVT::v8i32 : MVT::v4i32;
7482 V1 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V1);
7483 V2 = DAG.getNode(ISD::BITCAST, DL, BlendVT, V2);
7484 return DAG.getNode(ISD::BITCAST, DL, VT,
7485 DAG.getNode(X86ISD::BLENDI, DL, BlendVT, V1, V2,
7486 DAG.getConstant(BlendMask, MVT::i8)));
7490 // For integer shuffles we need to expand the mask and cast the inputs to
7491 // v8i16s prior to blending.
7492 int Scale = 8 / VT.getVectorNumElements();
7494 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7495 if (Mask[i] >= Size)
7496 for (int j = 0; j < Scale; ++j)
7497 BlendMask |= 1u << (i * Scale + j);
7499 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
7500 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
7501 return DAG.getNode(ISD::BITCAST, DL, VT,
7502 DAG.getNode(X86ISD::BLENDI, DL, MVT::v8i16, V1, V2,
7503 DAG.getConstant(BlendMask, MVT::i8)));
7507 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7508 SmallVector<int, 8> RepeatedMask;
7509 if (is128BitLaneRepeatedShuffleMask(MVT::v16i16, Mask, RepeatedMask)) {
7510 // We can lower these with PBLENDW which is mirrored across 128-bit lanes.
7511 assert(RepeatedMask.size() == 8 && "Repeated mask size doesn't match!");
7513 for (int i = 0; i < 8; ++i)
7514 if (RepeatedMask[i] >= 16)
7515 BlendMask |= 1u << i;
7516 return DAG.getNode(X86ISD::BLENDI, DL, MVT::v16i16, V1, V2,
7517 DAG.getConstant(BlendMask, MVT::i8));
7522 assert(Subtarget->hasAVX2() && "256-bit integer blends require AVX2!");
7523 // Scale the blend by the number of bytes per element.
7524 int Scale = VT.getScalarSizeInBits() / 8;
7525 assert(Mask.size() * Scale == 32 && "Not a 256-bit vector!");
7527 // Compute the VSELECT mask. Note that VSELECT is really confusing in the
7528 // mix of LLVM's code generator and the x86 backend. We tell the code
7529 // generator that boolean values in the elements of an x86 vector register
7530 // are -1 for true and 0 for false. We then use the LLVM semantics of 'true'
7531 // mapping a select to operand #1, and 'false' mapping to operand #2. The
7532 // reality in x86 is that vector masks (pre-AVX-512) use only the high bit
7533 // of the element (the remaining are ignored) and 0 in that high bit would
7534 // mean operand #1 while 1 in the high bit would mean operand #2. So while
7535 // the LLVM model for boolean values in vector elements gets the relevant
7536 // bit set, it is set backwards and over constrained relative to x86's
7538 SDValue VSELECTMask[32];
7539 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7540 for (int j = 0; j < Scale; ++j)
7541 VSELECTMask[Scale * i + j] =
7542 Mask[i] < 0 ? DAG.getUNDEF(MVT::i8)
7543 : DAG.getConstant(Mask[i] < Size ? -1 : 0, MVT::i8);
7545 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1);
7546 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V2);
7548 ISD::BITCAST, DL, VT,
7549 DAG.getNode(ISD::VSELECT, DL, MVT::v32i8,
7550 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, VSELECTMask),
7555 llvm_unreachable("Not a supported integer vector type!");
7559 /// \brief Try to lower as a blend of elements from two inputs followed by
7560 /// a single-input permutation.
7562 /// This matches the pattern where we can blend elements from two inputs and
7563 /// then reduce the shuffle to a single-input permutation.
7564 static SDValue lowerVectorShuffleAsBlendAndPermute(SDLoc DL, MVT VT, SDValue V1,
7567 SelectionDAG &DAG) {
7568 // We build up the blend mask while checking whether a blend is a viable way
7569 // to reduce the shuffle.
7570 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7571 SmallVector<int, 32> PermuteMask(Mask.size(), -1);
7573 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7577 assert(Mask[i] < Size * 2 && "Shuffle input is out of bounds.");
7579 if (BlendMask[Mask[i] % Size] == -1)
7580 BlendMask[Mask[i] % Size] = Mask[i];
7581 else if (BlendMask[Mask[i] % Size] != Mask[i])
7582 return SDValue(); // Can't blend in the needed input!
7584 PermuteMask[i] = Mask[i] % Size;
7587 SDValue V = DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7588 return DAG.getVectorShuffle(VT, DL, V, DAG.getUNDEF(VT), PermuteMask);
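// For example, the v4i32 mask <2, 5, 0, 7> first blends V1 and V2 with
// BlendMask <0, 5, 2, 7> (each element kept in its source position) and then
// applies the single-input permute <2, 1, 0, 3> to the blended vector.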
7591 /// \brief Generic routine to decompose a shuffle and blend into independent
7592 /// blends and permutes.
7594 /// This matches the extremely common pattern for handling combined
7595 /// shuffle+blend operations on newer X86 ISAs where we have very fast blend
7596 /// operations. It will try to pick the best arrangement of shuffles and
7598 static SDValue lowerVectorShuffleAsDecomposedShuffleBlend(SDLoc DL, MVT VT,
7602 SelectionDAG &DAG) {
7603 // Shuffle the input elements into the desired positions in V1 and V2 and
7604 // blend them together.
7605 SmallVector<int, 32> V1Mask(Mask.size(), -1);
7606 SmallVector<int, 32> V2Mask(Mask.size(), -1);
7607 SmallVector<int, 32> BlendMask(Mask.size(), -1);
7608 for (int i = 0, Size = Mask.size(); i < Size; ++i)
7609 if (Mask[i] >= 0 && Mask[i] < Size) {
7610 V1Mask[i] = Mask[i];
7612 } else if (Mask[i] >= Size) {
7613 V2Mask[i] = Mask[i] - Size;
7614 BlendMask[i] = i + Size;
7617 // Try to lower with the simpler initial blend strategy unless one of the
7618 // input shuffles would be a no-op. We prefer to shuffle inputs as the
7619 // shuffle may be able to fold with a load or other benefit. However, when
7620 // we'll have to do 2x as many shuffles in order to achieve this, blending
7621 // first is a better strategy.
7622 if (!isNoopShuffleMask(V1Mask) && !isNoopShuffleMask(V2Mask))
7623 if (SDValue BlendPerm =
7624 lowerVectorShuffleAsBlendAndPermute(DL, VT, V1, V2, Mask, DAG))
7627 V1 = DAG.getVectorShuffle(VT, DL, V1, DAG.getUNDEF(VT), V1Mask);
7628 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Mask);
7629 return DAG.getVectorShuffle(VT, DL, V1, V2, BlendMask);
7632 /// \brief Try to lower a vector shuffle as a byte rotation.
7634 /// SSSE3 has a generic PALIGNR instruction in x86 that will do an arbitrary
7635 /// byte-rotation of the concatenation of two vectors; pre-SSSE3 can use
7636 /// a PSRLDQ/PSLLDQ/POR pattern to get a similar effect. This routine will
7637 /// try to generically lower a vector shuffle through such a pattern. It
7638 /// does not check for the profitability of lowering either as PALIGNR or
7639 /// PSRLDQ/PSLLDQ/POR, only whether the mask is valid to lower in that form.
7640 /// This matches shuffle vectors that look like:
7642 /// v8i16 [11, 12, 13, 14, 15, 0, 1, 2]
7644 /// Essentially it concatenates V1 and V2, shifts right by some number of
7645 /// elements, and takes the low elements as the result. Note that while this is
7646 /// specified as a *right shift* because x86 is little-endian, it is a *left
7647 /// rotate* of the vector lanes.
7649 /// Note that this only handles 128-bit vector widths currently.
7650 static SDValue lowerVectorShuffleAsByteRotate(SDLoc DL, MVT VT, SDValue V1,
7653 const X86Subtarget *Subtarget,
7654 SelectionDAG &DAG) {
7655 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7657 // We need to detect various ways of spelling a rotation:
7658 // [11, 12, 13, 14, 15, 0, 1, 2]
7659 // [-1, 12, 13, 14, -1, -1, 1, -1]
7660 // [-1, -1, -1, -1, -1, -1, 1, 2]
7661 // [ 3, 4, 5, 6, 7, 8, 9, 10]
7662 // [-1, 4, 5, 6, -1, -1, 9, -1]
7663 // [-1, 4, 5, 6, -1, -1, -1, -1]
7666 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7669 assert(Mask[i] >= 0 && "Only -1 is a valid negative mask element!");
7671 // Based on the mod-Size value of this mask element determine where
7672 // a rotated vector would have started.
7673 int StartIdx = i - (Mask[i] % Size);
7675 // The identity rotation isn't interesting, stop.
7678 // If we found the tail of a vector, the rotation is the number of missing front
7679 // elements; if we found the head of a vector, it is the distance from StartIdx to Size.
7680 int CandidateRotation = StartIdx < 0 ? -StartIdx : Size - StartIdx;
7683 Rotation = CandidateRotation;
7684 else if (Rotation != CandidateRotation)
7685 // The rotations don't match, so we can't match this mask.
7688 // Compute which value this mask is pointing at.
7689 SDValue MaskV = Mask[i] < Size ? V1 : V2;
7691 // Compute which of the two target values this index should be assigned to.
7692 // This reflects whether the high elements are remaining or the low elements
7694 SDValue &TargetV = StartIdx < 0 ? Hi : Lo;
7696 // Either set up this value if we've not encountered it before, or check
7697 // that it remains consistent.
7700 else if (TargetV != MaskV)
7701 // This may be a rotation, but it pulls from the inputs in some
7702 // unsupported interleaving.
7706 // Check that we successfully analyzed the mask, and normalize the results.
7707 assert(Rotation != 0 && "Failed to locate a viable rotation!");
7708 assert((Lo || Hi) && "Failed to find a rotated input vector!");
7714 assert(VT.getSizeInBits() == 128 &&
7715 "Rotate-based lowering only supports 128-bit lowering!");
7716 assert(Mask.size() <= 16 &&
7717 "Can shuffle at most 16 bytes in a 128-bit vector!");
7719 // The actual rotate instruction rotates bytes, so we need to scale the
7720 // rotation based on how many bytes are in the vector.
7721 int Scale = 16 / Mask.size();
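// For example, a v8i16 shuffle with Rotation == 5 has Scale == 2, so the
// emitted PALIGNR (or PSLLDQ/PSRLDQ pair below) uses a byte amount of 10.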
7723 // SSSE3 targets can use the palignr instruction
7724 if (Subtarget->hasSSSE3()) {
7725 // Cast the inputs to v16i8 to match PALIGNR.
7726 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Lo);
7727 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Hi);
7729 return DAG.getNode(ISD::BITCAST, DL, VT,
7730 DAG.getNode(X86ISD::PALIGNR, DL, MVT::v16i8, Hi, Lo,
7731 DAG.getConstant(Rotation * Scale, MVT::i8)));
7734 // Default SSE2 implementation
7735 int LoByteShift = 16 - Rotation * Scale;
7736 int HiByteShift = Rotation * Scale;
7738 // Cast the inputs to v2i64 to match PSLLDQ/PSRLDQ.
7739 Lo = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Lo);
7740 Hi = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, Hi);
7742 SDValue LoShift = DAG.getNode(X86ISD::VSHLDQ, DL, MVT::v2i64, Lo,
7743 DAG.getConstant(8 * LoByteShift, MVT::i8));
7744 SDValue HiShift = DAG.getNode(X86ISD::VSRLDQ, DL, MVT::v2i64, Hi,
7745 DAG.getConstant(8 * HiByteShift, MVT::i8));
7746 return DAG.getNode(ISD::BITCAST, DL, VT,
7747 DAG.getNode(ISD::OR, DL, MVT::v2i64, LoShift, HiShift));
7750 /// \brief Compute whether each element of a shuffle is zeroable.
7752 /// A "zeroable" vector shuffle element is one which can be lowered to zero.
7753 /// Either it is an undef element in the shuffle mask, the element of the input
7754 /// referenced is undef, or the element of the input referenced is known to be
7755 /// zero. Many x86 shuffles can zero lanes cheaply and we often want to handle
7756 /// as many lanes with this technique as possible to simplify the remaining
7758 static SmallBitVector computeZeroableShuffleElements(ArrayRef<int> Mask,
7759 SDValue V1, SDValue V2) {
7760 SmallBitVector Zeroable(Mask.size(), false);
7762 while (V1.getOpcode() == ISD::BITCAST)
7763 V1 = V1->getOperand(0);
7764 while (V2.getOpcode() == ISD::BITCAST)
7765 V2 = V2->getOperand(0);
7767 bool V1IsZero = ISD::isBuildVectorAllZeros(V1.getNode());
7768 bool V2IsZero = ISD::isBuildVectorAllZeros(V2.getNode());
7770 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7772 // Handle the easy cases.
7773 if (M < 0 || (M >= 0 && M < Size && V1IsZero) || (M >= Size && V2IsZero)) {
7778 // If this is an index into a build_vector node (which has the same number
7779 // of elements), dig out the input value and use it.
7780 SDValue V = M < Size ? V1 : V2;
7781 if (V.getOpcode() != ISD::BUILD_VECTOR || Size != (int)V.getNumOperands())
7784 SDValue Input = V.getOperand(M % Size);
7785 // The UNDEF opcode check really should be dead code here, but not quite
7786 // worth asserting on (it isn't invalid, just unexpected).
7787 if (Input.getOpcode() == ISD::UNDEF || X86::isZeroNode(Input))
7794 /// \brief Try to emit a bitmask instruction for a shuffle.
7796 /// This handles cases where we can model a blend exactly as a bitmask due to
7797 /// one of the inputs being zeroable.
7798 static SDValue lowerVectorShuffleAsBitMask(SDLoc DL, MVT VT, SDValue V1,
7799 SDValue V2, ArrayRef<int> Mask,
7800 SelectionDAG &DAG) {
7801 MVT EltVT = VT.getScalarType();
7802 int NumEltBits = EltVT.getSizeInBits();
7803 MVT IntEltVT = MVT::getIntegerVT(NumEltBits);
7804 SDValue Zero = DAG.getConstant(0, IntEltVT);
7805 SDValue AllOnes = DAG.getConstant(APInt::getAllOnesValue(NumEltBits), IntEltVT);
7806 if (EltVT.isFloatingPoint()) {
7807 Zero = DAG.getNode(ISD::BITCAST, DL, EltVT, Zero);
7808 AllOnes = DAG.getNode(ISD::BITCAST, DL, EltVT, AllOnes);
7810 SmallVector<SDValue, 16> VMaskOps(Mask.size(), Zero);
7811 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7813 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
7816 if (Mask[i] % Size != i)
7817 return SDValue(); // Not a blend.
7819 V = Mask[i] < Size ? V1 : V2;
7820 else if (V != (Mask[i] < Size ? V1 : V2))
7821 return SDValue(); // Can only let one input through the mask.
7823 VMaskOps[i] = AllOnes;
7826 return SDValue(); // No non-zeroable elements!
7828 SDValue VMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VT, VMaskOps);
7829 V = DAG.getNode(VT.isFloatingPoint()
7830 ? (unsigned) X86ISD::FAND : (unsigned) ISD::AND,
7835 /// \brief Try to lower a vector shuffle as a byte shift (shifts in zeros).
7837 /// Attempts to match a shuffle mask against the PSRLDQ and PSLLDQ
7838 /// byte-shift instructions. The mask must consist of a shifted sequential
7839 /// shuffle from one of the input vectors and zeroable elements for the
7840 /// remaining 'shifted in' elements.
7841 static SDValue lowerVectorShuffleAsByteShift(SDLoc DL, MVT VT, SDValue V1,
7842 SDValue V2, ArrayRef<int> Mask,
7843 SelectionDAG &DAG) {
7844 assert(!isNoopShuffleMask(Mask) && "We shouldn't lower no-op shuffles!");
7846 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7848 int NumElts = VT.getVectorNumElements();
7849 int NumLanes = VT.getSizeInBits() / 128;
7850 int NumLaneElts = NumElts / NumLanes;
7851 int Scale = 16 / NumLaneElts;
7852 MVT ShiftVT = MVT::getVectorVT(MVT::i64, 2 * NumLanes);
7854 // PSLLDQ : (little-endian) left byte shift
7855 // [ zz, 0, 1, 2, 3, 4, 5, 6]
7856 // [ zz, zz, -1, -1, 2, 3, 4, -1]
7857 // [ zz, zz, zz, zz, zz, zz, -1, 1]
7858 // PSRLDQ : (little-endian) right byte shift
7859 // [ 5, 6, 7, zz, zz, zz, zz, zz]
7860 // [ -1, 5, 6, 7, zz, zz, zz, zz]
7861 // [ 1, 2, -1, -1, -1, -1, zz, zz]
7862 auto MatchByteShift = [&](int Shift) -> SDValue {
7863 bool MatchLeft = true, MatchRight = true;
7864 for (int l = 0; l < NumElts; l += NumLaneElts) {
7865 for (int i = 0; i < Shift; ++i)
7866 MatchLeft &= Zeroable[l + i];
7867 for (int i = NumLaneElts - Shift; i < NumLaneElts; ++i)
7868 MatchRight &= Zeroable[l + i];
7870 if (!(MatchLeft || MatchRight))
7873 bool MatchV1 = true, MatchV2 = true;
7874 for (int l = 0; l < NumElts; l += NumLaneElts) {
7875 unsigned Pos = MatchLeft ? Shift + l : l;
7876 unsigned Low = MatchLeft ? l : Shift + l;
7877 unsigned Len = NumLaneElts - Shift;
7878 MatchV1 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low);
7879 MatchV2 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low + NumElts);
7881 if (!(MatchV1 || MatchV2))
7884 int ByteShift = Shift * Scale;
7885 unsigned Op = MatchRight ? X86ISD::VSRLDQ : X86ISD::VSHLDQ;
7886 SDValue V = MatchV1 ? V1 : V2;
7887 V = DAG.getNode(ISD::BITCAST, DL, ShiftVT, V);
7888 V = DAG.getNode(Op, DL, ShiftVT, V,
7889 DAG.getConstant(ByteShift * 8, MVT::i8));
7890 return DAG.getNode(ISD::BITCAST, DL, VT, V);
7893 for (int Shift = 1; Shift < NumLaneElts; ++Shift)
7894 if (SDValue S = MatchByteShift(Shift))
7901 /// \brief Try to lower a vector shuffle as a bit shift (shifts in zeros).
7903 /// Attempts to match a shuffle mask against the PSRL(W/D/Q) and PSLL(W/D/Q)
7904 /// SSE2 and AVX2 logical bit-shift instructions. The function matches
7905 /// elements from one of the input vectors shuffled to the left or right
7906 /// with zeroable elements 'shifted in'.
7907 static SDValue lowerVectorShuffleAsBitShift(SDLoc DL, MVT VT, SDValue V1,
7908 SDValue V2, ArrayRef<int> Mask,
7909 SelectionDAG &DAG) {
7910 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
7912 int Size = Mask.size();
7913 assert(Size == (int)VT.getVectorNumElements() && "Unexpected mask size");
7915 // PSRL : (little-endian) right bit shift.
7918 // PSLL : (little-endian) left bit shift.
7920 // [ -1, 4, zz, -1 ]
7921 auto MatchBitShift = [&](int Shift, int Scale) -> SDValue {
7922 MVT ShiftSVT = MVT::getIntegerVT(VT.getScalarSizeInBits() * Scale);
7923 MVT ShiftVT = MVT::getVectorVT(ShiftSVT, Size / Scale);
7924 assert(DAG.getTargetLoweringInfo().isTypeLegal(ShiftVT) &&
7925 "Illegal integer vector type");
7927 bool MatchLeft = true, MatchRight = true;
7928 for (int i = 0; i != Size; i += Scale) {
7929 for (int j = 0; j != Shift; ++j) {
7930 MatchLeft &= Zeroable[i + j];
7932 for (int j = Scale - Shift; j != Scale; ++j) {
7933 MatchRight &= Zeroable[i + j];
7936 if (!(MatchLeft || MatchRight))
7939 bool MatchV1 = true, MatchV2 = true;
7940 for (int i = 0; i != Size; i += Scale) {
7941 unsigned Pos = MatchLeft ? i + Shift : i;
7942 unsigned Low = MatchLeft ? i : i + Shift;
7943 unsigned Len = Scale - Shift;
7944 MatchV1 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low);
7945 MatchV2 &= isSequentialOrUndefInRange(Mask, Pos, Len, Low + Size);
7947 if (!(MatchV1 || MatchV2))
7950 // Cast the inputs to ShiftVT to match VSRLI/VSHLI and back again.
7951 unsigned OpCode = MatchLeft ? X86ISD::VSHLI : X86ISD::VSRLI;
7952 int ShiftAmt = Shift * VT.getScalarSizeInBits();
7953 SDValue V = MatchV1 ? V1 : V2;
7954 V = DAG.getNode(ISD::BITCAST, DL, ShiftVT, V);
7955 V = DAG.getNode(OpCode, DL, ShiftVT, V, DAG.getConstant(ShiftAmt, MVT::i8));
7956 return DAG.getNode(ISD::BITCAST, DL, VT, V);
7959 // SSE/AVX supports logical shifts up to 64-bit integers - so we can just
7960 // keep doubling the size of the integer elements up to that. We can
7961 // then shift the elements of the integer vector by whole multiples of
7962 // their width within the elements of the larger integer vector. Test each
7963 // multiple to see if we can find a match with the moved element indices
7964 // and that the shifted in elements are all zeroable.
7965 for (int Scale = 2; Scale * VT.getScalarSizeInBits() <= 64; Scale *= 2)
7966 for (int Shift = 1; Shift != Scale; ++Shift)
7967 if (SDValue BitShift = MatchBitShift(Shift, Scale))
7968 return BitShift;
7974 /// \brief Lower a vector shuffle as a zero or any extension.
7976 /// Given a specific number of elements, element bit width, and extension
7977 /// stride, produce either a zero or any extension based on the available
7978 /// features of the subtarget.
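// Worked example (illustrative, not from the original comments): zero
// extending v16i8 with Scale = 4 turns the low four bytes into the four i32
// lanes of the result. With SSE4.1 this is a single PMOVZXBD (X86ISD::VZEXT
// to v4i32); otherwise the unpack loop below interleaves with a zero vector
// (or undef for an any-extend) twice to widen i8 -> i16 -> i32.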
7979 static SDValue lowerVectorShuffleAsSpecificZeroOrAnyExtend(
7980 SDLoc DL, MVT VT, int Scale, bool AnyExt, SDValue InputV,
7981 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
7982 assert(Scale > 1 && "Need a scale to extend.");
7983 int NumElements = VT.getVectorNumElements();
7984 int EltBits = VT.getScalarSizeInBits();
7985 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
7986 "Only 8, 16, and 32 bit elements can be extended.");
7987 assert(Scale * EltBits <= 64 && "Cannot zero extend past 64 bits.");
7989 // Found a valid zext mask! Try various lowering strategies based on the
7990 // input type and available ISA extensions.
7991 if (Subtarget->hasSSE41()) {
7992 MVT ExtVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits * Scale),
7993 NumElements / Scale);
7994 return DAG.getNode(ISD::BITCAST, DL, VT,
7995 DAG.getNode(X86ISD::VZEXT, DL, ExtVT, InputV));
7998 // For any extends we can cheat for larger element sizes and use shuffle
7999 // instructions that can fold with a load and/or copy.
8000 if (AnyExt && EltBits == 32) {
8001 int PSHUFDMask[4] = {0, -1, 1, -1};
8002 return DAG.getNode(
8003 ISD::BITCAST, DL, VT,
8004 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
8005 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
8006 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
8008 if (AnyExt && EltBits == 16 && Scale > 2) {
8009 int PSHUFDMask[4] = {0, -1, 0, -1};
8010 InputV = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
8011 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, InputV),
8012 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG));
8013 int PSHUFHWMask[4] = {1, -1, -1, -1};
8014 return DAG.getNode(
8015 ISD::BITCAST, DL, VT,
8016 DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16,
8017 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, InputV),
8018 getV4X86ShuffleImm8ForMask(PSHUFHWMask, DAG)));
8021 // If this would require more than 2 unpack instructions to expand, use
8022 // pshufb when available. We can only use more than 2 unpack instructions
8023 // when zero extending i8 elements which also makes it easier to use pshufb.
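// Worked example (illustrative, not from the original comments): for an
// i8 -> i64 zero extension (Scale == 8) the PSHUFB control built below is
// { 0, 0x80 x 7, 1, 0x80 x 7 }, placing bytes 0 and 1 in the low byte of each
// 64-bit lane and forcing every other byte to zero.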
8024 if (Scale > 4 && EltBits == 8 && Subtarget->hasSSSE3()) {
8025 assert(NumElements == 16 && "Unexpected byte vector width!");
8026 SDValue PSHUFBMask[16];
8027 for (int i = 0; i < 16; ++i)
8028 PSHUFBMask[i] =
8029 DAG.getConstant((i % Scale == 0) ? i / Scale : 0x80, MVT::i8);
8030 InputV = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, InputV);
8031 return DAG.getNode(ISD::BITCAST, DL, VT,
8032 DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, InputV,
8033 DAG.getNode(ISD::BUILD_VECTOR, DL,
8034 MVT::v16i8, PSHUFBMask)));
8037 // Otherwise emit a sequence of unpacks.
8038 do {
8039 MVT InputVT = MVT::getVectorVT(MVT::getIntegerVT(EltBits), NumElements);
8040 SDValue Ext = AnyExt ? DAG.getUNDEF(InputVT)
8041 : getZeroVector(InputVT, Subtarget, DAG, DL);
8042 InputV = DAG.getNode(ISD::BITCAST, DL, InputVT, InputV);
8043 InputV = DAG.getNode(X86ISD::UNPCKL, DL, InputVT, InputV, Ext);
8044 Scale /= 2;
8045 EltBits *= 2;
8046 NumElements /= 2;
8047 } while (Scale > 1);
8048 return DAG.getNode(ISD::BITCAST, DL, VT, InputV);
8051 /// \brief Try to lower a vector shuffle as a zero extension on any microarch.
8053 /// This routine will try to do everything in its power to cleverly lower
8054 /// a shuffle which happens to match the pattern of a zero extend. It doesn't
8055 /// check for the profitability of this lowering, it tries to aggressively
8056 /// match this pattern. It will use all of the micro-architectural details it
8057 /// can to emit an efficient lowering. It handles both blends with all-zero
8058 /// inputs to explicitly zero-extend and undef-lanes (sometimes undef due to
8059 /// masking out later).
8061 /// The reason we have dedicated lowering for zext-style shuffles is that they
8062 /// are both incredibly common and often quite performance sensitive.
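// Worked example (illustrative, not from the original comments): the v8i16
// mask [ 0, zz, 1, zz, 2, zz, 3, zz ] is matched with Scale = 2 and
// AnyExt = false, i.e. a zero extension of the low four i16 elements of V1
// into 32-bit lanes, and is handed to the specific lowering above.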
8063 static SDValue lowerVectorShuffleAsZeroOrAnyExtend(
8064 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8065 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8066 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8068 int Bits = VT.getSizeInBits();
8069 int NumElements = VT.getVectorNumElements();
8070 assert(VT.getScalarSizeInBits() <= 32 &&
8071 "Exceeds 32-bit integer zero extension limit");
8072 assert((int)Mask.size() == NumElements && "Unexpected shuffle mask size");
8074 // Define a helper function to check a particular ext-scale and lower to it if
8075 // valid.
8076 auto Lower = [&](int Scale) -> SDValue {
8077 SDValue InputV;
8078 bool AnyExt = true;
8079 for (int i = 0; i < NumElements; ++i) {
8080 if (Mask[i] == -1)
8081 continue; // Valid anywhere but doesn't tell us anything.
8082 if (i % Scale != 0) {
8083 // Each of the extended elements need to be zeroable.
8084 if (!Zeroable[i])
8085 return SDValue();
8087 // We no longer are in the anyext case.
8088 AnyExt = false;
8089 continue;
8090 }
8092 // Each of the base elements needs to be consecutive indices into the
8093 // same input vector.
8094 SDValue V = Mask[i] < NumElements ? V1 : V2;
8097 else if (InputV != V)
8098 return SDValue(); // Flip-flopping inputs.
8100 if (Mask[i] % NumElements != i / Scale)
8101 return SDValue(); // Non-consecutive strided elements.
8104 // If we fail to find an input, we have a zero-shuffle which should always
8105 // have already been handled.
8106 // FIXME: Maybe handle this here in case during blending we end up with one?
8107 if (!InputV)
8108 return SDValue();
8110 return lowerVectorShuffleAsSpecificZeroOrAnyExtend(
8111 DL, VT, Scale, AnyExt, InputV, Subtarget, DAG);
8114 // The widest scale possible for extending is to a 64-bit integer.
8115 assert(Bits % 64 == 0 &&
8116 "The number of bits in a vector must be divisible by 64 on x86!");
8117 int NumExtElements = Bits / 64;
8119 // Each iteration, try extending the elements half as much, but into twice as
8120 // many elements.
8121 for (; NumExtElements < NumElements; NumExtElements *= 2) {
8122 assert(NumElements % NumExtElements == 0 &&
8123 "The input vector size must be divisible by the extended size.");
8124 if (SDValue V = Lower(NumElements / NumExtElements))
8125 return V;
8126 }
8128 // General extends failed, but 128-bit vectors may be able to use MOVQ.
8132 // Returns one of the source operands if the shuffle can be reduced to a
8133 // MOVQ, copying the lower 64-bits and zero-extending to the upper 64-bits.
8134 auto CanZExtLowHalf = [&]() {
8135 for (int i = NumElements / 2; i != NumElements; ++i)
8136 if (!Zeroable[i])
8137 return SDValue();
8138 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, 0))
8139 return V1;
8140 if (isSequentialOrUndefInRange(Mask, 0, NumElements / 2, NumElements))
8141 return V2;
8142 return SDValue();
8143 };
8145 if (SDValue V = CanZExtLowHalf()) {
8146 V = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V);
8147 V = DAG.getNode(X86ISD::VZEXT_MOVL, DL, MVT::v2i64, V);
8148 return DAG.getNode(ISD::BITCAST, DL, VT, V);
8151 // No viable ext lowering found.
8152 return SDValue();
8155 /// \brief Try to get a scalar value for a specific element of a vector.
8157 /// Looks through BUILD_VECTOR and SCALAR_TO_VECTOR nodes to find a scalar.
8158 static SDValue getScalarValueForVectorElement(SDValue V, int Idx,
8159 SelectionDAG &DAG) {
8160 MVT VT = V.getSimpleValueType();
8161 MVT EltVT = VT.getVectorElementType();
8162 while (V.getOpcode() == ISD::BITCAST)
8163 V = V.getOperand(0);
8164 // If the bitcasts shift the element size, we can't extract an equivalent
8165 // element from it.
8166 MVT NewVT = V.getSimpleValueType();
8167 if (!NewVT.isVector() || NewVT.getScalarSizeInBits() != VT.getScalarSizeInBits())
8168 return SDValue();
8170 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8171 (Idx == 0 && V.getOpcode() == ISD::SCALAR_TO_VECTOR))
8172 return DAG.getNode(ISD::BITCAST, SDLoc(V), EltVT, V.getOperand(Idx));
8174 return SDValue();
8175 }
8177 /// \brief Helper to test for a load that can be folded with x86 shuffles.
8179 /// This is particularly important because the set of instructions varies
8180 /// significantly based on whether the operand is a load or not.
8181 static bool isShuffleFoldableLoad(SDValue V) {
8182 while (V.getOpcode() == ISD::BITCAST)
8183 V = V.getOperand(0);
8185 return ISD::isNON_EXTLoad(V.getNode());
8188 /// \brief Try to lower insertion of a single element into a zero vector.
8190 /// This is a common pattern that we have especially efficient patterns to lower
8191 /// across all subtarget feature sets.
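// Worked example (illustrative, not from the original comments): for a v4i32
// shuffle with mask [ 4, zz, zz, zz ] where V1 is fully zeroable, the scalar
// feeding V2's element 0 (when visible through BUILD_VECTOR/SCALAR_TO_VECTOR)
// is placed with SCALAR_TO_VECTOR and the upper lanes are cleared with
// X86ISD::VZEXT_MOVL -- the classic MOVD-style insert into a zeroed register.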
8192 static SDValue lowerVectorShuffleAsElementInsertion(
8193 MVT VT, SDLoc DL, SDValue V1, SDValue V2, ArrayRef<int> Mask,
8194 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8195 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8196 MVT ExtVT = VT;
8197 MVT EltVT = VT.getVectorElementType();
8199 int V2Index = std::find_if(Mask.begin(), Mask.end(),
8200 [&Mask](int M) { return M >= (int)Mask.size(); }) -
8201 Mask.begin();
8202 bool IsV1Zeroable = true;
8203 for (int i = 0, Size = Mask.size(); i < Size; ++i)
8204 if (i != V2Index && !Zeroable[i]) {
8205 IsV1Zeroable = false;
8209 // Check for a single input from a SCALAR_TO_VECTOR node.
8210 // FIXME: All of this should be canonicalized into INSERT_VECTOR_ELT and
8211 // all the smarts here sunk into that routine. However, the current
8212 // lowering of BUILD_VECTOR makes that nearly impossible until the old
8213 // vector shuffle lowering is dead.
8214 if (SDValue V2S = getScalarValueForVectorElement(
8215 V2, Mask[V2Index] - Mask.size(), DAG)) {
8216 // We need to zext the scalar if it is smaller than an i32.
8217 V2S = DAG.getNode(ISD::BITCAST, DL, EltVT, V2S);
8218 if (EltVT == MVT::i8 || EltVT == MVT::i16) {
8219 // Using zext to expand a narrow element won't work for non-zero
8220 // elements.
8221 if (!IsV1Zeroable)
8222 return SDValue();
8224 // Zero-extend directly to i32.
8225 ExtVT = MVT::v4i32;
8226 V2S = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i32, V2S);
8228 V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, ExtVT, V2S);
8229 } else if (Mask[V2Index] != (int)Mask.size() || EltVT == MVT::i8 ||
8230 EltVT == MVT::i16) {
8231 // Either not inserting from the low element of the input or the input
8232 // element size is too small to use VZEXT_MOVL to clear the high bits.
8233 return SDValue();
8234 }
8236 if (!IsV1Zeroable) {
8237 // If V1 can't be treated as a zero vector we have fewer options to lower
8238 // this. We can't support integer vectors or non-zero targets cheaply, and
8239 // the V1 elements can't be permuted in any way.
8240 assert(VT == ExtVT && "Cannot change extended type when non-zeroable!");
8241 if (!VT.isFloatingPoint() || V2Index != 0)
8242 return SDValue();
8243 SmallVector<int, 8> V1Mask(Mask.begin(), Mask.end());
8244 V1Mask[V2Index] = -1;
8245 if (!isNoopShuffleMask(V1Mask))
8246 return SDValue();
8247 // This is essentially a special case blend operation, but if we have
8248 // general purpose blend operations, they are always faster. Bail and let
8249 // the rest of the lowering handle these as blends.
8250 if (Subtarget->hasSSE41())
8251 return SDValue();
8253 // Otherwise, use MOVSD or MOVSS.
8254 assert((EltVT == MVT::f32 || EltVT == MVT::f64) &&
8255 "Only two types of floating point element types to handle!");
8256 return DAG.getNode(EltVT == MVT::f32 ? X86ISD::MOVSS : X86ISD::MOVSD, DL,
8257 ExtVT, V1, V2);
8258 }
8260 // This lowering only works for the low element with floating point vectors.
8261 if (VT.isFloatingPoint() && V2Index != 0)
8262 return SDValue();
8264 V2 = DAG.getNode(X86ISD::VZEXT_MOVL, DL, ExtVT, V2);
8265 if (ExtVT != VT)
8266 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8268 if (V2Index != 0) {
8269 // If we have 4 or fewer lanes we can cheaply shuffle the element into
8270 // the desired position. Otherwise it is more efficient to do a vector
8271 // shift left. We know that we can do a vector shift left because all
8272 // the inputs are zero.
8273 if (VT.isFloatingPoint() || VT.getVectorNumElements() <= 4) {
8274 SmallVector<int, 4> V2Shuffle(Mask.size(), 1);
8275 V2Shuffle[V2Index] = 0;
8276 V2 = DAG.getVectorShuffle(VT, DL, V2, DAG.getUNDEF(VT), V2Shuffle);
8277 } else {
8278 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, V2);
8279 V2 = DAG.getNode(
8280 X86ISD::VSHLDQ, DL, MVT::v2i64, V2,
8281 DAG.getConstant(
8282 V2Index * EltVT.getSizeInBits(),
8283 DAG.getTargetLoweringInfo().getScalarShiftAmountTy(MVT::v2i64)));
8284 V2 = DAG.getNode(ISD::BITCAST, DL, VT, V2);
8285 }
8286 }
8288 return V2;
8289 }
8290 /// \brief Try to lower broadcast of a single element.
8292 /// For convenience, this code also bundles all of the subtarget feature set
8293 /// filtering. While a little annoying to re-dispatch on type here, there isn't
8294 /// a convenient way to factor it out.
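// Worked example (illustrative, not from the original comments): a v4f32
// splat mask [ 0, 0, 0, 0 ] whose element 0 comes from a scalar load (seen
// through a BUILD_VECTOR or SCALAR_TO_VECTOR) becomes a single VBROADCASTSS
// from memory on AVX; with AVX2 a register-source VBROADCASTSS also works.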
8295 static SDValue lowerVectorShuffleAsBroadcast(MVT VT, SDLoc DL, SDValue V,
8296 ArrayRef<int> Mask,
8297 const X86Subtarget *Subtarget,
8298 SelectionDAG &DAG) {
8299 if (!Subtarget->hasAVX())
8300 return SDValue();
8301 if (VT.isInteger() && !Subtarget->hasAVX2())
8302 return SDValue();
8304 // Check that the mask is a broadcast.
8305 int BroadcastIdx = -1;
8306 for (int M : Mask)
8307 if (M >= 0 && BroadcastIdx == -1)
8308 BroadcastIdx = M;
8309 else if (M >= 0 && M != BroadcastIdx)
8310 return SDValue();
8312 assert(BroadcastIdx < (int)Mask.size() && "We only expect to be called with "
8313 "a sorted mask where the broadcast "
8316 // Go up the chain of (vector) values to try and find a scalar load that
8317 // we can combine with the broadcast.
8318 for (;;) {
8319 switch (V.getOpcode()) {
8320 case ISD::CONCAT_VECTORS: {
8321 int OperandSize = Mask.size() / V.getNumOperands();
8322 V = V.getOperand(BroadcastIdx / OperandSize);
8323 BroadcastIdx %= OperandSize;
8324 continue;
8325 }
8327 case ISD::INSERT_SUBVECTOR: {
8328 SDValue VOuter = V.getOperand(0), VInner = V.getOperand(1);
8329 auto ConstantIdx = dyn_cast<ConstantSDNode>(V.getOperand(2));
8330 if (!ConstantIdx)
8331 break;
8333 int BeginIdx = (int)ConstantIdx->getZExtValue();
8334 int EndIdx =
8335 BeginIdx + (int)VInner.getValueType().getVectorNumElements();
8336 if (BroadcastIdx >= BeginIdx && BroadcastIdx < EndIdx) {
8337 BroadcastIdx -= BeginIdx;
8338 V = VInner;
8339 } else {
8340 V = VOuter;
8341 }
8342 continue;
8348 // Check if this is a broadcast of a scalar. We special case lowering
8349 // for scalars so that we can more effectively fold with loads.
8350 if (V.getOpcode() == ISD::BUILD_VECTOR ||
8351 (V.getOpcode() == ISD::SCALAR_TO_VECTOR && BroadcastIdx == 0)) {
8352 V = V.getOperand(BroadcastIdx);
8354 // If the scalar isn't a load we can't broadcast from it in AVX1, only with
8355 // AVX2.
8356 if (!Subtarget->hasAVX2() && !isShuffleFoldableLoad(V))
8357 return SDValue();
8358 } else if (BroadcastIdx != 0 || !Subtarget->hasAVX2()) {
8359 // We can't broadcast from a vector register w/o AVX2, and we can only
8360 // broadcast from the zero-element of a vector register.
8361 return SDValue();
8362 }
8364 return DAG.getNode(X86ISD::VBROADCAST, DL, VT, V);
8367 // Check for whether we can use INSERTPS to perform the shuffle. We only use
8368 // INSERTPS when the V1 elements are already in the correct locations
8369 // because otherwise we can just always use two SHUFPS instructions which
8370 // are much smaller to encode than a SHUFPS and an INSERTPS. We can also
8371 // perform INSERTPS if a single V1 element is out of place and all V2
8372 // elements are zeroable.
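// Worked example (illustrative, not from the original comments): for the
// v4f32 mask [ 0, 1, 6, 3 ], V1's elements 0, 1 and 3 are already in place
// and only lane 2 takes V2's element 2, giving V2SrcIndex = 2, V2DstIndex = 2
// and an empty zero mask, i.e. INSERTPS with immediate 0xA0.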
8373 static SDValue lowerVectorShuffleAsInsertPS(SDValue Op, SDValue V1, SDValue V2,
8374 ArrayRef<int> Mask,
8375 SelectionDAG &DAG) {
8376 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8377 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8378 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8379 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8381 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
8383 unsigned ZMask = 0;
8384 int V1DstIndex = -1;
8385 int V2DstIndex = -1;
8386 bool V1UsedInPlace = false;
8388 for (int i = 0; i < 4; ++i) {
8389 // Synthesize a zero mask from the zeroable elements (includes undefs).
8390 if (Zeroable[i]) {
8391 ZMask |= 1 << i;
8392 continue;
8393 }
8395 // Flag if we use any V1 inputs in place.
8396 if (i == Mask[i]) {
8397 V1UsedInPlace = true;
8398 continue;
8399 }
8401 // We can only insert a single non-zeroable element.
8402 if (V1DstIndex != -1 || V2DstIndex != -1)
8403 return SDValue();
8405 if (Mask[i] < 4) {
8406 // V1 input out of place for insertion.
8407 V1DstIndex = i;
8408 } else {
8409 // V2 input for insertion.
8410 V2DstIndex = i;
8411 }
8412 }
8414 // Don't bother if we have no (non-zeroable) element for insertion.
8415 if (V1DstIndex == -1 && V2DstIndex == -1)
8416 return SDValue();
8418 // Determine element insertion src/dst indices. The src index is from the
8419 // start of the inserted vector, not the start of the concatenated vector.
8420 unsigned V2SrcIndex = 0;
8421 if (V1DstIndex != -1) {
8422 // If we have a V1 input out of place, we use V1 as the V2 element insertion
8423 // and don't use the original V2 at all.
8424 V2SrcIndex = Mask[V1DstIndex];
8425 V2DstIndex = V1DstIndex;
8426 V2 = V1;
8427 } else {
8428 V2SrcIndex = Mask[V2DstIndex] - 4;
8429 }
8431 // If no V1 inputs are used in place, then the result is created only from
8432 // the zero mask and the V2 insertion - so remove V1 dependency.
8433 if (!V1UsedInPlace)
8434 V1 = DAG.getUNDEF(MVT::v4f32);
8436 unsigned InsertPSMask = V2SrcIndex << 6 | V2DstIndex << 4 | ZMask;
8437 assert((InsertPSMask & ~0xFFu) == 0 && "Invalid mask!");
8439 // Insert the V2 element into the desired position.
8440 SDLoc DL(Op);
8441 return DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32, V1, V2,
8442 DAG.getConstant(InsertPSMask, MVT::i8));
8445 /// \brief Handle lowering of 2-lane 64-bit floating point shuffles.
8447 /// This is the basis function for the 2-lane 64-bit shuffles as we have full
8448 /// support for floating point shuffles but not integer shuffles. These
8449 /// instructions will incur a domain crossing penalty on some chips though so
8450 /// it is better to avoid lowering through this for integer vectors where
8451 /// possible.
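// Worked example (illustrative, not from the original comments): the
// single-input mask [ 1, 0 ] swaps the two doubles and becomes SHUFPD $1 (or
// VPERMILPD $1 on AVX, which can fold a load), while [ 0, 0 ] is MOVDDUP on
// SSE3 and newer.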
8452 static SDValue lowerV2F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8453 const X86Subtarget *Subtarget,
8454 SelectionDAG &DAG) {
8456 assert(Op.getSimpleValueType() == MVT::v2f64 && "Bad shuffle type!");
8457 assert(V1.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8458 assert(V2.getSimpleValueType() == MVT::v2f64 && "Bad operand type!");
8459 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8460 ArrayRef<int> Mask = SVOp->getMask();
8461 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8463 if (isSingleInputShuffleMask(Mask)) {
8464 // Use low duplicate instructions for masks that match their pattern.
8465 if (Subtarget->hasSSE3())
8466 if (isShuffleEquivalent(V1, V2, Mask, 0, 0))
8467 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v2f64, V1);
8469 // Straight shuffle of a single input vector. Simulate this by using the
8470 // single input as both of the "inputs" to this instruction..
8471 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1);
8473 if (Subtarget->hasAVX()) {
8474 // If we have AVX, we can use VPERMILPS which will allow folding a load
8475 // into the shuffle.
8476 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v2f64, V1,
8477 DAG.getConstant(SHUFPDMask, MVT::i8));
8480 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V1,
8481 DAG.getConstant(SHUFPDMask, MVT::i8));
8483 assert(Mask[0] >= 0 && Mask[0] < 2 && "Non-canonicalized blend!");
8484 assert(Mask[1] >= 2 && "Non-canonicalized blend!");
8486 // If we have a single input, insert that into V1 if we can do so cheaply.
8487 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8488 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8489 MVT::v2f64, DL, V1, V2, Mask, Subtarget, DAG))
8490 return Insertion;
8491 // Try inverting the insertion since for v2 masks it is easy to do and we
8492 // can't reliably sort the mask one way or the other.
8493 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8494 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8495 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8496 MVT::v2f64, DL, V2, V1, InverseMask, Subtarget, DAG))
8497 return Insertion;
8498 }
8500 // Try to use one of the special instruction patterns to handle two common
8501 // blend patterns if a zero-blend above didn't work.
8502 if (isShuffleEquivalent(V1, V2, Mask, 0, 3) || isShuffleEquivalent(V1, V2, Mask, 1, 3))
8503 if (SDValue V1S = getScalarValueForVectorElement(V1, Mask[0], DAG))
8504 // We can either use a special instruction to load over the low double or
8505 // to move just the low double.
8506 return DAG.getNode(
8507 isShuffleFoldableLoad(V1S) ? X86ISD::MOVLPD : X86ISD::MOVSD,
8508 DL, MVT::v2f64, V2,
8509 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64, V1S));
8511 if (Subtarget->hasSSE41())
8512 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2f64, V1, V2, Mask,
8513 Subtarget, DAG))
8514 return Blend;
8516 // Use dedicated unpack instructions for masks that match their pattern.
8517 if (isShuffleEquivalent(V1, V2, Mask, 0, 2))
8518 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2f64, V1, V2);
8519 if (isShuffleEquivalent(V1, V2, Mask, 1, 3))
8520 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2f64, V1, V2);
8522 unsigned SHUFPDMask = (Mask[0] == 1) | (((Mask[1] - 2) == 1) << 1);
8523 return DAG.getNode(X86ISD::SHUFP, SDLoc(Op), MVT::v2f64, V1, V2,
8524 DAG.getConstant(SHUFPDMask, MVT::i8));
8527 /// \brief Handle lowering of 2-lane 64-bit integer shuffles.
8529 /// Tries to lower a 2-lane 64-bit shuffle using shuffle operations provided by
8530 /// the integer unit to minimize domain crossing penalties. However, for blends
8531 /// it falls back to the floating point shuffle operation with appropriate bit
8532 /// casting.
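// Worked example (illustrative, not from the original comments): when no
// broadcast applies, the single-input v2i64 mask [ 1, 1 ] is widened to the
// v4i32 mask [ 2, 3, 2, 3 ] and emitted as PSHUFD $0xEE, staying entirely in
// the integer domain.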
8533 static SDValue lowerV2I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8534 const X86Subtarget *Subtarget,
8535 SelectionDAG &DAG) {
8537 assert(Op.getSimpleValueType() == MVT::v2i64 && "Bad shuffle type!");
8538 assert(V1.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8539 assert(V2.getSimpleValueType() == MVT::v2i64 && "Bad operand type!");
8540 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8541 ArrayRef<int> Mask = SVOp->getMask();
8542 assert(Mask.size() == 2 && "Unexpected mask size for v2 shuffle!");
8544 if (isSingleInputShuffleMask(Mask)) {
8545 // Check for being able to broadcast a single element.
8546 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v2i64, DL, V1,
8547 Mask, Subtarget, DAG))
8548 return Broadcast;
8550 // Straight shuffle of a single input vector. For everything from SSE2
8551 // onward this has a single fast instruction with no scary immediates.
8552 // We have to map the mask as it is actually a v4i32 shuffle instruction.
8553 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V1);
8554 int WidenedMask[4] = {
8555 std::max(Mask[0], 0) * 2, std::max(Mask[0], 0) * 2 + 1,
8556 std::max(Mask[1], 0) * 2, std::max(Mask[1], 0) * 2 + 1};
8558 ISD::BITCAST, DL, MVT::v2i64,
8559 DAG.getNode(X86ISD::PSHUFD, SDLoc(Op), MVT::v4i32, V1,
8560 getV4X86ShuffleImm8ForMask(WidenedMask, DAG)));
8563 // Try to use byte shift instructions.
8564 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8565 DL, MVT::v2i64, V1, V2, Mask, DAG))
8566 return Shift;
8568 // If we have a single input from V2 insert that into V1 if we can do so
8569 // cheaply.
8570 if ((Mask[0] >= 2) + (Mask[1] >= 2) == 1) {
8571 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8572 MVT::v2i64, DL, V1, V2, Mask, Subtarget, DAG))
8573 return Insertion;
8574 // Try inverting the insertion since for v2 masks it is easy to do and we
8575 // can't reliably sort the mask one way or the other.
8576 int InverseMask[2] = {Mask[0] < 0 ? -1 : (Mask[0] ^ 2),
8577 Mask[1] < 0 ? -1 : (Mask[1] ^ 2)};
8578 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
8579 MVT::v2i64, DL, V2, V1, InverseMask, Subtarget, DAG))
8580 return Insertion;
8581 }
8583 if (Subtarget->hasSSE41())
8584 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v2i64, V1, V2, Mask,
8585 Subtarget, DAG))
8586 return Blend;
8588 // Use dedicated unpack instructions for masks that match their pattern.
8589 if (isShuffleEquivalent(V1, V2, Mask, 0, 2))
8590 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, V1, V2);
8591 if (isShuffleEquivalent(V1, V2, Mask, 1, 3))
8592 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v2i64, V1, V2);
8594 // Try to use byte rotation instructions.
8595 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
8596 if (Subtarget->hasSSSE3())
8597 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8598 DL, MVT::v2i64, V1, V2, Mask, Subtarget, DAG))
8599 return Rotate;
8601 // We implement this with SHUFPD which is pretty lame because it will likely
8602 // incur 2 cycles of stall for integer vectors on Nehalem and older chips.
8603 // However, all the alternatives are still more cycles and newer chips don't
8604 // have this problem. It would be really nice if x86 had better shuffles here.
8605 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V1);
8606 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, V2);
8607 return DAG.getNode(ISD::BITCAST, DL, MVT::v2i64,
8608 DAG.getVectorShuffle(MVT::v2f64, DL, V1, V2, Mask));
8611 /// \brief Test whether this can be lowered with a single SHUFPS instruction.
8613 /// This is used to disable more specialized lowerings when the shufps lowering
8614 /// will happen to be efficient.
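// Worked example (illustrative, not from the original comments): [ 0, 1, 4, 5 ]
// needs only one SHUFPS (low half from V1, high half from V2), whereas
// [ 0, 4, 1, 5 ] mixes both inputs within each half and fails this test.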
8615 static bool isSingleSHUFPSMask(ArrayRef<int> Mask) {
8616 // This routine only handles 128-bit shufps.
8617 assert(Mask.size() == 4 && "Unsupported mask size!");
8619 // To lower with a single SHUFPS we need to have the low half and high half
8620 // each requiring a single input.
8621 if (Mask[0] != -1 && Mask[1] != -1 && (Mask[0] < 4) != (Mask[1] < 4))
8622 return false;
8623 if (Mask[2] != -1 && Mask[3] != -1 && (Mask[2] < 4) != (Mask[3] < 4))
8624 return false;
8626 return true;
8627 }
8629 /// \brief Lower a vector shuffle using the SHUFPS instruction.
8631 /// This is a helper routine dedicated to lowering vector shuffles using SHUFPS.
8632 /// It makes no assumptions about whether this is the *best* lowering, it simply
8633 /// uses it.
8634 static SDValue lowerVectorShuffleWithSHUFPS(SDLoc DL, MVT VT,
8635 ArrayRef<int> Mask, SDValue V1,
8636 SDValue V2, SelectionDAG &DAG) {
8637 SDValue LowV = V1, HighV = V2;
8638 int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
8640 int NumV2Elements =
8641 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8643 if (NumV2Elements == 1) {
8644 int V2Index =
8645 std::find_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; }) -
8646 Mask.begin();
8648 // Compute the index adjacent to V2Index and in the same half by toggling
8649 // the low bit.
8650 int V2AdjIndex = V2Index ^ 1;
8652 if (Mask[V2AdjIndex] == -1) {
8653 // Handles all the cases where we have a single V2 element and an undef.
8654 // This will only ever happen in the high lanes because we commute the
8655 // vector otherwise.
8656 if (V2Index < 2)
8657 std::swap(LowV, HighV);
8658 NewMask[V2Index] -= 4;
8659 } else {
8660 // Handle the case where the V2 element ends up adjacent to a V1 element.
8661 // To make this work, blend them together as the first step.
8662 int V1Index = V2AdjIndex;
8663 int BlendMask[4] = {Mask[V2Index] - 4, 0, Mask[V1Index], 0};
8664 V2 = DAG.getNode(X86ISD::SHUFP, DL, VT, V2, V1,
8665 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8667 // Now proceed to reconstruct the final blend as we have the necessary
8668 // high or low half formed.
8669 if (V2Index < 2) {
8670 LowV = V2;
8671 HighV = V1;
8672 } else {
8673 HighV = V2;
8674 }
8675 NewMask[V1Index] = 2; // We put the V1 element in V2[2].
8676 NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
8678 } else if (NumV2Elements == 2) {
8679 if (Mask[0] < 4 && Mask[1] < 4) {
8680 // Handle the easy case where we have V1 in the low lanes and V2 in the
8681 // high lanes.
8682 NewMask[2] -= 4;
8683 NewMask[3] -= 4;
8684 } else if (Mask[2] < 4 && Mask[3] < 4) {
8685 // We also handle the reversed case because this utility may get called
8686 // when we detect a SHUFPS pattern but can't easily commute the shuffle to
8687 // arrange things in the right direction.
8688 NewMask[0] -= 4;
8689 NewMask[1] -= 4;
8690 HighV = V1;
8691 LowV = V2;
8692 } else {
8693 // We have a mixture of V1 and V2 in both low and high lanes. Rather than
8694 // trying to place elements directly, just blend them and set up the final
8695 // shuffle to place them.
8697 // The first two blend mask elements are for V1, the second two are for
8698 // V2.
8699 int BlendMask[4] = {Mask[0] < 4 ? Mask[0] : Mask[1],
8700 Mask[2] < 4 ? Mask[2] : Mask[3],
8701 (Mask[0] >= 4 ? Mask[0] : Mask[1]) - 4,
8702 (Mask[2] >= 4 ? Mask[2] : Mask[3]) - 4};
8703 V1 = DAG.getNode(X86ISD::SHUFP, DL, VT, V1, V2,
8704 getV4X86ShuffleImm8ForMask(BlendMask, DAG));
8706 // Now we do a normal shuffle of V1 by giving V1 as both operands to
8707 // the blend.
8708 LowV = HighV = V1;
8709 NewMask[0] = Mask[0] < 4 ? 0 : 2;
8710 NewMask[1] = Mask[0] < 4 ? 2 : 0;
8711 NewMask[2] = Mask[2] < 4 ? 1 : 3;
8712 NewMask[3] = Mask[2] < 4 ? 3 : 1;
8715 return DAG.getNode(X86ISD::SHUFP, DL, VT, LowV, HighV,
8716 getV4X86ShuffleImm8ForMask(NewMask, DAG));
8719 /// \brief Lower 4-lane 32-bit floating point shuffles.
8721 /// Uses instructions exclusively from the floating point unit to minimize
8722 /// domain crossing penalties, as these are sufficient to implement all v4f32
8723 /// shuffles.
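// Worked example (illustrative, not from the original comments): the
// single-input reversal mask [ 3, 2, 1, 0 ] becomes SHUFPS $0x1B of V1 with
// itself, or a load-folding VPERMILPS $0x1B on AVX.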
8724 static SDValue lowerV4F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8725 const X86Subtarget *Subtarget,
8726 SelectionDAG &DAG) {
8728 assert(Op.getSimpleValueType() == MVT::v4f32 && "Bad shuffle type!");
8729 assert(V1.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8730 assert(V2.getSimpleValueType() == MVT::v4f32 && "Bad operand type!");
8731 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8732 ArrayRef<int> Mask = SVOp->getMask();
8733 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8735 int NumV2Elements =
8736 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8738 if (NumV2Elements == 0) {
8739 // Check for being able to broadcast a single element.
8740 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f32, DL, V1,
8741 Mask, Subtarget, DAG))
8742 return Broadcast;
8744 // Use even/odd duplicate instructions for masks that match their pattern.
8745 if (Subtarget->hasSSE3()) {
8746 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2))
8747 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v4f32, V1);
8748 if (isShuffleEquivalent(V1, V2, Mask, 1, 1, 3, 3))
8749 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v4f32, V1);
8752 if (Subtarget->hasAVX()) {
8753 // If we have AVX, we can use VPERMILPS which will allow folding a load
8754 // into the shuffle.
8755 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f32, V1,
8756 getV4X86ShuffleImm8ForMask(Mask, DAG));
8759 // Otherwise, use a straight shuffle of a single input vector. We pass the
8760 // input vector to both operands to simulate this with a SHUFPS.
8761 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f32, V1, V1,
8762 getV4X86ShuffleImm8ForMask(Mask, DAG));
8765 // There are special ways we can lower some single-element blends. However, we
8766 // have custom ways we can lower more complex single-element blends below that
8767 // we defer to if both this and BLENDPS fail to match, so restrict this to
8768 // when the V2 input is targeting element 0 of the mask -- that is the fast
8769 // case here.
8770 if (NumV2Elements == 1 && Mask[0] >= 4)
8771 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4f32, DL, V1, V2,
8772 Mask, Subtarget, DAG))
8773 return V;
8775 if (Subtarget->hasSSE41()) {
8776 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f32, V1, V2, Mask,
8777 Subtarget, DAG))
8778 return Blend;
8780 // Use INSERTPS if we can complete the shuffle efficiently.
8781 if (SDValue V = lowerVectorShuffleAsInsertPS(Op, V1, V2, Mask, DAG))
8782 return V;
8784 if (!isSingleSHUFPSMask(Mask))
8785 if (SDValue BlendPerm = lowerVectorShuffleAsBlendAndPermute(
8786 DL, MVT::v4f32, V1, V2, Mask, DAG))
8787 return BlendPerm;
8788 }
8790 // Use dedicated unpack instructions for masks that match their pattern.
8791 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 1, 5))
8792 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f32, V1, V2);
8793 if (isShuffleEquivalent(V1, V2, Mask, 2, 6, 3, 7))
8794 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f32, V1, V2);
8796 // Otherwise fall back to a SHUFPS lowering strategy.
8797 return lowerVectorShuffleWithSHUFPS(DL, MVT::v4f32, Mask, V1, V2, DAG);
8800 /// \brief Lower 4-lane i32 vector shuffles.
8802 /// We try to handle these with integer-domain shuffles where we can, but for
8803 /// blends we use the floating point domain blend instructions.
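// Worked example (illustrative, not from the original comments): the
// single-input mask [ 1, 0, 3, 2 ] lowers to a plain PSHUFD $0xB1, while a
// two-input blend such as [ 0, 5, 2, 7 ] uses BLENDPS when SSE4.1 is
// available and otherwise falls through to the SHUFPS-based lowering at the
// end of this function.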
8804 static SDValue lowerV4I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
8805 const X86Subtarget *Subtarget,
8806 SelectionDAG &DAG) {
8808 assert(Op.getSimpleValueType() == MVT::v4i32 && "Bad shuffle type!");
8809 assert(V1.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8810 assert(V2.getSimpleValueType() == MVT::v4i32 && "Bad operand type!");
8811 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
8812 ArrayRef<int> Mask = SVOp->getMask();
8813 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
8815 // Whenever we can lower this as a zext, that instruction is strictly faster
8816 // than any alternative. It also allows us to fold memory operands into the
8817 // shuffle in many cases.
8818 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v4i32, V1, V2,
8819 Mask, Subtarget, DAG))
8820 return ZExt;
8822 int NumV2Elements =
8823 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
8825 if (NumV2Elements == 0) {
8826 // Check for being able to broadcast a single element.
8827 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i32, DL, V1,
8828 Mask, Subtarget, DAG))
8829 return Broadcast;
8831 // Straight shuffle of a single input vector. For everything from SSE2
8832 // onward this has a single fast instruction with no scary immediates.
8833 // We coerce the shuffle pattern to be compatible with UNPCK instructions
8834 // but we aren't actually going to use the UNPCK instruction because doing
8835 // so prevents folding a load into this instruction or making a copy.
8836 const int UnpackLoMask[] = {0, 0, 1, 1};
8837 const int UnpackHiMask[] = {2, 2, 3, 3};
8838 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 1, 1))
8839 Mask = UnpackLoMask;
8840 else if (isShuffleEquivalent(V1, V2, Mask, 2, 2, 3, 3))
8841 Mask = UnpackHiMask;
8843 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V1,
8844 getV4X86ShuffleImm8ForMask(Mask, DAG));
8847 // Try to use bit shift instructions.
8848 if (SDValue Shift = lowerVectorShuffleAsBitShift(
8849 DL, MVT::v4i32, V1, V2, Mask, DAG))
8850 return Shift;
8852 // Try to use byte shift instructions.
8853 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8854 DL, MVT::v4i32, V1, V2, Mask, DAG))
8855 return Shift;
8857 // There are special ways we can lower some single-element blends.
8858 if (NumV2Elements == 1)
8859 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v4i32, DL, V1, V2,
8860 Mask, Subtarget, DAG))
8861 return V;
8863 if (Subtarget->hasSSE41())
8864 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i32, V1, V2, Mask,
8865 Subtarget, DAG))
8866 return Blend;
8868 if (SDValue Masked =
8869 lowerVectorShuffleAsBitMask(DL, MVT::v4i32, V1, V2, Mask, DAG))
8870 return Masked;
8872 // Use dedicated unpack instructions for masks that match their pattern.
8873 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 1, 5))
8874 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i32, V1, V2);
8875 if (isShuffleEquivalent(V1, V2, Mask, 2, 6, 3, 7))
8876 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i32, V1, V2);
8878 // Try to use byte rotation instructions.
8879 // It's more profitable for pre-SSSE3 to use shuffles/unpacks.
8880 if (Subtarget->hasSSSE3())
8881 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8882 DL, MVT::v4i32, V1, V2, Mask, Subtarget, DAG))
8883 return Rotate;
8885 // We implement this with SHUFPS because it can blend from two vectors.
8886 // Because we're going to eventually use SHUFPS, we use SHUFPS even to build
8887 // up the inputs, bypassing domain shift penalties that we would incur if we
8888 // directly used PSHUFD on Nehalem and older. For newer chips, this isn't
8889 // needed.
8890 return DAG.getNode(ISD::BITCAST, DL, MVT::v4i32,
8891 DAG.getVectorShuffle(
8892 MVT::v4f32, DL,
8893 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V1),
8894 DAG.getNode(ISD::BITCAST, DL, MVT::v4f32, V2), Mask));
8897 /// \brief Lowering of single-input v8i16 shuffles is the cornerstone of SSE2
8898 /// shuffle lowering, and the most complex part.
8900 /// The lowering strategy is to try to form pairs of input lanes which are
8901 /// targeted at the same half of the final vector, and then use a dword shuffle
8902 /// to place them onto the right half, and finally unpack the paired lanes into
8903 /// their final position.
8905 /// The exact breakdown of how to form these dword pairs and align them on the
8906 /// correct sides is really tricky. See the comments within the function for
8907 /// more of the details.
8908 static SDValue lowerV8I16SingleInputVectorShuffle(
8909 SDLoc DL, SDValue V, MutableArrayRef<int> Mask,
8910 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
8911 assert(V.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
8912 MutableArrayRef<int> LoMask = Mask.slice(0, 4);
8913 MutableArrayRef<int> HiMask = Mask.slice(4, 4);
8915 SmallVector<int, 4> LoInputs;
8916 std::copy_if(LoMask.begin(), LoMask.end(), std::back_inserter(LoInputs),
8917 [](int M) { return M >= 0; });
8918 std::sort(LoInputs.begin(), LoInputs.end());
8919 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()), LoInputs.end());
8920 SmallVector<int, 4> HiInputs;
8921 std::copy_if(HiMask.begin(), HiMask.end(), std::back_inserter(HiInputs),
8922 [](int M) { return M >= 0; });
8923 std::sort(HiInputs.begin(), HiInputs.end());
8924 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()), HiInputs.end());
8925 int NumLToL =
8926 std::lower_bound(LoInputs.begin(), LoInputs.end(), 4) - LoInputs.begin();
8927 int NumHToL = LoInputs.size() - NumLToL;
8928 int NumLToH =
8929 std::lower_bound(HiInputs.begin(), HiInputs.end(), 4) - HiInputs.begin();
8930 int NumHToH = HiInputs.size() - NumLToH;
8931 MutableArrayRef<int> LToLInputs(LoInputs.data(), NumLToL);
8932 MutableArrayRef<int> LToHInputs(HiInputs.data(), NumLToH);
8933 MutableArrayRef<int> HToLInputs(LoInputs.data() + NumLToL, NumHToL);
8934 MutableArrayRef<int> HToHInputs(HiInputs.data() + NumLToH, NumHToH);
8936 // Check for being able to broadcast a single element.
8937 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i16, DL, V,
8938 Mask, Subtarget, DAG))
8939 return Broadcast;
8941 // Try to use bit shift instructions.
8942 if (SDValue Shift = lowerVectorShuffleAsBitShift(
8943 DL, MVT::v8i16, V, V, Mask, DAG))
8944 return Shift;
8946 // Try to use byte shift instructions.
8947 if (SDValue Shift = lowerVectorShuffleAsByteShift(
8948 DL, MVT::v8i16, V, V, Mask, DAG))
8949 return Shift;
8951 // Use dedicated unpack instructions for masks that match their pattern.
8952 if (isShuffleEquivalent(V, V, Mask, 0, 0, 1, 1, 2, 2, 3, 3))
8953 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V, V);
8954 if (isShuffleEquivalent(V, V, Mask, 4, 4, 5, 5, 6, 6, 7, 7))
8955 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V, V);
8957 // Try to use byte rotation instructions.
8958 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
8959 DL, MVT::v8i16, V, V, Mask, Subtarget, DAG))
8960 return Rotate;
8962 // Simplify the 1-into-3 and 3-into-1 cases with a single pshufd. For all
8963 // such inputs we can swap two of the dwords across the half mark and end up
8964 // with <=2 inputs to each half in each half. Once there, we can fall through
8965 // to the generic code below. For example:
8967 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
8968 // Mask: [0, 1, 2, 7, 4, 5, 6, 3] -----------------> [0, 1, 4, 7, 2, 3, 6, 5]
8970 // However in some very rare cases we have a 1-into-3 or 3-into-1 on one half
8971 // and an existing 2-into-2 on the other half. In this case we may have to
8972 // pre-shuffle the 2-into-2 half to avoid turning it into a 3-into-1 or
8973 // 1-into-3 which could cause us to cycle endlessly fixing each side in turn.
8974 // Fortunately, we don't have to handle anything but a 2-into-2 pattern
8975 // because any other situation (including a 3-into-1 or 1-into-3 in the other
8976 // half than the one we target for fixing) will be fixed when we re-enter this
8977 // path. We will also combine away any sequence of PSHUFD instructions that
8978 // result into a single instruction. Here is an example of the tricky case:
8980 // Input: [a, b, c, d, e, f, g, h] -PSHUFD[0,2,1,3]-> [a, b, e, f, c, d, g, h]
8981 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -THIS-IS-BAD!!!!-> [5, 7, 1, 0, 4, 7, 5, 3]
8983 // This now has a 1-into-3 in the high half! Instead, we do two shuffles:
8985 // Input: [a, b, c, d, e, f, g, h] PSHUFHW[0,2,1,3]-> [a, b, c, d, e, g, f, h]
8986 // Mask: [3, 7, 1, 0, 2, 7, 3, 5] -----------------> [3, 7, 1, 0, 2, 7, 3, 6]
8988 // Input: [a, b, c, d, e, g, f, h] -PSHUFD[0,2,1,3]-> [a, b, e, g, c, d, f, h]
8989 // Mask: [3, 7, 1, 0, 2, 7, 3, 6] -----------------> [5, 7, 1, 0, 4, 7, 5, 6]
8991 // The result is fine to be handled by the generic logic.
8992 auto balanceSides = [&](ArrayRef<int> AToAInputs, ArrayRef<int> BToAInputs,
8993 ArrayRef<int> BToBInputs, ArrayRef<int> AToBInputs,
8994 int AOffset, int BOffset) {
8995 assert((AToAInputs.size() == 3 || AToAInputs.size() == 1) &&
8996 "Must call this with A having 3 or 1 inputs from the A half.");
8997 assert((BToAInputs.size() == 1 || BToAInputs.size() == 3) &&
8998 "Must call this with B having 1 or 3 inputs from the B half.");
8999 assert(AToAInputs.size() + BToAInputs.size() == 4 &&
9000 "Must call this with either 3:1 or 1:3 inputs (summing to 4).");
9002 // Compute the index of dword with only one word among the three inputs in
9003 // a half by taking the sum of the half with three inputs and subtracting
9004 // the sum of the actual three inputs. The difference is the remaining
9005 // slot.
9006 int ADWord, BDWord;
9007 int &TripleDWord = AToAInputs.size() == 3 ? ADWord : BDWord;
9008 int &OneInputDWord = AToAInputs.size() == 3 ? BDWord : ADWord;
9009 int TripleInputOffset = AToAInputs.size() == 3 ? AOffset : BOffset;
9010 ArrayRef<int> TripleInputs = AToAInputs.size() == 3 ? AToAInputs : BToAInputs;
9011 int OneInput = AToAInputs.size() == 3 ? BToAInputs[0] : AToAInputs[0];
9012 int TripleInputSum = 0 + 1 + 2 + 3 + (4 * TripleInputOffset);
9013 int TripleNonInputIdx =
9014 TripleInputSum - std::accumulate(TripleInputs.begin(), TripleInputs.end(), 0);
9015 TripleDWord = TripleNonInputIdx / 2;
9017 // We use xor with one to compute the adjacent DWord to whichever one the
9018 // OneInput is in.
9019 OneInputDWord = (OneInput / 2) ^ 1;
9021 // Check for one tricky case: We're fixing a 3<-1 or a 1<-3 shuffle for AToA
9022 // and BToA inputs. If there is also such a problem with the BToB and AToB
9023 // inputs, we don't try to fix it necessarily -- we'll recurse and see it in
9024 // the next pass. However, if we have a 2<-2 in the BToB and AToB inputs, it
9025 // is essential that we don't *create* a 3<-1 as then we might oscillate.
9026 if (BToBInputs.size() == 2 && AToBInputs.size() == 2) {
9027 // Compute how many inputs will be flipped by swapping these DWords. We need
9029 // to balance this to ensure we don't form a 3-1 shuffle in the other half.
9031 int NumFlippedAToBInputs =
9032 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord) +
9033 std::count(AToBInputs.begin(), AToBInputs.end(), 2 * ADWord + 1);
9034 int NumFlippedBToBInputs =
9035 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord) +
9036 std::count(BToBInputs.begin(), BToBInputs.end(), 2 * BDWord + 1);
9037 if ((NumFlippedAToBInputs == 1 &&
9038 (NumFlippedBToBInputs == 0 || NumFlippedBToBInputs == 2)) ||
9039 (NumFlippedBToBInputs == 1 &&
9040 (NumFlippedAToBInputs == 0 || NumFlippedAToBInputs == 2))) {
9041 // We choose whether to fix the A half or B half based on whether that
9042 // half has zero flipped inputs. At zero, we may not be able to fix it
9043 // with that half. We also bias towards fixing the B half because that
9044 // will more commonly be the high half, and we have to bias one way.
9045 auto FixFlippedInputs = [&V, &DL, &Mask, &DAG](int PinnedIdx, int DWord,
9046 ArrayRef<int> Inputs) {
9047 int FixIdx = PinnedIdx ^ 1; // The adjacent slot to the pinned slot.
9048 bool IsFixIdxInput = std::find(Inputs.begin(), Inputs.end(),
9049 PinnedIdx ^ 1) != Inputs.end();
9050 // Determine whether the free index is in the flipped dword or the
9051 // unflipped dword based on where the pinned index is. We use this bit
9052 // in an xor to conditionally select the adjacent dword.
9053 int FixFreeIdx = 2 * (DWord ^ (PinnedIdx / 2 == DWord));
9054 bool IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9055 FixFreeIdx) != Inputs.end();
9056 if (IsFixIdxInput == IsFixFreeIdxInput)
9058 IsFixFreeIdxInput = std::find(Inputs.begin(), Inputs.end(),
9059 FixFreeIdx) != Inputs.end();
9060 assert(IsFixIdxInput != IsFixFreeIdxInput &&
9061 "We need to be changing the number of flipped inputs!");
9062 int PSHUFHalfMask[] = {0, 1, 2, 3};
9063 std::swap(PSHUFHalfMask[FixFreeIdx % 4], PSHUFHalfMask[FixIdx % 4]);
9064 V = DAG.getNode(FixIdx < 4 ? X86ISD::PSHUFLW : X86ISD::PSHUFHW, DL,
9066 getV4X86ShuffleImm8ForMask(PSHUFHalfMask, DAG));
9068 for (int &M : Mask)
9069 if (M != -1 && M == FixIdx)
9070 M = FixFreeIdx;
9071 else if (M != -1 && M == FixFreeIdx)
9072 M = FixIdx;
9073 };
9074 if (NumFlippedBToBInputs != 0) {
9075 int BPinnedIdx =
9076 BToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
9077 FixFlippedInputs(BPinnedIdx, BDWord, BToBInputs);
9079 assert(NumFlippedAToBInputs != 0 && "Impossible given predicates!");
9081 AToAInputs.size() == 3 ? TripleNonInputIdx : OneInput;
9082 FixFlippedInputs(APinnedIdx, ADWord, AToBInputs);
9087 int PSHUFDMask[] = {0, 1, 2, 3};
9088 PSHUFDMask[ADWord] = BDWord;
9089 PSHUFDMask[BDWord] = ADWord;
9090 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9091 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9092 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9093 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9095 // Adjust the mask to match the new locations of A and B.
9096 for (int &M : Mask)
9097 if (M != -1 && M/2 == ADWord)
9098 M = 2 * BDWord + M % 2;
9099 else if (M != -1 && M/2 == BDWord)
9100 M = 2 * ADWord + M % 2;
9102 // Recurse back into this routine to re-compute state now that this isn't
9103 // a 3 and 1 problem.
9104 return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
9105 Mask);
9106 };
9107 if ((NumLToL == 3 && NumHToL == 1) || (NumLToL == 1 && NumHToL == 3))
9108 return balanceSides(LToLInputs, HToLInputs, HToHInputs, LToHInputs, 0, 4);
9109 else if ((NumHToH == 3 && NumLToH == 1) || (NumHToH == 1 && NumLToH == 3))
9110 return balanceSides(HToHInputs, LToHInputs, LToLInputs, HToLInputs, 4, 0);
9112 // At this point there are at most two inputs to the low and high halves from
9113 // each half. That means the inputs can always be grouped into dwords and
9114 // those dwords can then be moved to the correct half with a dword shuffle.
9115 // We use at most one low and one high word shuffle to collect these paired
9116 // inputs into dwords, and finally a dword shuffle to place them.
9117 int PSHUFLMask[4] = {-1, -1, -1, -1};
9118 int PSHUFHMask[4] = {-1, -1, -1, -1};
9119 int PSHUFDMask[4] = {-1, -1, -1, -1};
9121 // First fix the masks for all the inputs that are staying in their
9122 // original halves. This will then dictate the targets of the cross-half
9123 // shuffles.
9124 auto fixInPlaceInputs =
9125 [&PSHUFDMask](ArrayRef<int> InPlaceInputs, ArrayRef<int> IncomingInputs,
9126 MutableArrayRef<int> SourceHalfMask,
9127 MutableArrayRef<int> HalfMask, int HalfOffset) {
9128 if (InPlaceInputs.empty())
9129 return;
9130 if (InPlaceInputs.size() == 1) {
9131 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9132 InPlaceInputs[0] - HalfOffset;
9133 PSHUFDMask[InPlaceInputs[0] / 2] = InPlaceInputs[0] / 2;
9134 return;
9135 }
9136 if (IncomingInputs.empty()) {
9137 // Just fix all of the in place inputs.
9138 for (int Input : InPlaceInputs) {
9139 SourceHalfMask[Input - HalfOffset] = Input - HalfOffset;
9140 PSHUFDMask[Input / 2] = Input / 2;
9141 }
9142 return;
9143 }
9145 assert(InPlaceInputs.size() == 2 && "Cannot handle 3 or 4 inputs!");
9146 SourceHalfMask[InPlaceInputs[0] - HalfOffset] =
9147 InPlaceInputs[0] - HalfOffset;
9148 // Put the second input next to the first so that they are packed into
9149 // a dword. We find the adjacent index by toggling the low bit.
9150 int AdjIndex = InPlaceInputs[0] ^ 1;
9151 SourceHalfMask[AdjIndex - HalfOffset] = InPlaceInputs[1] - HalfOffset;
9152 std::replace(HalfMask.begin(), HalfMask.end(), InPlaceInputs[1], AdjIndex);
9153 PSHUFDMask[AdjIndex / 2] = AdjIndex / 2;
9155 fixInPlaceInputs(LToLInputs, HToLInputs, PSHUFLMask, LoMask, 0);
9156 fixInPlaceInputs(HToHInputs, LToHInputs, PSHUFHMask, HiMask, 4);
9158 // Now gather the cross-half inputs and place them into a free dword of
9159 // their target half.
9160 // FIXME: This operation could almost certainly be simplified dramatically to
9161 // look more like the 3-1 fixing operation.
9162 auto moveInputsToRightHalf = [&PSHUFDMask](
9163 MutableArrayRef<int> IncomingInputs, ArrayRef<int> ExistingInputs,
9164 MutableArrayRef<int> SourceHalfMask, MutableArrayRef<int> HalfMask,
9165 MutableArrayRef<int> FinalSourceHalfMask, int SourceOffset,
9166 int DestOffset) {
9167 auto isWordClobbered = [](ArrayRef<int> SourceHalfMask, int Word) {
9168 return SourceHalfMask[Word] != -1 && SourceHalfMask[Word] != Word;
9170 auto isDWordClobbered = [&isWordClobbered](ArrayRef<int> SourceHalfMask,
9171 int Word) {
9172 int LowWord = Word & ~1;
9173 int HighWord = Word | 1;
9174 return isWordClobbered(SourceHalfMask, LowWord) ||
9175 isWordClobbered(SourceHalfMask, HighWord);
9178 if (IncomingInputs.empty())
9179 return;
9181 if (ExistingInputs.empty()) {
9182 // Map any dwords with inputs from them into the right half.
9183 for (int Input : IncomingInputs) {
9184 // If the source half mask maps over the inputs, turn those into
9185 // swaps and use the swapped lane.
9186 if (isWordClobbered(SourceHalfMask, Input - SourceOffset)) {
9187 if (SourceHalfMask[SourceHalfMask[Input - SourceOffset]] == -1) {
9188 SourceHalfMask[SourceHalfMask[Input - SourceOffset]] =
9189 Input - SourceOffset;
9190 // We have to swap the uses in our half mask in one sweep.
9191 for (int &M : HalfMask)
9192 if (M == SourceHalfMask[Input - SourceOffset] + SourceOffset)
9193 M = Input;
9194 else if (M == Input)
9195 M = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9196 } else {
9197 assert(SourceHalfMask[SourceHalfMask[Input - SourceOffset]] ==
9198 Input - SourceOffset &&
9199 "Previous placement doesn't match!");
9201 // Note that this correctly re-maps both when we do a swap and when
9202 // we observe the other side of the swap above. We rely on that to
9203 // avoid swapping the members of the input list directly.
9204 Input = SourceHalfMask[Input - SourceOffset] + SourceOffset;
9207 // Map the input's dword into the correct half.
9208 if (PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] == -1)
9209 PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] = Input / 2;
9210 else
9211 assert(PSHUFDMask[(Input - SourceOffset + DestOffset) / 2] ==
9212 Input / 2 &&
9213 "Previous placement doesn't match!");
9216 // And just directly shift any other-half mask elements to be same-half
9217 // as we will have mirrored the dword containing the element into the
9218 // same position within that half.
9219 for (int &M : HalfMask)
9220 if (M >= SourceOffset && M < SourceOffset + 4) {
9221 M = M - SourceOffset + DestOffset;
9222 assert(M >= 0 && "This should never wrap below zero!");
9227 // Ensure we have the input in a viable dword of its current half. This
9228 // is particularly tricky because the original position may be clobbered
9229 // by inputs being moved and *staying* in that half.
9230 if (IncomingInputs.size() == 1) {
9231 if (isWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9232 int InputFixed = std::find(std::begin(SourceHalfMask),
9233 std::end(SourceHalfMask), -1) -
9234 std::begin(SourceHalfMask) + SourceOffset;
9235 SourceHalfMask[InputFixed - SourceOffset] =
9236 IncomingInputs[0] - SourceOffset;
9237 std::replace(HalfMask.begin(), HalfMask.end(), IncomingInputs[0],
9239 IncomingInputs[0] = InputFixed;
9241 } else if (IncomingInputs.size() == 2) {
9242 if (IncomingInputs[0] / 2 != IncomingInputs[1] / 2 ||
9243 isDWordClobbered(SourceHalfMask, IncomingInputs[0] - SourceOffset)) {
9244 // We have two non-adjacent or clobbered inputs we need to extract from
9245 // the source half. To do this, we need to map them into some adjacent
9246 // dword slot in the source mask.
9247 int InputsFixed[2] = {IncomingInputs[0] - SourceOffset,
9248 IncomingInputs[1] - SourceOffset};
9250 // If there is a free slot in the source half mask adjacent to one of
9251 // the inputs, place the other input in it. We use (Index XOR 1) to
9252 // compute an adjacent index.
9253 if (!isWordClobbered(SourceHalfMask, InputsFixed[0]) &&
9254 SourceHalfMask[InputsFixed[0] ^ 1] == -1) {
9255 SourceHalfMask[InputsFixed[0]] = InputsFixed[0];
9256 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9257 InputsFixed[1] = InputsFixed[0] ^ 1;
9258 } else if (!isWordClobbered(SourceHalfMask, InputsFixed[1]) &&
9259 SourceHalfMask[InputsFixed[1] ^ 1] == -1) {
9260 SourceHalfMask[InputsFixed[1]] = InputsFixed[1];
9261 SourceHalfMask[InputsFixed[1] ^ 1] = InputsFixed[0];
9262 InputsFixed[0] = InputsFixed[1] ^ 1;
9263 } else if (SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] == -1 &&
9264 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] == -1) {
9265 // The two inputs are in the same DWord but it is clobbered and the
9266 // adjacent DWord isn't used at all. Move both inputs to the free
9268 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1)] = InputsFixed[0];
9269 SourceHalfMask[2 * ((InputsFixed[0] / 2) ^ 1) + 1] = InputsFixed[1];
9270 InputsFixed[0] = 2 * ((InputsFixed[0] / 2) ^ 1);
9271 InputsFixed[1] = 2 * ((InputsFixed[0] / 2) ^ 1) + 1;
9272 } else {
9273 // The only way we hit this point is if there is no clobbering
9274 // (because there are no off-half inputs to this half) and there is no
9275 // free slot adjacent to one of the inputs. In this case, we have to
9276 // swap an input with a non-input.
9277 for (int i = 0; i < 4; ++i)
9278 assert((SourceHalfMask[i] == -1 || SourceHalfMask[i] == i) &&
9279 "We can't handle any clobbers here!");
9280 assert(InputsFixed[1] != (InputsFixed[0] ^ 1) &&
9281 "Cannot have adjacent inputs here!");
9283 SourceHalfMask[InputsFixed[0] ^ 1] = InputsFixed[1];
9284 SourceHalfMask[InputsFixed[1]] = InputsFixed[0] ^ 1;
9286 // We also have to update the final source mask in this case because
9287 // it may need to undo the above swap.
9288 for (int &M : FinalSourceHalfMask)
9289 if (M == (InputsFixed[0] ^ 1) + SourceOffset)
9290 M = InputsFixed[1] + SourceOffset;
9291 else if (M == InputsFixed[1] + SourceOffset)
9292 M = (InputsFixed[0] ^ 1) + SourceOffset;
9294 InputsFixed[1] = InputsFixed[0] ^ 1;
9297 // Point everything at the fixed inputs.
9298 for (int &M : HalfMask)
9299 if (M == IncomingInputs[0])
9300 M = InputsFixed[0] + SourceOffset;
9301 else if (M == IncomingInputs[1])
9302 M = InputsFixed[1] + SourceOffset;
9304 IncomingInputs[0] = InputsFixed[0] + SourceOffset;
9305 IncomingInputs[1] = InputsFixed[1] + SourceOffset;
9308 llvm_unreachable("Unhandled input size!");
9311 // Now hoist the DWord down to the right half.
9312 int FreeDWord = (PSHUFDMask[DestOffset / 2] == -1 ? 0 : 1) + DestOffset / 2;
9313 assert(PSHUFDMask[FreeDWord] == -1 && "DWord not free");
9314 PSHUFDMask[FreeDWord] = IncomingInputs[0] / 2;
9315 for (int &M : HalfMask)
9316 for (int Input : IncomingInputs)
9317 if (M == Input)
9318 M = FreeDWord * 2 + Input % 2;
9320 moveInputsToRightHalf(HToLInputs, LToLInputs, PSHUFHMask, LoMask, HiMask,
9321 /*SourceOffset*/ 4, /*DestOffset*/ 0);
9322 moveInputsToRightHalf(LToHInputs, HToHInputs, PSHUFLMask, HiMask, LoMask,
9323 /*SourceOffset*/ 0, /*DestOffset*/ 4);
9325 // Now enact all the shuffles we've computed to move the inputs into their
9326 // target halves.
9327 if (!isNoopShuffleMask(PSHUFLMask))
9328 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9329 getV4X86ShuffleImm8ForMask(PSHUFLMask, DAG));
9330 if (!isNoopShuffleMask(PSHUFHMask))
9331 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9332 getV4X86ShuffleImm8ForMask(PSHUFHMask, DAG));
9333 if (!isNoopShuffleMask(PSHUFDMask))
9334 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9335 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32,
9336 DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V),
9337 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
9339 // At this point, each half should contain all its inputs, and we can then
9340 // just shuffle them into their final position.
9341 assert(std::count_if(LoMask.begin(), LoMask.end(),
9342 [](int M) { return M >= 4; }) == 0 &&
9343 "Failed to lift all the high half inputs to the low mask!");
9344 assert(std::count_if(HiMask.begin(), HiMask.end(),
9345 [](int M) { return M >= 0 && M < 4; }) == 0 &&
9346 "Failed to lift all the low half inputs to the high mask!");
9348 // Do a half shuffle for the low mask.
9349 if (!isNoopShuffleMask(LoMask))
9350 V = DAG.getNode(X86ISD::PSHUFLW, DL, MVT::v8i16, V,
9351 getV4X86ShuffleImm8ForMask(LoMask, DAG));
9353 // Do a half shuffle with the high mask after shifting its values down.
9354 for (int &M : HiMask)
9355 if (M >= 0)
9356 M -= 4;
9357 if (!isNoopShuffleMask(HiMask))
9358 V = DAG.getNode(X86ISD::PSHUFHW, DL, MVT::v8i16, V,
9359 getV4X86ShuffleImm8ForMask(HiMask, DAG));
9361 return V;
9362 }
9364 /// \brief Detect whether the mask pattern should be lowered through
9365 /// interleaving.
9366 ///
9367 /// This essentially tests whether viewing the mask as an interleaving of two
9368 /// sub-sequences reduces the cross-input traffic of a blend operation. If so,
9369 /// lowering it through interleaving is a significantly better strategy.
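// Worked example (illustrative, not from the original comments): for the
// v8i16 mask [ 0, 8, 1, 9, 2, 10, 3, 11 ] every even output element comes
// from V1 and every odd one from V2, so an interleaving (unpack-style)
// lowering needs no cross-input fix-ups while a lo/hi split would, and this
// routine returns true.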
9370 static bool shouldLowerAsInterleaving(ArrayRef<int> Mask) {
9371 int NumEvenInputs[2] = {0, 0};
9372 int NumOddInputs[2] = {0, 0};
9373 int NumLoInputs[2] = {0, 0};
9374 int NumHiInputs[2] = {0, 0};
9375 for (int i = 0, Size = Mask.size(); i < Size; ++i) {
9379 int InputIdx = Mask[i] >= Size;
9382 ++NumLoInputs[InputIdx];
9384 ++NumHiInputs[InputIdx];
9387 ++NumEvenInputs[InputIdx];
9389 ++NumOddInputs[InputIdx];
9392 // The minimum number of cross-input results for both the interleaved and
9393 // split cases. If interleaving results in fewer cross-input results, return
9395 int InterleavedCrosses = std::min(NumEvenInputs[1] + NumOddInputs[0],
9396 NumEvenInputs[0] + NumOddInputs[1]);
9397 int SplitCrosses = std::min(NumLoInputs[1] + NumHiInputs[0],
9398 NumLoInputs[0] + NumHiInputs[1]);
9399 return InterleavedCrosses < SplitCrosses;
9402 /// \brief Blend two v8i16 vectors using a naive unpack strategy.
9404 /// This strategy only works when the inputs from each vector fit into a single
9405 /// half of that vector, and generally there are not so many inputs as to leave
9406 /// the in-place shuffles required highly constrained (and thus expensive). It
9407 /// shifts all the inputs into a single side of both input vectors and then
9408 /// uses an unpack to interleave these inputs in a single vector. At that
9409 /// point, we will fall back on the generic single input shuffle lowering.
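///
/// As a rough illustration, for the mask <0, 1, 8, 9, -1, -1, -1, -1> all four
/// inputs already sit in the low halves of V1 and V2, so no pre-shuffling is
/// needed; an UNPCKL of V1 and V2 then interleaves them as
/// <V1[0], V2[0], V1[1], V2[1], ...>, and the munged single-input mask
/// <0, 2, 1, 3, -1, -1, -1, -1> (via M = 2 * (M % 4) + (M / 8)) reproduces the
/// requested order.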
9410 static SDValue lowerV8I16BasicBlendVectorShuffle(SDLoc DL, SDValue V1,
9412 MutableArrayRef<int> Mask,
9413 const X86Subtarget *Subtarget,
9414 SelectionDAG &DAG) {
9415 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9416 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad input type!");
9417 SmallVector<int, 3> LoV1Inputs, HiV1Inputs, LoV2Inputs, HiV2Inputs;
9418 for (int i = 0; i < 8; ++i)
9419 if (Mask[i] >= 0 && Mask[i] < 4)
9420 LoV1Inputs.push_back(i);
9421 else if (Mask[i] >= 4 && Mask[i] < 8)
9422 HiV1Inputs.push_back(i);
9423 else if (Mask[i] >= 8 && Mask[i] < 12)
9424 LoV2Inputs.push_back(i);
9425 else if (Mask[i] >= 12)
9426 HiV2Inputs.push_back(i);
9428 int NumV1Inputs = LoV1Inputs.size() + HiV1Inputs.size();
9429 int NumV2Inputs = LoV2Inputs.size() + HiV2Inputs.size();
9432 assert(NumV1Inputs > 0 && NumV1Inputs <= 3 && "At most 3 inputs supported");
9433 assert(NumV2Inputs > 0 && NumV2Inputs <= 3 && "At most 3 inputs supported");
9434 assert(NumV1Inputs + NumV2Inputs <= 4 && "At most 4 combined inputs");
9436 bool MergeFromLo = LoV1Inputs.size() + LoV2Inputs.size() >=
9437 HiV1Inputs.size() + HiV2Inputs.size();
9439 auto moveInputsToHalf = [&](SDValue V, ArrayRef<int> LoInputs,
9440 ArrayRef<int> HiInputs, bool MoveToLo,
9442 ArrayRef<int> GoodInputs = MoveToLo ? LoInputs : HiInputs;
9443 ArrayRef<int> BadInputs = MoveToLo ? HiInputs : LoInputs;
9444 if (BadInputs.empty())
9447 int MoveMask[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9448 int MoveOffset = MoveToLo ? 0 : 4;
9450 if (GoodInputs.empty()) {
9451 for (int BadInput : BadInputs) {
9452 MoveMask[Mask[BadInput] % 4 + MoveOffset] = Mask[BadInput] - MaskOffset;
9453 Mask[BadInput] = Mask[BadInput] % 4 + MoveOffset + MaskOffset;
9456 if (GoodInputs.size() == 2) {
9457 // If the low inputs are spread across two dwords, pack them into a single dword.
9459 MoveMask[MoveOffset] = Mask[GoodInputs[0]] - MaskOffset;
9460 MoveMask[MoveOffset + 1] = Mask[GoodInputs[1]] - MaskOffset;
9461 Mask[GoodInputs[0]] = MoveOffset + MaskOffset;
9462 Mask[GoodInputs[1]] = MoveOffset + 1 + MaskOffset;
9464 // Otherwise pin the good inputs.
9465 for (int GoodInput : GoodInputs)
9466 MoveMask[Mask[GoodInput] - MaskOffset] = Mask[GoodInput] - MaskOffset;
9469 if (BadInputs.size() == 2) {
9470 // If we have two bad inputs then there may be either one or two good
9471 // inputs fixed in place. Find a fixed input, and then find the *other*
9472 // two adjacent indices by using modular arithmetic.
9474 std::find_if(std::begin(MoveMask) + MoveOffset, std::end(MoveMask),
9475 [](int M) { return M >= 0; }) -
9476 std::begin(MoveMask);
9478 ((((GoodMaskIdx - MoveOffset) & ~1) + 2) % 4) + MoveOffset;
9479 assert(MoveMask[MoveMaskIdx] == -1 && "Expected empty slot");
9480 assert(MoveMask[MoveMaskIdx + 1] == -1 && "Expected empty slot");
9481 MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
9482 MoveMask[MoveMaskIdx + 1] = Mask[BadInputs[1]] - MaskOffset;
9483 Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
9484 Mask[BadInputs[1]] = MoveMaskIdx + 1 + MaskOffset;
9486 assert(BadInputs.size() == 1 && "All sizes handled");
9487 int MoveMaskIdx = std::find(std::begin(MoveMask) + MoveOffset,
9488 std::end(MoveMask), -1) -
9489 std::begin(MoveMask);
9490 MoveMask[MoveMaskIdx] = Mask[BadInputs[0]] - MaskOffset;
9491 Mask[BadInputs[0]] = MoveMaskIdx + MaskOffset;
9495 return DAG.getVectorShuffle(MVT::v8i16, DL, V, DAG.getUNDEF(MVT::v8i16),
9498 V1 = moveInputsToHalf(V1, LoV1Inputs, HiV1Inputs, MergeFromLo,
9500 V2 = moveInputsToHalf(V2, LoV2Inputs, HiV2Inputs, MergeFromLo,
9503 // FIXME: Select an interleaving of the merge of V1 and V2 that minimizes
9504 // cross-half traffic in the final shuffle.
9506 // Munge the mask to be a single-input mask after the unpack merges the results.
9510 M = 2 * (M % 4) + (M / 8);
9512 return DAG.getVectorShuffle(
9513 MVT::v8i16, DL, DAG.getNode(MergeFromLo ? X86ISD::UNPCKL : X86ISD::UNPCKH,
9514 DL, MVT::v8i16, V1, V2),
9515 DAG.getUNDEF(MVT::v8i16), Mask);
9518 /// \brief Generic lowering of 8-lane i16 shuffles.
9520 /// This handles both single-input shuffles and combined shuffle/blends with
9521 /// two inputs. The single input shuffles are immediately delegated to
9522 /// a dedicated lowering routine.
9524 /// The blends are lowered in one of three fundamental ways. If there are few
9525 /// enough inputs, it delegates to a basic UNPCK-based strategy. If the shuffle
9526 /// of the input is significantly cheaper when lowered as an interleaving of
9527 /// the two inputs, try to interleave them. Otherwise, blend the low and high
9528 /// halves of the inputs separately (making them have relatively few inputs)
9529 /// and then concatenate them.
9530 static SDValue lowerV8I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9531 const X86Subtarget *Subtarget,
9532 SelectionDAG &DAG) {
9534 assert(Op.getSimpleValueType() == MVT::v8i16 && "Bad shuffle type!");
9535 assert(V1.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9536 assert(V2.getSimpleValueType() == MVT::v8i16 && "Bad operand type!");
9537 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9538 ArrayRef<int> OrigMask = SVOp->getMask();
9539 int MaskStorage[8] = {OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9540 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7]};
9541 MutableArrayRef<int> Mask(MaskStorage);
9543 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
9545 // Whenever we can lower this as a zext, that instruction is strictly faster
9546 // than any alternative.
9547 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9548 DL, MVT::v8i16, V1, V2, OrigMask, Subtarget, DAG))
9551 auto isV1 = [](int M) { return M >= 0 && M < 8; };
9552 auto isV2 = [](int M) { return M >= 8; };
9554 int NumV1Inputs = std::count_if(Mask.begin(), Mask.end(), isV1);
9555 int NumV2Inputs = std::count_if(Mask.begin(), Mask.end(), isV2);
9557 if (NumV2Inputs == 0)
9558 return lowerV8I16SingleInputVectorShuffle(DL, V1, Mask, Subtarget, DAG);
9560 assert(NumV1Inputs > 0 && "All single-input shuffles should be canonicalized "
9561 "to be V1-input shuffles.");
9563 // Try to use bit shift instructions.
9564 if (SDValue Shift = lowerVectorShuffleAsBitShift(
9565 DL, MVT::v8i16, V1, V2, Mask, DAG))
9568 // Try to use byte shift instructions.
9569 if (SDValue Shift = lowerVectorShuffleAsByteShift(
9570 DL, MVT::v8i16, V1, V2, Mask, DAG))
9573 // There are special ways we can lower some single-element blends.
9574 if (NumV2Inputs == 1)
9575 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v8i16, DL, V1, V2,
9576 Mask, Subtarget, DAG))
9579 if (Subtarget->hasSSE41())
9580 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i16, V1, V2, Mask,
9584 if (SDValue Masked =
9585 lowerVectorShuffleAsBitMask(DL, MVT::v8i16, V1, V2, Mask, DAG))
9588 // Use dedicated unpack instructions for masks that match their pattern.
9589 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 2, 10, 3, 11))
9590 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, V1, V2);
9591 if (isShuffleEquivalent(V1, V2, Mask, 4, 12, 5, 13, 6, 14, 7, 15))
9592 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i16, V1, V2);
9594 // Try to use byte rotation instructions.
9595 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9596 DL, MVT::v8i16, V1, V2, Mask, Subtarget, DAG))
9599 if (NumV1Inputs + NumV2Inputs <= 4)
9600 return lowerV8I16BasicBlendVectorShuffle(DL, V1, V2, Mask, Subtarget, DAG);
9602 // Check whether an interleaving lowering is likely to be more efficient.
9603 // This isn't perfect but it is a strong heuristic that tends to work well on
9604 // the kinds of shuffles that show up in practice.
9606 // FIXME: Handle 1x, 2x, and 4x interleaving.
9607 if (shouldLowerAsInterleaving(Mask)) {
9608 // FIXME: Figure out whether we should pack these into the low or high halves.
9611 int EMask[8], OMask[8];
9612 for (int i = 0; i < 4; ++i) {
9613 EMask[i] = Mask[2*i];
9614 OMask[i] = Mask[2*i + 1];
9619 SDValue Evens = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, EMask);
9620 SDValue Odds = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, OMask);
9622 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i16, Evens, Odds);
9625 int LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9626 int HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9628 for (int i = 0; i < 4; ++i) {
9629 LoBlendMask[i] = Mask[i];
9630 HiBlendMask[i] = Mask[i + 4];
9633 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
9634 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
9635 LoV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, LoV);
9636 HiV = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, HiV);
9638 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
9639 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v2i64, LoV, HiV));
9642 /// \brief Check whether a compaction lowering can be done by dropping even
9643 /// elements and compute how many times even elements must be dropped.
9645 /// This handles shuffles which take every Nth element where N is a power of
9646 /// two. Example shuffle masks:
9648 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 0, 2, 4, 6, 8, 10, 12, 14
9649 /// N = 1: 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
9650 /// N = 2: 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12, 0, 4, 8, 12
9651 /// N = 2: 0, 4, 8, 12, 16, 20, 24, 28, 0, 4, 8, 12, 16, 20, 24, 28
9652 /// N = 3: 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8, 0, 8
9653 /// N = 3: 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24, 0, 8, 16, 24
9655 /// Any of these lanes can of course be undef.
9657 /// This routine only supports N <= 3.
9658 /// FIXME: Evaluate whether either AVX or AVX-512 have any opportunities here
9661 /// \returns N above, or the number of times even elements must be dropped if
9662 /// there is such a number. Otherwise returns zero.
9663 static int canLowerByDroppingEvenElements(ArrayRef<int> Mask) {
9664 // Figure out whether we're looping over two inputs or just one.
9665 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9667 // The modulus for the shuffle vector entries is based on whether this is
9668 // a single input or not.
9669 int ShuffleModulus = Mask.size() * (IsSingleInput ? 1 : 2);
9670 assert(isPowerOf2_32((uint32_t)ShuffleModulus) &&
9671 "We should only be called with masks with a power-of-2 size!");
9673 uint64_t ModMask = (uint64_t)ShuffleModulus - 1;
9675 // We track whether the input is viable for all power-of-2 strides 2^1, 2^2,
9676 // and 2^3 simultaneously. This is because we may have ambiguity with
9677 // partially undef inputs.
9678 bool ViableForN[3] = {true, true, true};
9680 for (int i = 0, e = Mask.size(); i < e; ++i) {
9681 // Ignore undef lanes, we'll optimistically collapse them to the pattern we want anyway.
9686 bool IsAnyViable = false;
9687 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9688 if (ViableForN[j]) {
9691 // The shuffle mask must be equal to (i * 2^N) % M.
9692 if ((uint64_t)Mask[i] == (((uint64_t)i << N) & ModMask))
9695 ViableForN[j] = false;
9697 // Early exit if we exhaust the possible powers of two.
9702 for (unsigned j = 0; j != array_lengthof(ViableForN); ++j)
9706 // Return 0 as there is no viable power of two.
9710 /// \brief Generic lowering of v16i8 shuffles.
9712 /// This is a hybrid strategy to lower v16i8 vectors. It first attempts to
9713 /// detect any complexity reducing interleaving. If that doesn't help, it uses
9714 /// UNPCK to spread the i8 elements across two i16-element vectors, and uses
9715 /// the existing lowering for v8i16 blends on each half, finally PACK-ing them back together.
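///
/// In the generic fallback below each input is typically unpacked against a
/// zero vector so that byte k becomes the low byte of 16-bit element k (a
/// 0x00FF mask is used instead when only even bytes are needed), the resulting
/// v8i16 halves are blended with the existing v8i16 lowering, and a final
/// PACKUS packs the 16-bit lanes back down to bytes.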
9717 static SDValue lowerV16I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
9718 const X86Subtarget *Subtarget,
9719 SelectionDAG &DAG) {
9721 assert(Op.getSimpleValueType() == MVT::v16i8 && "Bad shuffle type!");
9722 assert(V1.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9723 assert(V2.getSimpleValueType() == MVT::v16i8 && "Bad operand type!");
9724 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
9725 ArrayRef<int> OrigMask = SVOp->getMask();
9726 assert(OrigMask.size() == 16 && "Unexpected mask size for v16 shuffle!");
9728 // Try to use bit shift instructions.
9729 if (SDValue Shift = lowerVectorShuffleAsBitShift(
9730 DL, MVT::v16i8, V1, V2, OrigMask, DAG))
9733 // Try to use byte shift instructions.
9734 if (SDValue Shift = lowerVectorShuffleAsByteShift(
9735 DL, MVT::v16i8, V1, V2, OrigMask, DAG))
9738 // Try to use byte rotation instructions.
9739 if (SDValue Rotate = lowerVectorShuffleAsByteRotate(
9740 DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
9743 // Try to use a zext lowering.
9744 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(
9745 DL, MVT::v16i8, V1, V2, OrigMask, Subtarget, DAG))
9748 int MaskStorage[16] = {
9749 OrigMask[0], OrigMask[1], OrigMask[2], OrigMask[3],
9750 OrigMask[4], OrigMask[5], OrigMask[6], OrigMask[7],
9751 OrigMask[8], OrigMask[9], OrigMask[10], OrigMask[11],
9752 OrigMask[12], OrigMask[13], OrigMask[14], OrigMask[15]};
9753 MutableArrayRef<int> Mask(MaskStorage);
9754 MutableArrayRef<int> LoMask = Mask.slice(0, 8);
9755 MutableArrayRef<int> HiMask = Mask.slice(8, 8);
9758 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 16; });
9760 // For single-input shuffles, there are some nicer lowering tricks we can use.
9761 if (NumV2Elements == 0) {
9762 // Check for being able to broadcast a single element.
9763 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i8, DL, V1,
9764 Mask, Subtarget, DAG))
9767 // Check whether we can widen this to an i16 shuffle by duplicating bytes.
9768 // Notably, this handles splat and partial-splat shuffles more efficiently.
9769 // However, it only makes sense if the pre-duplication shuffle simplifies
9770 // things significantly. Currently, this means we need to be able to
9771 // express the pre-duplication shuffle as an i16 shuffle.
9773 // FIXME: We should check for other patterns which can be widened into an
9774 // i16 shuffle as well.
9775 auto canWidenViaDuplication = [](ArrayRef<int> Mask) {
9776 for (int i = 0; i < 16; i += 2)
9777 if (Mask[i] != -1 && Mask[i + 1] != -1 && Mask[i] != Mask[i + 1])
9782 auto tryToWidenViaDuplication = [&]() -> SDValue {
9783 if (!canWidenViaDuplication(Mask))
9785 SmallVector<int, 4> LoInputs;
9786 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(LoInputs),
9787 [](int M) { return M >= 0 && M < 8; });
9788 std::sort(LoInputs.begin(), LoInputs.end());
9789 LoInputs.erase(std::unique(LoInputs.begin(), LoInputs.end()),
9791 SmallVector<int, 4> HiInputs;
9792 std::copy_if(Mask.begin(), Mask.end(), std::back_inserter(HiInputs),
9793 [](int M) { return M >= 8; });
9794 std::sort(HiInputs.begin(), HiInputs.end());
9795 HiInputs.erase(std::unique(HiInputs.begin(), HiInputs.end()),
9798 bool TargetLo = LoInputs.size() >= HiInputs.size();
9799 ArrayRef<int> InPlaceInputs = TargetLo ? LoInputs : HiInputs;
9800 ArrayRef<int> MovingInputs = TargetLo ? HiInputs : LoInputs;
9802 int PreDupI16Shuffle[] = {-1, -1, -1, -1, -1, -1, -1, -1};
9803 SmallDenseMap<int, int, 8> LaneMap;
9804 for (int I : InPlaceInputs) {
9805 PreDupI16Shuffle[I/2] = I/2;
9808 int j = TargetLo ? 0 : 4, je = j + 4;
9809 for (int i = 0, ie = MovingInputs.size(); i < ie; ++i) {
9810 // Check if j is already a shuffle of this input. This happens when
9811 // there are two adjacent bytes after we move the low one.
9812 if (PreDupI16Shuffle[j] != MovingInputs[i] / 2) {
9813 // If we haven't yet mapped the input, search for a slot into which we can map it.
9815 while (j < je && PreDupI16Shuffle[j] != -1)
9819 // We can't place the inputs into a single half with a simple i16 shuffle, so bail.
9822 // Map this input with the i16 shuffle.
9823 PreDupI16Shuffle[j] = MovingInputs[i] / 2;
9826 // Update the lane map based on the mapping we ended up with.
9827 LaneMap[MovingInputs[i]] = 2 * j + MovingInputs[i] % 2;
9830 ISD::BITCAST, DL, MVT::v16i8,
9831 DAG.getVectorShuffle(MVT::v8i16, DL,
9832 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9833 DAG.getUNDEF(MVT::v8i16), PreDupI16Shuffle));
9835 // Unpack the bytes to form the i16s that will be shuffled into place.
9836 V1 = DAG.getNode(TargetLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9837 MVT::v16i8, V1, V1);
9839 int PostDupI16Shuffle[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9840 for (int i = 0; i < 16; ++i)
9841 if (Mask[i] != -1) {
9842 int MappedMask = LaneMap[Mask[i]] - (TargetLo ? 0 : 8);
9843 assert(MappedMask < 8 && "Invalid v8 shuffle mask!");
9844 if (PostDupI16Shuffle[i / 2] == -1)
9845 PostDupI16Shuffle[i / 2] = MappedMask;
9847 assert(PostDupI16Shuffle[i / 2] == MappedMask &&
9848 "Conflicting entrties in the original shuffle!");
9851 ISD::BITCAST, DL, MVT::v16i8,
9852 DAG.getVectorShuffle(MVT::v8i16, DL,
9853 DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1),
9854 DAG.getUNDEF(MVT::v8i16), PostDupI16Shuffle));
9856 if (SDValue V = tryToWidenViaDuplication())
9860 // Check whether an interleaving lowering is likely to be more efficient.
9861 // This isn't perfect but it is a strong heuristic that tends to work well on
9862 // the kinds of shuffles that show up in practice.
9864 // FIXME: We need to handle other interleaving widths (i16, i32, ...).
9865 if (shouldLowerAsInterleaving(Mask)) {
9866 int NumLoHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
9867 return (M >= 0 && M < 8) || (M >= 16 && M < 24);
9869 int NumHiHalf = std::count_if(Mask.begin(), Mask.end(), [](int M) {
9870 return (M >= 8 && M < 16) || M >= 24;
9872 int EMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
9873 -1, -1, -1, -1, -1, -1, -1, -1};
9874 int OMask[16] = {-1, -1, -1, -1, -1, -1, -1, -1,
9875 -1, -1, -1, -1, -1, -1, -1, -1};
9876 bool UnpackLo = NumLoHalf >= NumHiHalf;
9877 MutableArrayRef<int> TargetEMask(UnpackLo ? EMask : EMask + 8, 8);
9878 MutableArrayRef<int> TargetOMask(UnpackLo ? OMask : OMask + 8, 8);
9879 for (int i = 0; i < 8; ++i) {
9880 TargetEMask[i] = Mask[2 * i];
9881 TargetOMask[i] = Mask[2 * i + 1];
9884 SDValue Evens = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, EMask);
9885 SDValue Odds = DAG.getVectorShuffle(MVT::v16i8, DL, V1, V2, OMask);
9887 return DAG.getNode(UnpackLo ? X86ISD::UNPCKL : X86ISD::UNPCKH, DL,
9888 MVT::v16i8, Evens, Odds);
9891 // Check for SSSE3 which lets us lower all v16i8 shuffles much more directly
9892 // with PSHUFB. It is important to do this before we attempt to generate any
9893 // blends but after all of the single-input lowerings. If the single input
9894 // lowerings can find an instruction sequence that is faster than a PSHUFB, we
9895 // want to preserve that and we can DAG combine any longer sequences into
9896 // a PSHUFB in the end. But once we start blending from multiple inputs,
9897 // the complexity of DAG combining bad patterns back into PSHUFB is too high,
9898 // and there are *very* few patterns that would actually be faster than the
9899 // PSHUFB approach because of its ability to zero lanes.
9901 // FIXME: The only exceptions to the above are blends which are exact
9902 // interleavings with direct instructions supporting them. We currently don't
9903 // handle those well here.
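// In the PSHUFB path below, each input gets its own control vector: lanes
// that should come from the other input are given the index 0x80, which
// PSHUFB turns into a zero byte, so OR-ing the two shuffled results yields
// the blended vector.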
9904 if (Subtarget->hasSSSE3()) {
9907 bool V1InUse = false;
9908 bool V2InUse = false;
9909 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
9911 for (int i = 0; i < 16; ++i) {
9912 if (Mask[i] == -1) {
9913 V1Mask[i] = V2Mask[i] = DAG.getUNDEF(MVT::i8);
9915 const int ZeroMask = 0x80;
9916 int V1Idx = (Mask[i] < 16 ? Mask[i] : ZeroMask);
9917 int V2Idx = (Mask[i] < 16 ? ZeroMask : Mask[i] - 16);
9919 V1Idx = V2Idx = ZeroMask;
9920 V1Mask[i] = DAG.getConstant(V1Idx, MVT::i8);
9921 V2Mask[i] = DAG.getConstant(V2Idx, MVT::i8);
9922 V1InUse |= (ZeroMask != V1Idx);
9923 V2InUse |= (ZeroMask != V2Idx);
9928 V1 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V1,
9929 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V1Mask));
9931 V2 = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, V2,
9932 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, V2Mask));
9934 // If we need shuffled inputs from both, blend the two.
9935 if (V1InUse && V2InUse)
9936 return DAG.getNode(ISD::OR, DL, MVT::v16i8, V1, V2);
9938 return V1; // Single inputs are easy.
9940 return V2; // Single inputs are easy.
9941 // Shuffling to a zeroable vector.
9942 return getZeroVector(MVT::v16i8, Subtarget, DAG, DL);
9945 // There are special ways we can lower some single-element blends.
9946 if (NumV2Elements == 1)
9947 if (SDValue V = lowerVectorShuffleAsElementInsertion(MVT::v16i8, DL, V1, V2,
9948 Mask, Subtarget, DAG))
9951 // Check whether a compaction lowering can be done. This handles shuffles
9952 // which take every Nth element for some even N. See the helper function for details.
9955 // We special case these as they can be particularly efficiently handled with
9956 // the PACKUSWB instruction on x86 and they show up in common patterns of
9957 // rearranging bytes to truncate wide elements.
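//
// For example, a mask that takes every other byte (N = 1) is handled by
// AND-ing both inputs with a v8i16 splat of 0x00FF to clear the odd bytes and
// then issuing a single PACKUS; each further halving of the element count uses
// a wider clearing mask up front and repeats the PACKUS on the packed result.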
9958 if (int NumEvenDrops = canLowerByDroppingEvenElements(Mask)) {
9959 // NumEvenDrops is the power of two stride of the elements. Another way of
9960 // thinking about it is that we need to drop the even elements this many
9961 // times to get the original input.
9962 bool IsSingleInput = isSingleInputShuffleMask(Mask);
9964 // First we need to zero all the dropped bytes.
9965 assert(NumEvenDrops <= 3 &&
9966 "No support for dropping even elements more than 3 times.");
9967 // We use the mask type to pick which bytes are preserved based on how many
9968 // elements are dropped.
9969 MVT MaskVTs[] = { MVT::v8i16, MVT::v4i32, MVT::v2i64 };
9970 SDValue ByteClearMask =
9971 DAG.getNode(ISD::BITCAST, DL, MVT::v16i8,
9972 DAG.getConstant(0xFF, MaskVTs[NumEvenDrops - 1]));
9973 V1 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V1, ByteClearMask);
9975 V2 = DAG.getNode(ISD::AND, DL, MVT::v16i8, V2, ByteClearMask);
9977 // Now pack things back together.
9978 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V1);
9979 V2 = IsSingleInput ? V1 : DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V2);
9980 SDValue Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, V1, V2);
9981 for (int i = 1; i < NumEvenDrops; ++i) {
9982 Result = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, Result);
9983 Result = DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, Result, Result);
9989 int V1LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9990 int V1HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9991 int V2LoBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9992 int V2HiBlendMask[8] = {-1, -1, -1, -1, -1, -1, -1, -1};
9994 auto buildBlendMasks = [](MutableArrayRef<int> HalfMask,
9995 MutableArrayRef<int> V1HalfBlendMask,
9996 MutableArrayRef<int> V2HalfBlendMask) {
9997 for (int i = 0; i < 8; ++i)
9998 if (HalfMask[i] >= 0 && HalfMask[i] < 16) {
9999 V1HalfBlendMask[i] = HalfMask[i];
10001 } else if (HalfMask[i] >= 16) {
10002 V2HalfBlendMask[i] = HalfMask[i] - 16;
10003 HalfMask[i] = i + 8;
10006 buildBlendMasks(LoMask, V1LoBlendMask, V2LoBlendMask);
10007 buildBlendMasks(HiMask, V1HiBlendMask, V2HiBlendMask);
10009 SDValue Zero = getZeroVector(MVT::v8i16, Subtarget, DAG, DL);
10011 auto buildLoAndHiV8s = [&](SDValue V, MutableArrayRef<int> LoBlendMask,
10012 MutableArrayRef<int> HiBlendMask) {
10014 // Check if any of the odd lanes in the v16i8 are used. If not, we can mask
10015 // them out and avoid using UNPCK{L,H} to extract the elements of V as i16s.
10017 if (std::none_of(LoBlendMask.begin(), LoBlendMask.end(),
10018 [](int M) { return M >= 0 && M % 2 == 1; }) &&
10019 std::none_of(HiBlendMask.begin(), HiBlendMask.end(),
10020 [](int M) { return M >= 0 && M % 2 == 1; })) {
10021 // Use a mask to drop the high bytes.
10022 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
10023 V1 = DAG.getNode(ISD::AND, DL, MVT::v8i16, V1,
10024 DAG.getConstant(0x00FF, MVT::v8i16));
10026 // This will be a single vector shuffle instead of a blend so nuke V2.
10027 V2 = DAG.getUNDEF(MVT::v8i16);
10029 // Squash the masks to point directly into V1.
10030 for (int &M : LoBlendMask)
10033 for (int &M : HiBlendMask)
10037 // Otherwise just unpack the low half of V into V1 and the high half into
10038 // V2 so that we can blend them as i16s.
10039 V1 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
10040 DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i8, V, Zero));
10041 V2 = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16,
10042 DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i8, V, Zero));
10045 SDValue BlendedLo = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, LoBlendMask);
10046 SDValue BlendedHi = DAG.getVectorShuffle(MVT::v8i16, DL, V1, V2, HiBlendMask);
10047 return std::make_pair(BlendedLo, BlendedHi);
10049 SDValue V1Lo, V1Hi, V2Lo, V2Hi;
10050 std::tie(V1Lo, V1Hi) = buildLoAndHiV8s(V1, V1LoBlendMask, V1HiBlendMask);
10051 std::tie(V2Lo, V2Hi) = buildLoAndHiV8s(V2, V2LoBlendMask, V2HiBlendMask);
10053 SDValue LoV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Lo, V2Lo, LoMask);
10054 SDValue HiV = DAG.getVectorShuffle(MVT::v8i16, DL, V1Hi, V2Hi, HiMask);
10056 return DAG.getNode(X86ISD::PACKUS, DL, MVT::v16i8, LoV, HiV);
10059 /// \brief Dispatching routine to lower various 128-bit x86 vector shuffles.
10061 /// This routine breaks down the specific type of 128-bit shuffle and
10062 /// dispatches to the lowering routines accordingly.
10063 static SDValue lower128BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10064 MVT VT, const X86Subtarget *Subtarget,
10065 SelectionDAG &DAG) {
10066 switch (VT.SimpleTy) {
10068 return lowerV2I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10070 return lowerV2F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
10072 return lowerV4I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10074 return lowerV4F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
10076 return lowerV8I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
10078 return lowerV16I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
10081 llvm_unreachable("Unimplemented!");
10085 /// \brief Helper function to test whether a shuffle mask could be
10086 /// simplified by widening the elements being shuffled.
10088 /// Appends the mask for wider elements in WidenedMask if valid. Otherwise
10089 /// leaves it in an unspecified state.
10091 /// NOTE: This must handle normal vector shuffle masks and *target* vector
10092 /// shuffle masks. The latter have the special property of a '-2' representing
10093 /// a zero-ed lane of a vector.
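///
/// For example, the v4i32 mask <0, 1, 6, 7> widens to the v2i64 mask <0, 3>,
/// and <-1, 1, 6, -1> widens the same way because each undef element can adopt
/// its partner's alignment, while <1, 2, 5, 6> cannot be widened since neither
/// pair starts on an even element.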
10094 static bool canWidenShuffleElements(ArrayRef<int> Mask,
10095 SmallVectorImpl<int> &WidenedMask) {
10096 for (int i = 0, Size = Mask.size(); i < Size; i += 2) {
10097 // If both elements are undef, it's trivial.
10098 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] == SM_SentinelUndef) {
10099 WidenedMask.push_back(SM_SentinelUndef);
10103 // Check for an undef mask and a mask value properly aligned to fit with
10104 // a pair of values. If we find such a case, use the non-undef mask's value.
10105 if (Mask[i] == SM_SentinelUndef && Mask[i + 1] >= 0 && Mask[i + 1] % 2 == 1) {
10106 WidenedMask.push_back(Mask[i + 1] / 2);
10109 if (Mask[i + 1] == SM_SentinelUndef && Mask[i] >= 0 && Mask[i] % 2 == 0) {
10110 WidenedMask.push_back(Mask[i] / 2);
10114 // When zeroing, we need to spread the zeroing across both lanes to widen.
10115 if (Mask[i] == SM_SentinelZero || Mask[i + 1] == SM_SentinelZero) {
10116 if ((Mask[i] == SM_SentinelZero || Mask[i] == SM_SentinelUndef) &&
10117 (Mask[i + 1] == SM_SentinelZero || Mask[i + 1] == SM_SentinelUndef)) {
10118 WidenedMask.push_back(SM_SentinelZero);
10124 // Finally check if the two mask values are adjacent and aligned with their pair.
10126 if (Mask[i] != SM_SentinelUndef && Mask[i] % 2 == 0 && Mask[i] + 1 == Mask[i + 1]) {
10127 WidenedMask.push_back(Mask[i] / 2);
10131 // Otherwise we can't safely widen the elements used in this shuffle.
10134 assert(WidenedMask.size() == Mask.size() / 2 &&
10135 "Incorrect size of mask after widening the elements!");
10140 /// \brief Generic routine to split vector shuffle into half-sized shuffles.
10142 /// This routine just extracts two subvectors, shuffles them independently, and
10143 /// then concatenates them back together. This should work effectively with all
10144 /// AVX vector shuffle types.
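///
/// For instance, a v8i32 mask such as <0, 8, 1, 9, 12, 4, 13, 5> has a low
/// half that reads only the low 128 bits of each input and a high half that
/// reads only the high 128 bits, so each half below collapses to a single
/// 4-element shuffle of the corresponding extracted halves.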
10145 static SDValue splitAndLowerVectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10146 SDValue V2, ArrayRef<int> Mask,
10147 SelectionDAG &DAG) {
10148 assert(VT.getSizeInBits() >= 256 &&
10149 "Only for 256-bit or wider vector shuffles!");
10150 assert(V1.getSimpleValueType() == VT && "Bad operand type!");
10151 assert(V2.getSimpleValueType() == VT && "Bad operand type!");
10153 ArrayRef<int> LoMask = Mask.slice(0, Mask.size() / 2);
10154 ArrayRef<int> HiMask = Mask.slice(Mask.size() / 2);
10156 int NumElements = VT.getVectorNumElements();
10157 int SplitNumElements = NumElements / 2;
10158 MVT ScalarVT = VT.getScalarType();
10159 MVT SplitVT = MVT::getVectorVT(ScalarVT, NumElements / 2);
10161 // Rather than splitting build-vectors, just build two narrower build
10162 // vectors. This helps shuffling with splats and zeros.
10163 auto SplitVector = [&](SDValue V) {
10164 while (V.getOpcode() == ISD::BITCAST)
10165 V = V->getOperand(0);
10167 MVT OrigVT = V.getSimpleValueType();
10168 int OrigNumElements = OrigVT.getVectorNumElements();
10169 int OrigSplitNumElements = OrigNumElements / 2;
10170 MVT OrigScalarVT = OrigVT.getScalarType();
10171 MVT OrigSplitVT = MVT::getVectorVT(OrigScalarVT, OrigNumElements / 2);
10175 auto *BV = dyn_cast<BuildVectorSDNode>(V);
10177 LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
10178 DAG.getIntPtrConstant(0));
10179 HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigSplitVT, V,
10180 DAG.getIntPtrConstant(OrigSplitNumElements));
10183 SmallVector<SDValue, 16> LoOps, HiOps;
10184 for (int i = 0; i < OrigSplitNumElements; ++i) {
10185 LoOps.push_back(BV->getOperand(i));
10186 HiOps.push_back(BV->getOperand(i + OrigSplitNumElements));
10188 LoV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, LoOps);
10189 HiV = DAG.getNode(ISD::BUILD_VECTOR, DL, OrigSplitVT, HiOps);
10191 return std::make_pair(DAG.getNode(ISD::BITCAST, DL, SplitVT, LoV),
10192 DAG.getNode(ISD::BITCAST, DL, SplitVT, HiV));
10195 SDValue LoV1, HiV1, LoV2, HiV2;
10196 std::tie(LoV1, HiV1) = SplitVector(V1);
10197 std::tie(LoV2, HiV2) = SplitVector(V2);
10199 // Now create two 4-way blends of these half-width vectors.
10200 auto HalfBlend = [&](ArrayRef<int> HalfMask) {
10201 bool UseLoV1 = false, UseHiV1 = false, UseLoV2 = false, UseHiV2 = false;
10202 SmallVector<int, 32> V1BlendMask, V2BlendMask, BlendMask;
10203 for (int i = 0; i < SplitNumElements; ++i) {
10204 int M = HalfMask[i];
10205 if (M >= NumElements) {
10206 if (M >= NumElements + SplitNumElements)
10210 V2BlendMask.push_back(M - NumElements);
10211 V1BlendMask.push_back(-1);
10212 BlendMask.push_back(SplitNumElements + i);
10213 } else if (M >= 0) {
10214 if (M >= SplitNumElements)
10218 V2BlendMask.push_back(-1);
10219 V1BlendMask.push_back(M);
10220 BlendMask.push_back(i);
10222 V2BlendMask.push_back(-1);
10223 V1BlendMask.push_back(-1);
10224 BlendMask.push_back(-1);
10228 // Because the lowering happens after all combining takes place, we need to
10229 // manually combine these blend masks as much as possible so that we create
10230 // a minimal number of high-level vector shuffle nodes.
10232 // First try just blending the halves of V1 or V2.
10233 if (!UseLoV1 && !UseHiV1 && !UseLoV2 && !UseHiV2)
10234 return DAG.getUNDEF(SplitVT);
10235 if (!UseLoV2 && !UseHiV2)
10236 return DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10237 if (!UseLoV1 && !UseHiV1)
10238 return DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10240 SDValue V1Blend, V2Blend;
10241 if (UseLoV1 && UseHiV1) {
10243 DAG.getVectorShuffle(SplitVT, DL, LoV1, HiV1, V1BlendMask);
10245 // We only use half of V1 so map the usage down into the final blend mask.
10246 V1Blend = UseLoV1 ? LoV1 : HiV1;
10247 for (int i = 0; i < SplitNumElements; ++i)
10248 if (BlendMask[i] >= 0 && BlendMask[i] < SplitNumElements)
10249 BlendMask[i] = V1BlendMask[i] - (UseLoV1 ? 0 : SplitNumElements);
10251 if (UseLoV2 && UseHiV2) {
10253 DAG.getVectorShuffle(SplitVT, DL, LoV2, HiV2, V2BlendMask);
10255 // We only use half of V2 so map the usage down into the final blend mask.
10256 V2Blend = UseLoV2 ? LoV2 : HiV2;
10257 for (int i = 0; i < SplitNumElements; ++i)
10258 if (BlendMask[i] >= SplitNumElements)
10259 BlendMask[i] = V2BlendMask[i] + (UseLoV2 ? SplitNumElements : 0);
10261 return DAG.getVectorShuffle(SplitVT, DL, V1Blend, V2Blend, BlendMask);
10263 SDValue Lo = HalfBlend(LoMask);
10264 SDValue Hi = HalfBlend(HiMask);
10265 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, Lo, Hi);
10268 /// \brief Either split a vector in halves or decompose the shuffles and the blend.
10271 /// This is provided as a good fallback for many lowerings of non-single-input
10272 /// shuffles with more than one 128-bit lane. In those cases, we want to select
10273 /// between splitting the shuffle into 128-bit components and stitching those
10274 /// back together vs. extracting the single-input shuffles and blending those results.
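///
/// As a simple illustration of the broadcast case, a v4f64 mask such as
/// <2, 6, 2, 6> reads a single element of each input, so it is decomposed into
/// two single-input splats of V1[2] and V2[2] (each of which can lower to a
/// broadcast) followed by a blend.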
10276 static SDValue lowerVectorShuffleAsSplitOrBlend(SDLoc DL, MVT VT, SDValue V1,
10277 SDValue V2, ArrayRef<int> Mask,
10278 SelectionDAG &DAG) {
10279 assert(!isSingleInputShuffleMask(Mask) && "This routine must not be used to "
10280 "lower single-input shuffles as it "
10281 "could then recurse on itself.");
10282 int Size = Mask.size();
10284 // If this can be modeled as a broadcast of two elements followed by a blend,
10285 // prefer that lowering. This is especially important because broadcasts can
10286 // often fold with memory operands.
10287 auto DoBothBroadcast = [&] {
10288 int V1BroadcastIdx = -1, V2BroadcastIdx = -1;
10291 if (V2BroadcastIdx == -1)
10292 V2BroadcastIdx = M - Size;
10293 else if (M - Size != V2BroadcastIdx)
10295 } else if (M >= 0) {
10296 if (V1BroadcastIdx == -1)
10297 V1BroadcastIdx = M;
10298 else if (M != V1BroadcastIdx)
10303 if (DoBothBroadcast())
10304 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask,
10307 // If the inputs all stem from a single 128-bit lane of each input, then we
10308 // split them rather than blending because the split will decompose to
10309 // unusually few instructions.
10310 int LaneCount = VT.getSizeInBits() / 128;
10311 int LaneSize = Size / LaneCount;
10312 SmallBitVector LaneInputs[2];
10313 LaneInputs[0].resize(LaneCount, false);
10314 LaneInputs[1].resize(LaneCount, false);
10315 for (int i = 0; i < Size; ++i)
10317 LaneInputs[Mask[i] / Size][(Mask[i] % Size) / LaneSize] = true;
10318 if (LaneInputs[0].count() <= 1 && LaneInputs[1].count() <= 1)
10319 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10321 // Otherwise, just fall back to decomposed shuffles and a blend. This requires
10322 // that the decomposed single-input shuffles don't end up here.
10323 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10326 /// \brief Lower a vector shuffle crossing multiple 128-bit lanes as
10327 /// a permutation and blend of those lanes.
10329 /// This essentially blends the out-of-lane inputs to each lane into the lane
10330 /// from a permuted copy of the vector. This lowering strategy results in four
10331 /// instructions in the worst case for a single-input cross lane shuffle which
10332 /// is lower than any other fully general cross-lane shuffle strategy I'm aware
10333 /// of. Special cases for each particular shuffle pattern should be handled
10334 /// prior to trying this lowering.
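///
/// For instance, a single-input v4f64 shuffle with mask <2, 1, 0, 3> is
/// handled below by a VPERM2X128 that swaps the two 128-bit halves of V1,
/// followed by an in-lane blend of V1 and the flipped copy using the mask
/// <4, 1, 6, 3>.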
10335 static SDValue lowerVectorShuffleAsLanePermuteAndBlend(SDLoc DL, MVT VT,
10336 SDValue V1, SDValue V2,
10337 ArrayRef<int> Mask,
10338 SelectionDAG &DAG) {
10339 // FIXME: This should probably be generalized for 512-bit vectors as well.
10340 assert(VT.getSizeInBits() == 256 && "Only for 256-bit vector shuffles!");
10341 int LaneSize = Mask.size() / 2;
10343 // If there are only inputs from one 128-bit lane, splitting will in fact be
10344 // less expensive. The flags track whether the given lane contains an element
10345 // that crosses to another lane.
10346 bool LaneCrossing[2] = {false, false};
10347 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10348 if (Mask[i] >= 0 && (Mask[i] % Size) / LaneSize != i / LaneSize)
10349 LaneCrossing[(Mask[i] % Size) / LaneSize] = true;
10350 if (!LaneCrossing[0] || !LaneCrossing[1])
10351 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
10353 if (isSingleInputShuffleMask(Mask)) {
10354 SmallVector<int, 32> FlippedBlendMask;
10355 for (int i = 0, Size = Mask.size(); i < Size; ++i)
10356 FlippedBlendMask.push_back(
10357 Mask[i] < 0 ? -1 : (((Mask[i] % Size) / LaneSize == i / LaneSize)
10359 : Mask[i] % LaneSize +
10360 (i / LaneSize) * LaneSize + Size));
10362 // Flip the vector, and blend the results which should now be in-lane. The
10363 // VPERM2X128 mask uses the low 2 bits for the low source and bits 4 and
10364 // 5 for the high source. The value 3 selects the high half of source 2 and
10365 // the value 2 selects the low half of source 2. We only use source 2 to
10366 // allow folding it into a memory operand.
10367 unsigned PERMMask = 3 | 2 << 4;
10368 SDValue Flipped = DAG.getNode(X86ISD::VPERM2X128, DL, VT, DAG.getUNDEF(VT),
10369 V1, DAG.getConstant(PERMMask, MVT::i8));
10370 return DAG.getVectorShuffle(VT, DL, V1, Flipped, FlippedBlendMask);
10373 // This now reduces to two single-input shuffles of V1 and V2 which at worst
10374 // will be handled by the above logic and a blend of the results, much like
10375 // other patterns in AVX.
10376 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, VT, V1, V2, Mask, DAG);
10379 /// \brief Handle lowering 2-lane 128-bit shuffles.
10380 static SDValue lowerV2X128VectorShuffle(SDLoc DL, MVT VT, SDValue V1,
10381 SDValue V2, ArrayRef<int> Mask,
10382 const X86Subtarget *Subtarget,
10383 SelectionDAG &DAG) {
10384 // Blends are faster and handle all the non-lane-crossing cases.
10385 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, VT, V1, V2, Mask,
10389 MVT SubVT = MVT::getVectorVT(VT.getVectorElementType(),
10390 VT.getVectorNumElements() / 2);
10391 // Check for patterns which can be matched with a single insert of a 128-bit subvector.
10393 if (isShuffleEquivalent(V1, V2, Mask, 0, 1, 0, 1) ||
10394 isShuffleEquivalent(V1, V2, Mask, 0, 1, 4, 5)) {
10395 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10396 DAG.getIntPtrConstant(0));
10397 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT,
10398 Mask[2] < 4 ? V1 : V2, DAG.getIntPtrConstant(0));
10399 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10401 if (isShuffleEquivalent(V1, V2, Mask, 0, 1, 6, 7)) {
10402 SDValue LoV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V1,
10403 DAG.getIntPtrConstant(0));
10404 SDValue HiV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVT, V2,
10405 DAG.getIntPtrConstant(2));
10406 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LoV, HiV);
10409 // Otherwise form a 128-bit permutation.
10410 // FIXME: Detect zero-vector inputs and use the VPERM2X128 to zero that half.
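// Each nibble of the VPERM2X128 immediate picks one 128-bit half of the
// concatenation <V1, V2>: values 0/1 select the halves of V1 and 2/3 the
// halves of V2, which for these 4-element masks is exactly the element index
// divided by two.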
10411 unsigned PermMask = Mask[0] / 2 | (Mask[2] / 2) << 4;
10412 return DAG.getNode(X86ISD::VPERM2X128, DL, VT, V1, V2,
10413 DAG.getConstant(PermMask, MVT::i8));
10416 /// \brief Lower a vector shuffle by first fixing the 128-bit lanes and then
10417 /// shuffling each lane.
10419 /// This will only succeed when the result of fixing the 128-bit lanes results
10420 /// in a single-input non-lane-crossing shuffle with a repeating shuffle mask in
10421 /// each 128-bit lane. This handles many cases where we can quickly blend away
10422 /// the lane crosses early and then use simpler shuffles within each lane.
10424 /// FIXME: It might be worthwhile at some point to support this without
10425 /// requiring the 128-bit lane-relative shuffles to be repeating, but currently
10426 /// in x86 only floating point has interesting non-repeating shuffles, and even
10427 /// those are still *marginally* more expensive.
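///
/// As a sketch of the idea, the two-input v8f32 mask <9, 8, 11, 10, 5, 4, 7, 6>
/// first becomes the 64-bit-element lane shuffle <4, 5, 2, 3>, which places
/// V2's low lane and V1's high lane, and then a single repeating in-lane
/// shuffle <1, 0, 3, 2, 5, 4, 7, 6> swaps adjacent elements within each lane.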
10428 static SDValue lowerVectorShuffleByMerging128BitLanes(
10429 SDLoc DL, MVT VT, SDValue V1, SDValue V2, ArrayRef<int> Mask,
10430 const X86Subtarget *Subtarget, SelectionDAG &DAG) {
10431 assert(!isSingleInputShuffleMask(Mask) &&
10432 "This is only useful with multiple inputs.");
10434 int Size = Mask.size();
10435 int LaneSize = 128 / VT.getScalarSizeInBits();
10436 int NumLanes = Size / LaneSize;
10437 assert(NumLanes > 1 && "Only handles 256-bit and wider shuffles.");
10439 // See if we can build a hypothetical 128-bit lane-fixing shuffle mask. Also
10440 // check whether the in-128-bit lane shuffles share a repeating pattern.
10441 SmallVector<int, 4> Lanes;
10442 Lanes.resize(NumLanes, -1);
10443 SmallVector<int, 4> InLaneMask;
10444 InLaneMask.resize(LaneSize, -1);
10445 for (int i = 0; i < Size; ++i) {
10449 int j = i / LaneSize;
10451 if (Lanes[j] < 0) {
10452 // First entry we've seen for this lane.
10453 Lanes[j] = Mask[i] / LaneSize;
10454 } else if (Lanes[j] != Mask[i] / LaneSize) {
10455 // This doesn't match the lane selected previously!
10459 // Check that within each lane we have a consistent shuffle mask.
10460 int k = i % LaneSize;
10461 if (InLaneMask[k] < 0) {
10462 InLaneMask[k] = Mask[i] % LaneSize;
10463 } else if (InLaneMask[k] != Mask[i] % LaneSize) {
10464 // This doesn't fit a repeating in-lane mask.
10469 // First shuffle the lanes into place.
10470 MVT LaneVT = MVT::getVectorVT(VT.isFloatingPoint() ? MVT::f64 : MVT::i64,
10471 VT.getSizeInBits() / 64);
10472 SmallVector<int, 8> LaneMask;
10473 LaneMask.resize(NumLanes * 2, -1);
10474 for (int i = 0; i < NumLanes; ++i)
10475 if (Lanes[i] >= 0) {
10476 LaneMask[2 * i + 0] = 2*Lanes[i] + 0;
10477 LaneMask[2 * i + 1] = 2*Lanes[i] + 1;
10480 V1 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V1);
10481 V2 = DAG.getNode(ISD::BITCAST, DL, LaneVT, V2);
10482 SDValue LaneShuffle = DAG.getVectorShuffle(LaneVT, DL, V1, V2, LaneMask);
10484 // Cast it back to the type we actually want.
10485 LaneShuffle = DAG.getNode(ISD::BITCAST, DL, VT, LaneShuffle);
10487 // Now do a simple shuffle that isn't lane crossing.
10488 SmallVector<int, 8> NewMask;
10489 NewMask.resize(Size, -1);
10490 for (int i = 0; i < Size; ++i)
10492 NewMask[i] = (i / LaneSize) * LaneSize + Mask[i] % LaneSize;
10493 assert(!is128BitLaneCrossingShuffleMask(VT, NewMask) &&
10494 "Must not introduce lane crosses at this point!");
10496 return DAG.getVectorShuffle(VT, DL, LaneShuffle, DAG.getUNDEF(VT), NewMask);
10499 /// \brief Test whether the specified input (0 or 1) is in-place blended by the given mask.
10502 /// This returns true if the elements from a particular input are already in the
10503 /// slot required by the given mask and require no permutation.
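///
/// For example, with a v4f64 mask of <0, 5, 2, 7> both inputs are in place:
/// V1 supplies elements 0 and 2 at positions 0 and 2, and V2 supplies its
/// elements 1 and 3 at positions 1 and 3. With <2, 5, 0, 7>, input 0 is not
/// in place.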
10504 static bool isShuffleMaskInputInPlace(int Input, ArrayRef<int> Mask) {
10505 assert((Input == 0 || Input == 1) && "Only two inputs to shuffles.");
10506 int Size = Mask.size();
10507 for (int i = 0; i < Size; ++i)
10508 if (Mask[i] >= 0 && Mask[i] / Size == Input && Mask[i] % Size != i)
10514 /// \brief Handle lowering of 4-lane 64-bit floating point shuffles.
10516 /// Also ends up handling lowering of 4-lane 64-bit integer shuffles when AVX2
10517 /// isn't available.
10518 static SDValue lowerV4F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10519 const X86Subtarget *Subtarget,
10520 SelectionDAG &DAG) {
10522 assert(V1.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10523 assert(V2.getSimpleValueType() == MVT::v4f64 && "Bad operand type!");
10524 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10525 ArrayRef<int> Mask = SVOp->getMask();
10526 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10528 SmallVector<int, 4> WidenedMask;
10529 if (canWidenShuffleElements(Mask, WidenedMask))
10530 return lowerV2X128VectorShuffle(DL, MVT::v4f64, V1, V2, Mask, Subtarget,
10533 if (isSingleInputShuffleMask(Mask)) {
10534 // Check for being able to broadcast a single element.
10535 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4f64, DL, V1,
10536 Mask, Subtarget, DAG))
10539 // Use low duplicate instructions for masks that match their pattern.
10540 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2))
10541 return DAG.getNode(X86ISD::MOVDDUP, DL, MVT::v4f64, V1);
10543 if (!is128BitLaneCrossingShuffleMask(MVT::v4f64, Mask)) {
10544 // Non-half-crossing single input shuffles can be lowered with an
10545 // interleaved permutation.
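// For v4f64, VPERMILPD takes one immediate bit per element: bit i picks the
// high (1) or low (0) double within element i's own 128-bit lane, so an
// in-lane swap such as <1, 0, 3, 2> encodes as 0b0101.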
10546 unsigned VPERMILPMask = (Mask[0] == 1) | ((Mask[1] == 1) << 1) |
10547 ((Mask[2] == 3) << 2) | ((Mask[3] == 3) << 3);
10548 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v4f64, V1,
10549 DAG.getConstant(VPERMILPMask, MVT::i8));
10552 // With AVX2 we have direct support for this permutation.
10553 if (Subtarget->hasAVX2())
10554 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4f64, V1,
10555 getV4X86ShuffleImm8ForMask(Mask, DAG));
10557 // Otherwise, fall back.
10558 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v4f64, V1, V2, Mask,
10562 // X86 has dedicated unpack instructions that can handle specific blend
10563 // operations: UNPCKH and UNPCKL.
10564 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 2, 6))
10565 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4f64, V1, V2);
10566 if (isShuffleEquivalent(V1, V2, Mask, 1, 5, 3, 7))
10567 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4f64, V1, V2);
10569 // If we have a single input to the zero element, insert that into V1 if we
10570 // can do so cheaply.
10571 int NumV2Elements =
10572 std::count_if(Mask.begin(), Mask.end(), [](int M) { return M >= 4; });
10573 if (NumV2Elements == 1 && Mask[0] >= 4)
10574 if (SDValue Insertion = lowerVectorShuffleAsElementInsertion(
10575 MVT::v4f64, DL, V1, V2, Mask, Subtarget, DAG))
10578 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4f64, V1, V2, Mask,
10582 // Check if the blend happens to exactly fit that of SHUFPD.
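// SHUFPD on 256-bit vectors draws result elements 0 and 2 from the first
// operand and elements 1 and 3 from the second, staying within each 128-bit
// lane; each immediate bit then picks the low or high double of the source
// lane. The first pattern below matches SHUFP(V1, V2) and the second the
// operand-swapped SHUFP(V2, V1).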
10583 if ((Mask[0] == -1 || Mask[0] < 2) &&
10584 (Mask[1] == -1 || (Mask[1] >= 4 && Mask[1] < 6)) &&
10585 (Mask[2] == -1 || (Mask[2] >= 2 && Mask[2] < 4)) &&
10586 (Mask[3] == -1 || Mask[3] >= 6)) {
10587 unsigned SHUFPDMask = (Mask[0] == 1) | ((Mask[1] == 5) << 1) |
10588 ((Mask[2] == 3) << 2) | ((Mask[3] == 7) << 3);
10589 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V1, V2,
10590 DAG.getConstant(SHUFPDMask, MVT::i8));
10592 if ((Mask[0] == -1 || (Mask[0] >= 4 && Mask[0] < 6)) &&
10593 (Mask[1] == -1 || Mask[1] < 2) &&
10594 (Mask[2] == -1 || Mask[2] >= 6) &&
10595 (Mask[3] == -1 || (Mask[3] >= 2 && Mask[3] < 4))) {
10596 unsigned SHUFPDMask = (Mask[0] == 5) | ((Mask[1] == 1) << 1) |
10597 ((Mask[2] == 7) << 2) | ((Mask[3] == 3) << 3);
10598 return DAG.getNode(X86ISD::SHUFP, DL, MVT::v4f64, V2, V1,
10599 DAG.getConstant(SHUFPDMask, MVT::i8));
10602 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10603 // shuffle. However, if we have AVX2 and either input is already in place,
10604 // we will be able to shuffle the other input even across lanes in a single
10605 // instruction, so skip this pattern.
10606 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10607 isShuffleMaskInputInPlace(1, Mask))))
10608 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10609 DL, MVT::v4f64, V1, V2, Mask, Subtarget, DAG))
10612 // If we have AVX2 then we always want to lower with a blend because at v4 we
10613 // can fully permute the elements.
10614 if (Subtarget->hasAVX2())
10615 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4f64, V1, V2,
10618 // Otherwise fall back on generic lowering.
10619 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v4f64, V1, V2, Mask, DAG);
10622 /// \brief Handle lowering of 4-lane 64-bit integer shuffles.
10624 /// This routine is only called when we have AVX2 and thus a reasonable
10625 /// instruction set for v4i64 shuffling.
10626 static SDValue lowerV4I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10627 const X86Subtarget *Subtarget,
10628 SelectionDAG &DAG) {
10630 assert(V1.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10631 assert(V2.getSimpleValueType() == MVT::v4i64 && "Bad operand type!");
10632 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10633 ArrayRef<int> Mask = SVOp->getMask();
10634 assert(Mask.size() == 4 && "Unexpected mask size for v4 shuffle!");
10635 assert(Subtarget->hasAVX2() && "We can only lower v4i64 with AVX2!");
10637 SmallVector<int, 4> WidenedMask;
10638 if (canWidenShuffleElements(Mask, WidenedMask))
10639 return lowerV2X128VectorShuffle(DL, MVT::v4i64, V1, V2, Mask, Subtarget,
10642 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v4i64, V1, V2, Mask,
10646 // Check for being able to broadcast a single element.
10647 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v4i64, DL, V1,
10648 Mask, Subtarget, DAG))
10651 // When the shuffle is mirrored between the 128-bit lanes of the unit, we can
10652 // use lower latency instructions that will operate on both 128-bit lanes.
10653 SmallVector<int, 2> RepeatedMask;
10654 if (is128BitLaneRepeatedShuffleMask(MVT::v4i64, Mask, RepeatedMask)) {
10655 if (isSingleInputShuffleMask(Mask)) {
10656 int PSHUFDMask[] = {-1, -1, -1, -1};
10657 for (int i = 0; i < 2; ++i)
10658 if (RepeatedMask[i] >= 0) {
10659 PSHUFDMask[2 * i] = 2 * RepeatedMask[i];
10660 PSHUFDMask[2 * i + 1] = 2 * RepeatedMask[i] + 1;
10662 return DAG.getNode(
10663 ISD::BITCAST, DL, MVT::v4i64,
10664 DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32,
10665 DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, V1),
10666 getV4X86ShuffleImm8ForMask(PSHUFDMask, DAG)));
10670 // AVX2 provides a direct instruction for permuting a single input across
10672 if (isSingleInputShuffleMask(Mask))
10673 return DAG.getNode(X86ISD::VPERMI, DL, MVT::v4i64, V1,
10674 getV4X86ShuffleImm8ForMask(Mask, DAG));
10676 // Try to use byte shift instructions.
10677 if (SDValue Shift = lowerVectorShuffleAsByteShift(
10678 DL, MVT::v4i64, V1, V2, Mask, DAG))
10681 // Use dedicated unpack instructions for masks that match their pattern.
10682 if (isShuffleEquivalent(V1, V2, Mask, 0, 4, 2, 6))
10683 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v4i64, V1, V2);
10684 if (isShuffleEquivalent(V1, V2, Mask, 1, 5, 3, 7))
10685 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v4i64, V1, V2);
10687 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10688 // shuffle. However, if we have AVX2 and either input is already in place,
10689 // we will be able to shuffle the other input even across lanes in a single
10690 // instruction, so skip this pattern.
10691 if (!(Subtarget->hasAVX2() && (isShuffleMaskInputInPlace(0, Mask) ||
10692 isShuffleMaskInputInPlace(1, Mask))))
10693 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10694 DL, MVT::v4i64, V1, V2, Mask, Subtarget, DAG))
10697 // Otherwise fall back on generic blend lowering.
10698 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v4i64, V1, V2,
10702 /// \brief Handle lowering of 8-lane 32-bit floating point shuffles.
10704 /// Also ends up handling lowering of 8-lane 32-bit integer shuffles when AVX2
10705 /// isn't available.
10706 static SDValue lowerV8F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10707 const X86Subtarget *Subtarget,
10708 SelectionDAG &DAG) {
10710 assert(V1.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10711 assert(V2.getSimpleValueType() == MVT::v8f32 && "Bad operand type!");
10712 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10713 ArrayRef<int> Mask = SVOp->getMask();
10714 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10716 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8f32, V1, V2, Mask,
10720 // Check for being able to broadcast a single element.
10721 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8f32, DL, V1,
10722 Mask, Subtarget, DAG))
10725 // If the shuffle mask is repeated in each 128-bit lane, we have many more
10726 // options to efficiently lower the shuffle.
10727 SmallVector<int, 4> RepeatedMask;
10728 if (is128BitLaneRepeatedShuffleMask(MVT::v8f32, Mask, RepeatedMask)) {
10729 assert(RepeatedMask.size() == 4 &&
10730 "Repeated masks must be half the mask width!");
10732 // Use even/odd duplicate instructions for masks that match their pattern.
10733 if (isShuffleEquivalent(V1, V2, Mask, 0, 0, 2, 2, 4, 4, 6, 6))
10734 return DAG.getNode(X86ISD::MOVSLDUP, DL, MVT::v8f32, V1);
10735 if (isShuffleEquivalent(V1, V2, Mask, 1, 1, 3, 3, 5, 5, 7, 7))
10736 return DAG.getNode(X86ISD::MOVSHDUP, DL, MVT::v8f32, V1);
10738 if (isSingleInputShuffleMask(Mask))
10739 return DAG.getNode(X86ISD::VPERMILPI, DL, MVT::v8f32, V1,
10740 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
10742 // Use dedicated unpack instructions for masks that match their pattern.
10743 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10744 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f32, V1, V2);
10745 if (isShuffleEquivalent(V1, V2, Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10746 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f32, V1, V2);
10748 // Otherwise, fall back to a SHUFPS sequence. Here it is important that we
10749 // have already handled any direct blends. We also need to squash the
10750 // repeated mask into a simulated v4f32 mask.
10751 for (int i = 0; i < 4; ++i)
10752 if (RepeatedMask[i] >= 8)
10753 RepeatedMask[i] -= 4;
10754 return lowerVectorShuffleWithSHUFPS(DL, MVT::v8f32, RepeatedMask, V1, V2, DAG);
10757 // If we have a single input shuffle with different shuffle patterns in the
10758 // two 128-bit lanes, use a variable shuffle mask with VPERMILPS.
10759 if (isSingleInputShuffleMask(Mask)) {
10760 SDValue VPermMask[8];
10761 for (int i = 0; i < 8; ++i)
10762 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10763 : DAG.getConstant(Mask[i], MVT::i32);
10764 if (!is128BitLaneCrossingShuffleMask(MVT::v8f32, Mask))
10765 return DAG.getNode(
10766 X86ISD::VPERMILPV, DL, MVT::v8f32, V1,
10767 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask));
10769 if (Subtarget->hasAVX2())
10770 return DAG.getNode(X86ISD::VPERMV, DL, MVT::v8f32,
10771 DAG.getNode(ISD::BITCAST, DL, MVT::v8f32,
10772 DAG.getNode(ISD::BUILD_VECTOR, DL,
10773 MVT::v8i32, VPermMask)),
10776 // Otherwise, fall back.
10777 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v8f32, V1, V2, Mask,
10781 // Try to simplify this by merging 128-bit lanes to enable a lane-based shuffle.
10783 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10784 DL, MVT::v8f32, V1, V2, Mask, Subtarget, DAG))
10787 // If we have AVX2 then we always want to lower with a blend because at v8 we
10788 // can fully permute the elements.
10789 if (Subtarget->hasAVX2())
10790 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8f32, V1, V2,
10793 // Otherwise fall back on generic lowering.
10794 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, DAG);
10797 /// \brief Handle lowering of 8-lane 32-bit integer shuffles.
10799 /// This routine is only called when we have AVX2 and thus a reasonable
10800 /// instruction set for v8i32 shuffling.
10801 static SDValue lowerV8I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10802 const X86Subtarget *Subtarget,
10803 SelectionDAG &DAG) {
10805 assert(V1.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10806 assert(V2.getSimpleValueType() == MVT::v8i32 && "Bad operand type!");
10807 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10808 ArrayRef<int> Mask = SVOp->getMask();
10809 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
10810 assert(Subtarget->hasAVX2() && "We can only lower v8i32 with AVX2!");
10812 // Whenever we can lower this as a zext, that instruction is strictly faster
10813 // than any alternative. It also allows us to fold memory operands into the
10814 // shuffle in many cases.
10815 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v8i32, V1, V2,
10816 Mask, Subtarget, DAG))
10819 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v8i32, V1, V2, Mask,
10823 // Check for being able to broadcast a single element.
10824 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v8i32, DL, V1,
10825 Mask, Subtarget, DAG))
10828 // If the shuffle mask is repeated in each 128-bit lane we can use more
10829 // efficient instructions that mirror the shuffles across the two 128-bit
10831 SmallVector<int, 4> RepeatedMask;
10832 if (is128BitLaneRepeatedShuffleMask(MVT::v8i32, Mask, RepeatedMask)) {
10833 assert(RepeatedMask.size() == 4 && "Unexpected repeated mask size!");
10834 if (isSingleInputShuffleMask(Mask))
10835 return DAG.getNode(X86ISD::PSHUFD, DL, MVT::v8i32, V1,
10836 getV4X86ShuffleImm8ForMask(RepeatedMask, DAG));
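// For instance, a single-input v8i32 mask such as <3, 2, 1, 0, 7, 6, 5, 4>
// repeats per lane as <3, 2, 1, 0>, which getV4X86ShuffleImm8ForMask encodes
// as 3 | (2 << 2) | (1 << 4) | (0 << 6) == 0x1B, i.e. one VPSHUFD that
// reverses the elements within each 128-bit lane. (Illustrative values only.)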
10838 // Use dedicated unpack instructions for masks that match their pattern.
10839 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 1, 9, 4, 12, 5, 13))
10840 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i32, V1, V2);
10841 if (isShuffleEquivalent(V1, V2, Mask, 2, 10, 3, 11, 6, 14, 7, 15))
10842 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i32, V1, V2);
10845 // If the shuffle patterns aren't repeated but it is a single input, directly
10846 // generate a cross-lane VPERMD instruction.
10847 if (isSingleInputShuffleMask(Mask)) {
10848 SDValue VPermMask[8];
10849 for (int i = 0; i < 8; ++i)
10850 VPermMask[i] = Mask[i] < 0 ? DAG.getUNDEF(MVT::i32)
10851 : DAG.getConstant(Mask[i], MVT::i32);
10852 return DAG.getNode(
10853 X86ISD::VPERMV, DL, MVT::v8i32,
10854 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v8i32, VPermMask), V1);
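// Note on the operand order used just above (a descriptive aside, not a new
// code path): X86ISD::VPERMV takes the index vector first and the data vector
// second, matching VPERMD's "indices, source" form, whereas the VPERMILPV node
// used for v8f32 earlier takes the data vector first. A cross-lane mask such
// as <0, 4, 1, 5, 2, 6, 3, 7> simply becomes eight i32 constants in the
// BUILD_VECTOR, with undef mask slots left as undef elements.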
10857 // Try to use bit shift instructions.
10858 if (SDValue Shift = lowerVectorShuffleAsBitShift(
10859 DL, MVT::v8i32, V1, V2, Mask, DAG))
10862 // Try to use byte shift instructions.
10863 if (SDValue Shift = lowerVectorShuffleAsByteShift(
10864 DL, MVT::v8i32, V1, V2, Mask, DAG))
10867 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10869 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10870 DL, MVT::v8i32, V1, V2, Mask, Subtarget, DAG))
10873 // Otherwise fall back on generic blend lowering.
10874 return lowerVectorShuffleAsDecomposedShuffleBlend(DL, MVT::v8i32, V1, V2,
10878 /// \brief Handle lowering of 16-lane 16-bit integer shuffles.
10880 /// This routine is only called when we have AVX2 and thus a reasonable
10881 /// instruction set for v16i16 shuffling.
10882 static SDValue lowerV16I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10883 const X86Subtarget *Subtarget,
10884 SelectionDAG &DAG) {
10886 assert(V1.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10887 assert(V2.getSimpleValueType() == MVT::v16i16 && "Bad operand type!");
10888 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10889 ArrayRef<int> Mask = SVOp->getMask();
10890 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
10891 assert(Subtarget->hasAVX2() && "We can only lower v16i16 with AVX2!");
10893 // Whenever we can lower this as a zext, that instruction is strictly faster
10894 // than any alternative. It also allows us to fold memory operands into the
10895 // shuffle in many cases.
10896 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v16i16, V1, V2,
10897 Mask, Subtarget, DAG))
10900 // Check for being able to broadcast a single element.
10901 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v16i16, DL, V1,
10902 Mask, Subtarget, DAG))
10905 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v16i16, V1, V2, Mask,
10909 // Use dedicated unpack instructions for masks that match their pattern.
10910 if (isShuffleEquivalent(V1, V2, Mask,
10911 // First 128-bit lane:
10912 0, 16, 1, 17, 2, 18, 3, 19,
10913 // Second 128-bit lane:
10914 8, 24, 9, 25, 10, 26, 11, 27))
10915 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i16, V1, V2);
10916 if (isShuffleEquivalent(V1, V2, Mask,
10917 // First 128-bit lane:
10918 4, 20, 5, 21, 6, 22, 7, 23,
10919 // Second 128-bit lane:
10920 12, 28, 13, 29, 14, 30, 15, 31))
10921 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i16, V1, V2);
10923 if (isSingleInputShuffleMask(Mask)) {
10924 // There are no generalized cross-lane shuffle operations available on i16
10926 if (is128BitLaneCrossingShuffleMask(MVT::v16i16, Mask))
10927 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v16i16, V1, V2,
10930 SDValue PSHUFBMask[32];
10931 for (int i = 0; i < 16; ++i) {
10932 if (Mask[i] == -1) {
10933 PSHUFBMask[2 * i] = PSHUFBMask[2 * i + 1] = DAG.getUNDEF(MVT::i8);
10937 int M = i < 8 ? Mask[i] : Mask[i] - 8;
10938 assert(M >= 0 && M < 8 && "Invalid single-input mask!");
10939 PSHUFBMask[2 * i] = DAG.getConstant(2 * M, MVT::i8);
10940 PSHUFBMask[2 * i + 1] = DAG.getConstant(2 * M + 1, MVT::i8);
10942 return DAG.getNode(
10943 ISD::BITCAST, DL, MVT::v16i16,
10945 X86ISD::PSHUFB, DL, MVT::v32i8,
10946 DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, V1),
10947 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask)));
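// To illustrate the byte expansion above: a word-level entry such as
// Mask[3] == 5 becomes the byte pair <10, 11> in PSHUFBMask, since each i16
// element is rebuilt from bytes 2 * M and 2 * M + 1. PSHUFB only indexes
// within its own 128-bit lane, which is why the high half of the mask is
// rebased with Mask[i] - 8 before being scaled to byte indices.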
10950 // Try to use bit shift instructions.
10951 if (SDValue Shift = lowerVectorShuffleAsBitShift(
10952 DL, MVT::v16i16, V1, V2, Mask, DAG))
10955 // Try to use byte shift instructions.
10956 if (SDValue Shift = lowerVectorShuffleAsByteShift(
10957 DL, MVT::v16i16, V1, V2, Mask, DAG))
10960 // Try to simplify this by merging 128-bit lanes to enable a lane-based
10962 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
10963 DL, MVT::v16i16, V1, V2, Mask, Subtarget, DAG))
10966 // Otherwise fall back on generic lowering.
10967 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v16i16, V1, V2, Mask, DAG);
10970 /// \brief Handle lowering of 32-lane 8-bit integer shuffles.
10972 /// This routine is only called when we have AVX2 and thus a reasonable
10973 /// instruction set for v32i8 shuffling.
10974 static SDValue lowerV32I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
10975 const X86Subtarget *Subtarget,
10976 SelectionDAG &DAG) {
10978 assert(V1.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
10979 assert(V2.getSimpleValueType() == MVT::v32i8 && "Bad operand type!");
10980 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
10981 ArrayRef<int> Mask = SVOp->getMask();
10982 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
10983 assert(Subtarget->hasAVX2() && "We can only lower v32i8 with AVX2!");
10985 // Whenever we can lower this as a zext, that instruction is strictly faster
10986 // than any alternative. It also allows us to fold memory operands into the
10987 // shuffle in many cases.
10988 if (SDValue ZExt = lowerVectorShuffleAsZeroOrAnyExtend(DL, MVT::v32i8, V1, V2,
10989 Mask, Subtarget, DAG))
10992 // Check for being able to broadcast a single element.
10993 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(MVT::v32i8, DL, V1,
10994 Mask, Subtarget, DAG))
10997 if (SDValue Blend = lowerVectorShuffleAsBlend(DL, MVT::v32i8, V1, V2, Mask,
11001 // Use dedicated unpack instructions for masks that match their pattern.
11002 // Note that these are repeated 128-bit lane unpacks, not unpacks across all
11004 if (isShuffleEquivalent(
11006 // First 128-bit lane:
11007 0, 32, 1, 33, 2, 34, 3, 35, 4, 36, 5, 37, 6, 38, 7, 39,
11008 // Second 128-bit lane:
11009 16, 48, 17, 49, 18, 50, 19, 51, 20, 52, 21, 53, 22, 54, 23, 55))
11010 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v32i8, V1, V2);
11011 if (isShuffleEquivalent(
11013 // First 128-bit lane:
11014 8, 40, 9, 41, 10, 42, 11, 43, 12, 44, 13, 45, 14, 46, 15, 47,
11015 // Second 128-bit lane:
11016 24, 56, 25, 57, 26, 58, 27, 59, 28, 60, 29, 61, 30, 62, 31, 63))
11017 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v32i8, V1, V2);
11019 if (isSingleInputShuffleMask(Mask)) {
11020 // There are no generalized cross-lane shuffle operations available on i8
11022 if (is128BitLaneCrossingShuffleMask(MVT::v32i8, Mask))
11023 return lowerVectorShuffleAsLanePermuteAndBlend(DL, MVT::v32i8, V1, V2,
11026 SDValue PSHUFBMask[32];
11027 for (int i = 0; i < 32; ++i)
11030 ? DAG.getUNDEF(MVT::i8)
11031 : DAG.getConstant(Mask[i] < 16 ? Mask[i] : Mask[i] - 16, MVT::i8);
11033 return DAG.getNode(
11034 X86ISD::PSHUFB, DL, MVT::v32i8, V1,
11035 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, PSHUFBMask));
11038 // Try to use bit shift instructions.
11039 if (SDValue Shift = lowerVectorShuffleAsBitShift(
11040 DL, MVT::v32i8, V1, V2, Mask, DAG))
11043 // Try to use byte shift instructions.
11044 if (SDValue Shift = lowerVectorShuffleAsByteShift(
11045 DL, MVT::v32i8, V1, V2, Mask, DAG))
11048 // Try to simplify this by merging 128-bit lanes to enable a lane-based
11050 if (SDValue Result = lowerVectorShuffleByMerging128BitLanes(
11051 DL, MVT::v32i8, V1, V2, Mask, Subtarget, DAG))
11054 // Otherwise fall back on generic lowering.
11055 return lowerVectorShuffleAsSplitOrBlend(DL, MVT::v32i8, V1, V2, Mask, DAG);
11058 /// \brief High-level routine to lower various 256-bit x86 vector shuffles.
11060 /// This routine either breaks down the specific type of a 256-bit x86 vector
11061 /// shuffle or splits it into two 128-bit shuffles and fuses the results back
11062 /// together based on the available instructions.
11063 static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11064 MVT VT, const X86Subtarget *Subtarget,
11065 SelectionDAG &DAG) {
11067 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11068 ArrayRef<int> Mask = SVOp->getMask();
11070 // There is a really nice hard cut-over between AVX1 and AVX2 that means we can
11071 // check for those subtargets here and avoid much of the subtarget querying in
11072 // the per-vector-type lowering routines. With AVX1 we have essentially *zero*
11073 // ability to manipulate a 256-bit vector with integer types. Since we'll use
11074 // floating point types there eventually, just immediately cast everything to
11075 // a float and operate entirely in that domain.
11076 if (VT.isInteger() && !Subtarget->hasAVX2()) {
11077 int ElementBits = VT.getScalarSizeInBits();
11078 if (ElementBits < 32)
11079 // No floating point type available, decompose into 128-bit vectors.
11080 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11082 MVT FpVT = MVT::getVectorVT(MVT::getFloatingPointVT(ElementBits),
11083 VT.getVectorNumElements());
11084 V1 = DAG.getNode(ISD::BITCAST, DL, FpVT, V1);
11085 V2 = DAG.getNode(ISD::BITCAST, DL, FpVT, V2);
11086 return DAG.getNode(ISD::BITCAST, DL, VT,
11087 DAG.getVectorShuffle(FpVT, DL, V1, V2, Mask));
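// As a concrete (illustrative) instance of the cast above: a v8i32 shuffle on
// an AVX1-only target is bitcast to v8f32, lowered with the floating point
// shuffle lowering (e.g. VSHUFPS / VBLENDPS), and the result is bitcast back
// to v8i32. The bit pattern is unchanged; only the execution domain differs.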
11090 switch (VT.SimpleTy) {
11092 return lowerV4F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11094 return lowerV4I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11096 return lowerV8F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11098 return lowerV8I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11100 return lowerV16I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
11102 return lowerV32I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
11105 llvm_unreachable("Not a valid 256-bit x86 vector type!");
11109 /// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
11110 static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11111 const X86Subtarget *Subtarget,
11112 SelectionDAG &DAG) {
11114 assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11115 assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
11116 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11117 ArrayRef<int> Mask = SVOp->getMask();
11118 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11120 // X86 has dedicated unpack instructions that can handle specific blend
11121 // operations: UNPCKH and UNPCKL.
11122 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 2, 10, 4, 12, 6, 14))
11123 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f64, V1, V2);
11124 if (isShuffleEquivalent(V1, V2, Mask, 1, 9, 3, 11, 5, 13, 7, 15))
11125 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f64, V1, V2);
11127 // FIXME: Implement direct support for this type!
11128 return splitAndLowerVectorShuffle(DL, MVT::v8f64, V1, V2, Mask, DAG);
11131 /// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
11132 static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11133 const X86Subtarget *Subtarget,
11134 SelectionDAG &DAG) {
11136 assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11137 assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
11138 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11139 ArrayRef<int> Mask = SVOp->getMask();
11140 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11142 // Use dedicated unpack instructions for masks that match their pattern.
11143 if (isShuffleEquivalent(V1, V2, Mask,
11144 0, 16, 1, 17, 4, 20, 5, 21,
11145 8, 24, 9, 25, 12, 28, 13, 29))
11146 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16f32, V1, V2);
11147 if (isShuffleEquivalent(V1, V2, Mask,
11148 2, 18, 3, 19, 6, 22, 7, 23,
11149 10, 26, 11, 27, 14, 30, 15, 31))
11150 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16f32, V1, V2);
11152 // FIXME: Implement direct support for this type!
11153 return splitAndLowerVectorShuffle(DL, MVT::v16f32, V1, V2, Mask, DAG);
11156 /// \brief Handle lowering of 8-lane 64-bit integer shuffles.
11157 static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11158 const X86Subtarget *Subtarget,
11159 SelectionDAG &DAG) {
11161 assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11162 assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
11163 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11164 ArrayRef<int> Mask = SVOp->getMask();
11165 assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
11167 // X86 has dedicated unpack instructions that can handle specific blend
11168 // operations: UNPCKH and UNPCKL.
11169 if (isShuffleEquivalent(V1, V2, Mask, 0, 8, 2, 10, 4, 12, 6, 14))
11170 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i64, V1, V2);
11171 if (isShuffleEquivalent(V1, V2, Mask, 1, 9, 3, 11, 5, 13, 7, 15))
11172 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i64, V1, V2);
11174 // FIXME: Implement direct support for this type!
11175 return splitAndLowerVectorShuffle(DL, MVT::v8i64, V1, V2, Mask, DAG);
11178 /// \brief Handle lowering of 16-lane 32-bit integer shuffles.
11179 static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11180 const X86Subtarget *Subtarget,
11181 SelectionDAG &DAG) {
11183 assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11184 assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
11185 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11186 ArrayRef<int> Mask = SVOp->getMask();
11187 assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
11189 // Use dedicated unpack instructions for masks that match their pattern.
11190 if (isShuffleEquivalent(V1, V2, Mask,
11191 0, 16, 1, 17, 4, 20, 5, 21,
11192 8, 24, 9, 25, 12, 28, 13, 29))
11193 return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i32, V1, V2);
11194 if (isShuffleEquivalent(V1, V2, Mask,
11195 2, 18, 3, 19, 6, 22, 7, 23,
11196 10, 26, 11, 27, 14, 30, 15, 31))
11197 return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i32, V1, V2);
11199 // FIXME: Implement direct support for this type!
11200 return splitAndLowerVectorShuffle(DL, MVT::v16i32, V1, V2, Mask, DAG);
11203 /// \brief Handle lowering of 32-lane 16-bit integer shuffles.
11204 static SDValue lowerV32I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11205 const X86Subtarget *Subtarget,
11206 SelectionDAG &DAG) {
11208 assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11209 assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
11210 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11211 ArrayRef<int> Mask = SVOp->getMask();
11212 assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
11213 assert(Subtarget->hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
11215 // FIXME: Implement direct support for this type!
11216 return splitAndLowerVectorShuffle(DL, MVT::v32i16, V1, V2, Mask, DAG);
11219 /// \brief Handle lowering of 64-lane 8-bit integer shuffles.
11220 static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11221 const X86Subtarget *Subtarget,
11222 SelectionDAG &DAG) {
11224 assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11225 assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
11226 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11227 ArrayRef<int> Mask = SVOp->getMask();
11228 assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
11229 assert(Subtarget->hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
11231 // FIXME: Implement direct support for this type!
11232 return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
11235 /// \brief High-level routine to lower various 512-bit x86 vector shuffles.
11237 /// This routine either breaks down the specific type of a 512-bit x86 vector
11238 /// shuffle or splits it into two 256-bit shuffles and fuses the results back
11239 /// together based on the available instructions.
11240 static SDValue lower512BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
11241 MVT VT, const X86Subtarget *Subtarget,
11242 SelectionDAG &DAG) {
11244 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11245 ArrayRef<int> Mask = SVOp->getMask();
11246 assert(Subtarget->hasAVX512() &&
11247 "Cannot lower 512-bit vectors w/ basic ISA!");
11249 // Check for being able to broadcast a single element.
11250 if (SDValue Broadcast = lowerVectorShuffleAsBroadcast(VT.SimpleTy, DL, V1,
11251 Mask, Subtarget, DAG))
11254 // Dispatch to each element type for lowering. If we don't have support for
11255 // specific element type shuffles at 512 bits, immediately split them and
11256 // lower them. Each lowering routine of a given type is allowed to assume that
11257 // the requisite ISA extensions for that element type are available.
11258 switch (VT.SimpleTy) {
11260 return lowerV8F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11262 return lowerV16F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11264 return lowerV8I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
11266 return lowerV16I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
11268 if (Subtarget->hasBWI())
11269 return lowerV32I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
11272 if (Subtarget->hasBWI())
11273 return lowerV64I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
11277 llvm_unreachable("Not a valid 512-bit x86 vector type!");
11280 // Otherwise fall back on splitting.
11281 return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
11284 /// \brief Top-level lowering for x86 vector shuffles.
11286 /// This handles decomposition, canonicalization, and lowering of all x86
11287 /// vector shuffles. Most of the specific lowering strategies are encapsulated
11288 /// above in helper routines. The canonicalization attempts to widen shuffles
11289 /// to involve fewer lanes of wider elements, consolidate symmetric patterns
11290 /// s.t. only one of the two inputs needs to be tested, etc.
11291 static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
11292 SelectionDAG &DAG) {
11293 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11294 ArrayRef<int> Mask = SVOp->getMask();
11295 SDValue V1 = Op.getOperand(0);
11296 SDValue V2 = Op.getOperand(1);
11297 MVT VT = Op.getSimpleValueType();
11298 int NumElements = VT.getVectorNumElements();
11301 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
11303 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
11304 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
11305 if (V1IsUndef && V2IsUndef)
11306 return DAG.getUNDEF(VT);
11308 // When we create a shuffle node we put the UNDEF node as the second operand,
11309 // but in some cases the first operand may be transformed to UNDEF.
11310 // In this case we should just commute the node.
11312 return DAG.getCommutedVectorShuffle(*SVOp);
11314 // Check for non-undef masks pointing at an undef vector and make the masks
11315 // undef as well. This makes it easier to match the shuffle based solely on
11319 if (M >= NumElements) {
11320 SmallVector<int, 8> NewMask(Mask.begin(), Mask.end());
11321 for (int &M : NewMask)
11322 if (M >= NumElements)
11324 return DAG.getVectorShuffle(VT, dl, V1, V2, NewMask);
11327 // We actually see shuffles that are entirely re-arrangements of a set of
11328 // zero inputs. This mostly happens while decomposing complex shuffles into
11329 // simple ones. Directly lower these as a buildvector of zeros.
11330 SmallBitVector Zeroable = computeZeroableShuffleElements(Mask, V1, V2);
11331 if (Zeroable.all())
11332 return getZeroVector(VT, Subtarget, DAG, dl);
11334 // Try to collapse shuffles into using a vector type with fewer elements but
11335 // wider element types. We cap this to not form integers or floating point
11336 // elements wider than 64 bits, but it might be interesting to form i128
11337 // integers to handle flipping the low and high halves of AVX 256-bit vectors.
11338 SmallVector<int, 16> WidenedMask;
11339 if (VT.getScalarSizeInBits() < 64 &&
11340 canWidenShuffleElements(Mask, WidenedMask)) {
11341 MVT NewEltVT = VT.isFloatingPoint()
11342 ? MVT::getFloatingPointVT(VT.getScalarSizeInBits() * 2)
11343 : MVT::getIntegerVT(VT.getScalarSizeInBits() * 2);
11344 MVT NewVT = MVT::getVectorVT(NewEltVT, VT.getVectorNumElements() / 2);
11345 // Make sure that the new vector type is legal. For example, v2f64 isn't
11347 if (DAG.getTargetLoweringInfo().isTypeLegal(NewVT)) {
11348 V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, V1);
11349 V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, V2);
11350 return DAG.getNode(ISD::BITCAST, dl, VT,
11351 DAG.getVectorShuffle(NewVT, dl, V1, V2, WidenedMask));
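// A small worked example of the widening (illustrative only): the v4i32 mask
// <2, 3, 6, 7> pairs up as the v2i64 mask <1, 3>, and the v8i16 mask
// <0, 1, 2, 3, 8, 9, 10, 11> pairs up as the v4i32 mask <0, 1, 4, 5>. Each
// pair must be of the form <2k, 2k+1> (or undef) for canWidenShuffleElements
// to succeed.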
11355 int NumV1Elements = 0, NumUndefElements = 0, NumV2Elements = 0;
11356 for (int M : SVOp->getMask())
11358 ++NumUndefElements;
11359 else if (M < NumElements)
11364 // Commute the shuffle as needed such that more elements come from V1 than
11365 // V2. This allows us to match the shuffle pattern strictly on how many
11366 // elements come from V1 without handling the symmetric cases.
11367 if (NumV2Elements > NumV1Elements)
11368 return DAG.getCommutedVectorShuffle(*SVOp);
11370 // When the number of V1 and V2 elements is the same, try to minimize the
11371 // number of uses of V2 in the low half of the vector. When that is tied,
11372 // ensure that the sum of indices for V1 is equal to or lower than the sum
11373 // of indices for V2. When those are equal, try to ensure that the number of odd
11374 // indices for V1 is lower than the number of odd indices for V2.
11375 if (NumV1Elements == NumV2Elements) {
11376 int LowV1Elements = 0, LowV2Elements = 0;
11377 for (int M : SVOp->getMask().slice(0, NumElements / 2))
11378 if (M >= NumElements)
11382 if (LowV2Elements > LowV1Elements) {
11383 return DAG.getCommutedVectorShuffle(*SVOp);
11384 } else if (LowV2Elements == LowV1Elements) {
11385 int SumV1Indices = 0, SumV2Indices = 0;
11386 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11387 if (SVOp->getMask()[i] >= NumElements)
11389 else if (SVOp->getMask()[i] >= 0)
11391 if (SumV2Indices < SumV1Indices) {
11392 return DAG.getCommutedVectorShuffle(*SVOp);
11393 } else if (SumV2Indices == SumV1Indices) {
11394 int NumV1OddIndices = 0, NumV2OddIndices = 0;
11395 for (int i = 0, Size = SVOp->getMask().size(); i < Size; ++i)
11396 if (SVOp->getMask()[i] >= NumElements)
11397 NumV2OddIndices += i % 2;
11398 else if (SVOp->getMask()[i] >= 0)
11399 NumV1OddIndices += i % 2;
11400 if (NumV2OddIndices < NumV1OddIndices)
11401 return DAG.getCommutedVectorShuffle(*SVOp);
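// To illustrate the tie-breaking above (example values only): for the v4i32
// mask <4, 1, 6, 3> both inputs contribute two elements and the low half is
// tied, but the V2 elements sit at positions 0 and 2 (sum 2) while the V1
// elements sit at positions 1 and 3 (sum 4), so the shuffle is commuted to
// <0, 5, 2, 7>, keeping V1's elements in the earlier slots.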
11406 // For each vector width, delegate to a specialized lowering routine.
11407 if (VT.getSizeInBits() == 128)
11408 return lower128BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11410 if (VT.getSizeInBits() == 256)
11411 return lower256BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11413 // AVX-512 vectors are dispatched to the dedicated 512-bit routine, which still
11414 // splits most masks into narrower shuffles. FIXME: Implement AVX-512 support!
11415 if (VT.getSizeInBits() == 512)
11416 return lower512BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
11418 llvm_unreachable("Unimplemented!");
11422 //===----------------------------------------------------------------------===//
11423 // Legacy vector shuffle lowering
11425 // This is the legacy code handling vector shuffles until the code above
11426 // fully replaces it in both functionality and performance.
11427 //===----------------------------------------------------------------------===//
11429 static bool isBlendMask(ArrayRef<int> MaskVals, MVT VT, bool hasSSE41,
11430 bool hasInt256, unsigned *MaskOut = nullptr) {
11431 MVT EltVT = VT.getVectorElementType();
11433 // There is no blend with immediate in AVX-512.
11434 if (VT.is512BitVector())
11437 if (!hasSSE41 || EltVT == MVT::i8)
11439 if (!hasInt256 && VT == MVT::v16i16)
11442 unsigned MaskValue = 0;
11443 unsigned NumElems = VT.getVectorNumElements();
11444 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
11445 unsigned NumLanes = (NumElems - 1) / 8 + 1;
11446 unsigned NumElemsInLane = NumElems / NumLanes;
11448 // Blend for v16i16 should be symmetric for both lanes.
11449 for (unsigned i = 0; i < NumElemsInLane; ++i) {
11451 int SndLaneEltIdx = (NumLanes == 2) ? MaskVals[i + NumElemsInLane] : -1;
11452 int EltIdx = MaskVals[i];
11454 if ((EltIdx < 0 || EltIdx == (int)i) &&
11455 (SndLaneEltIdx < 0 || SndLaneEltIdx == (int)(i + NumElemsInLane)))
11458 if (((unsigned)EltIdx == (i + NumElems)) &&
11459 (SndLaneEltIdx < 0 ||
11460 (unsigned)SndLaneEltIdx == i + NumElems + NumElemsInLane))
11461 MaskValue |= (1 << i);
11467 *MaskOut = MaskValue;
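// The MaskValue computed above is the immediate consumed by the BLENDI
// lowering below: bit i is set exactly when element i of the per-128-bit-lane
// pattern comes from V2. For example, the v8i16 mask <0, 9, 2, 11, 4, 13, 6,
// 15> yields MaskValue == 0xAA (0b10101010). (Illustrative values only.)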
11471 // Try to lower a shuffle node into a simple blend instruction.
11472 // This function assumes isBlendMask returns true for this
11473 // ShuffleVectorSDNode.
11474 static SDValue LowerVECTOR_SHUFFLEtoBlend(ShuffleVectorSDNode *SVOp,
11475 unsigned MaskValue,
11476 const X86Subtarget *Subtarget,
11477 SelectionDAG &DAG) {
11478 MVT VT = SVOp->getSimpleValueType(0);
11479 MVT EltVT = VT.getVectorElementType();
11480 assert(isBlendMask(SVOp->getMask(), VT, Subtarget->hasSSE41(),
11481 Subtarget->hasInt256()) &&
11482 "Trying to lower a VECTOR_SHUFFLE to a Blend but "
11483 "with the wrong mask");
11484 SDValue V1 = SVOp->getOperand(0);
11485 SDValue V2 = SVOp->getOperand(1);
11487 unsigned NumElems = VT.getVectorNumElements();
11489 // Convert i32 vectors to floating point if the target does not have AVX2.
11490 // AVX2 introduced the VPBLENDD instruction for 128 and 256-bit vectors.
11492 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
11493 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
11495 V1 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V1);
11496 V2 = DAG.getNode(ISD::BITCAST, dl, BlendVT, V2);
11499 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, V1, V2,
11500 DAG.getConstant(MaskValue, MVT::i32));
11501 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
11504 /// In vector type \p VT, return true if the element at index \p InputIdx
11505 /// falls on a different 128-bit lane than \p OutputIdx.
11506 static bool ShuffleCrosses128bitLane(MVT VT, unsigned InputIdx,
11507 unsigned OutputIdx) {
11508 unsigned EltSize = VT.getVectorElementType().getSizeInBits();
11509 return InputIdx * EltSize / 128 != OutputIdx * EltSize / 128;
11512 /// Generate a PSHUFB if possible. Selects elements from \p V1 according to
11513 /// \p MaskVals. MaskVals[OutputIdx] = InputIdx specifies that we want to
11514 /// shuffle the element at InputIdx in V1 to OutputIdx in the result. If \p
11515 /// MaskVals refers to elements outside of \p V1 or is undef (-1), insert a
11517 static SDValue getPSHUFB(ArrayRef<int> MaskVals, SDValue V1, SDLoc &dl,
11518 SelectionDAG &DAG) {
11519 MVT VT = V1.getSimpleValueType();
11520 assert(VT.is128BitVector() || VT.is256BitVector());
11522 MVT EltVT = VT.getVectorElementType();
11523 unsigned EltSizeInBytes = EltVT.getSizeInBits() / 8;
11524 unsigned NumElts = VT.getVectorNumElements();
11526 SmallVector<SDValue, 32> PshufbMask;
11527 for (unsigned OutputIdx = 0; OutputIdx < NumElts; ++OutputIdx) {
11528 int InputIdx = MaskVals[OutputIdx];
11529 unsigned InputByteIdx;
11531 if (InputIdx < 0 || NumElts <= (unsigned)InputIdx)
11532 InputByteIdx = 0x80;
11534 // Cross lane is not allowed.
11535 if (ShuffleCrosses128bitLane(VT, InputIdx, OutputIdx))
11537 InputByteIdx = InputIdx * EltSizeInBytes;
11538 // Index is a byte offset within the 128-bit lane.
11539 InputByteIdx &= 0xf;
11542 for (unsigned j = 0; j < EltSizeInBytes; ++j) {
11543 PshufbMask.push_back(DAG.getConstant(InputByteIdx, MVT::i8));
11544 if (InputByteIdx != 0x80)
11549 MVT ShufVT = MVT::getVectorVT(MVT::i8, PshufbMask.size());
11551 V1 = DAG.getNode(ISD::BITCAST, dl, ShufVT, V1);
11552 return DAG.getNode(X86ISD::PSHUFB, dl, ShufVT, V1,
11553 DAG.getNode(ISD::BUILD_VECTOR, dl, ShufVT, PshufbMask));
11556 // v8i16 shuffles - Prefer shuffles in the following order:
11557 // 1. [all] pshuflw, pshufhw, optional move
11558 // 2. [ssse3] 1 x pshufb
11559 // 3. [ssse3] 2 x pshufb + 1 x por
11560 // 4. [all] mov + pshuflw + pshufhw + N x (pextrw + pinsrw)
11562 LowerVECTOR_SHUFFLEv8i16(SDValue Op, const X86Subtarget *Subtarget,
11563 SelectionDAG &DAG) {
11564 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11565 SDValue V1 = SVOp->getOperand(0);
11566 SDValue V2 = SVOp->getOperand(1);
11568 SmallVector<int, 8> MaskVals;
11570 // Determine if more than 1 of the words in each of the low and high quadwords
11571 // of the result come from the same quadword of one of the two inputs. Undef
11572 // mask values count as coming from any quadword, for better codegen.
11574 // Lo/HiQuad[i] counts how many words of the low/high half of the result come
11575 // from the ith input quadword. For i, 0 and 1 refer to V1, 2 and 3 refer to V2.
11576 unsigned LoQuad[] = { 0, 0, 0, 0 };
11577 unsigned HiQuad[] = { 0, 0, 0, 0 };
11578 // Indices of quads used.
11579 std::bitset<4> InputQuads;
11580 for (unsigned i = 0; i < 8; ++i) {
11581 unsigned *Quad = i < 4 ? LoQuad : HiQuad;
11582 int EltIdx = SVOp->getMaskElt(i);
11583 MaskVals.push_back(EltIdx);
11591 ++Quad[EltIdx / 4];
11592 InputQuads.set(EltIdx / 4);
11595 int BestLoQuad = -1;
11596 unsigned MaxQuad = 1;
11597 for (unsigned i = 0; i < 4; ++i) {
11598 if (LoQuad[i] > MaxQuad) {
11600 MaxQuad = LoQuad[i];
11604 int BestHiQuad = -1;
11606 for (unsigned i = 0; i < 4; ++i) {
11607 if (HiQuad[i] > MaxQuad) {
11609 MaxQuad = HiQuad[i];
11613 // For SSSE3, if all 8 words of the result come from only 1 quadword of each
11614 // of the two input vectors, shuffle them into one input vector so only a
11615 // single pshufb instruction is necessary. If there are more than 2 input
11616 // quads, disable the next transformation since it does not help SSSE3.
11617 bool V1Used = InputQuads[0] || InputQuads[1];
11618 bool V2Used = InputQuads[2] || InputQuads[3];
11619 if (Subtarget->hasSSSE3()) {
11620 if (InputQuads.count() == 2 && V1Used && V2Used) {
11621 BestLoQuad = InputQuads[0] ? 0 : 1;
11622 BestHiQuad = InputQuads[2] ? 2 : 3;
11624 if (InputQuads.count() > 2) {
11630 // If BestLoQuad or BestHiQuad are set, shuffle the quads together and update
11631 // the shuffle mask. If a quad is scored as -1, that means that it contains
11632 // words from all 4 input quadwords.
11634 if (BestLoQuad >= 0 || BestHiQuad >= 0) {
11636 BestLoQuad < 0 ? 0 : BestLoQuad,
11637 BestHiQuad < 0 ? 1 : BestHiQuad
11639 NewV = DAG.getVectorShuffle(MVT::v2i64, dl,
11640 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V1),
11641 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, V2), &MaskV[0]);
11642 NewV = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, NewV);
11644 // Rewrite the MaskVals and assign NewV to V1 if NewV now contains all the
11645 // source words for the shuffle, to aid later transformations.
11646 bool AllWordsInNewV = true;
11647 bool InOrder[2] = { true, true };
11648 for (unsigned i = 0; i != 8; ++i) {
11649 int idx = MaskVals[i];
11651 InOrder[i/4] = false;
11652 if (idx < 0 || (idx/4) == BestLoQuad || (idx/4) == BestHiQuad)
11654 AllWordsInNewV = false;
11658 bool pshuflw = AllWordsInNewV, pshufhw = AllWordsInNewV;
11659 if (AllWordsInNewV) {
11660 for (int i = 0; i != 8; ++i) {
11661 int idx = MaskVals[i];
11664 idx = MaskVals[i] = (idx / 4) == BestLoQuad ? (idx & 3) : (idx & 3) + 4;
11665 if ((idx != i) && idx < 4)
11667 if ((idx != i) && idx > 3)
11676 // If we've eliminated the use of V2, and the new mask is a pshuflw or
11677 // pshufhw, that's as cheap as it gets. Return the new shuffle.
11678 if ((pshufhw && InOrder[0]) || (pshuflw && InOrder[1])) {
11679 unsigned Opc = pshufhw ? X86ISD::PSHUFHW : X86ISD::PSHUFLW;
11680 unsigned TargetMask = 0;
11681 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV,
11682 DAG.getUNDEF(MVT::v8i16), &MaskVals[0]);
11683 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11684 TargetMask = pshufhw ? getShufflePSHUFHWImmediate(SVOp):
11685 getShufflePSHUFLWImmediate(SVOp);
11686 V1 = NewV.getOperand(0);
11687 return getTargetShuffleNode(Opc, dl, MVT::v8i16, V1, TargetMask, DAG);
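// For example (values illustrative): if the rewritten MaskVals are
// <2, 1, 0, 3, 4, 5, 6, 7> and the high quadword was already in order, this
// path emits a single PSHUFLW with immediate 2 | (1 << 2) | (0 << 4) |
// (3 << 6) == 0xC6, leaving words 4-7 untouched.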
11691 // Promote splats to a larger type which usually leads to more efficient code.
11692 // FIXME: Is this true if pshufb is available?
11693 if (SVOp->isSplat())
11694 return PromoteSplat(SVOp, DAG);
11696 // If we have SSSE3, and all words of the result are from 1 input vector,
11697 // case 2 is generated, otherwise case 3 is generated. If no SSSE3
11698 // is present, fall back to case 4.
11699 if (Subtarget->hasSSSE3()) {
11700 SmallVector<SDValue,16> pshufbMask;
11702 // If we have elements from both input vectors, set the high bit of the
11703 // shuffle mask element to zero out elements that come from V2 in the V1
11704 // mask, and elements that come from V1 in the V2 mask, so that the two
11705 // results can be OR'd together.
11706 bool TwoInputs = V1Used && V2Used;
11707 V1 = getPSHUFB(MaskVals, V1, dl, DAG);
11709 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11711 // Calculate the shuffle mask for the second input, shuffle it, and
11712 // OR it with the first shuffled input.
11713 CommuteVectorShuffleMask(MaskVals, 8);
11714 V2 = getPSHUFB(MaskVals, V2, dl, DAG);
11715 V1 = DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
11716 return DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11719 // If BestLoQuad >= 0, generate a pshuflw to put the low elements in order,
11720 // and update MaskVals with the new element order.
11721 std::bitset<8> InOrder;
11722 if (BestLoQuad >= 0) {
11723 int MaskV[] = { -1, -1, -1, -1, 4, 5, 6, 7 };
11724 for (int i = 0; i != 4; ++i) {
11725 int idx = MaskVals[i];
11728 } else if ((idx / 4) == BestLoQuad) {
11729 MaskV[i] = idx & 3;
11733 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11736 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11737 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11738 NewV = getTargetShuffleNode(X86ISD::PSHUFLW, dl, MVT::v8i16,
11739 NewV.getOperand(0),
11740 getShufflePSHUFLWImmediate(SVOp), DAG);
11744 // If BestHi >= 0, generate a pshufhw to put the high elements in order,
11745 // and update MaskVals with the new element order.
11746 if (BestHiQuad >= 0) {
11747 int MaskV[] = { 0, 1, 2, 3, -1, -1, -1, -1 };
11748 for (unsigned i = 4; i != 8; ++i) {
11749 int idx = MaskVals[i];
11752 } else if ((idx / 4) == BestHiQuad) {
11753 MaskV[i] = (idx & 3) + 4;
11757 NewV = DAG.getVectorShuffle(MVT::v8i16, dl, NewV, DAG.getUNDEF(MVT::v8i16),
11760 if (NewV.getOpcode() == ISD::VECTOR_SHUFFLE && Subtarget->hasSSE2()) {
11761 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(NewV.getNode());
11762 NewV = getTargetShuffleNode(X86ISD::PSHUFHW, dl, MVT::v8i16,
11763 NewV.getOperand(0),
11764 getShufflePSHUFHWImmediate(SVOp), DAG);
11768 // In case BestHiQuad & BestLoQuad were both -1, which means each quadword has a word
11769 // from each of the four input quadwords, calculate the InOrder bitvector now
11770 // before falling through to the insert/extract cleanup.
11771 if (BestLoQuad == -1 && BestHiQuad == -1) {
11773 for (int i = 0; i != 8; ++i)
11774 if (MaskVals[i] < 0 || MaskVals[i] == i)
11778 // The other elements are put in the right place using pextrw and pinsrw.
11779 for (unsigned i = 0; i != 8; ++i) {
11782 int EltIdx = MaskVals[i];
11785 SDValue ExtOp = (EltIdx < 8) ?
11786 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V1,
11787 DAG.getIntPtrConstant(EltIdx)) :
11788 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, V2,
11789 DAG.getIntPtrConstant(EltIdx - 8));
11790 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, ExtOp,
11791 DAG.getIntPtrConstant(i));
11796 /// \brief v16i16 shuffles
11798 /// FIXME: We only support generation of a single pshufb currently. We can
11799 /// generalize the other applicable cases from LowerVECTOR_SHUFFLEv8i16 as
11800 /// well (e.g. 2 x pshufb + 1 x por).
11802 LowerVECTOR_SHUFFLEv16i16(SDValue Op, SelectionDAG &DAG) {
11803 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
11804 SDValue V1 = SVOp->getOperand(0);
11805 SDValue V2 = SVOp->getOperand(1);
11808 if (V2.getOpcode() != ISD::UNDEF)
11811 SmallVector<int, 16> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
11812 return getPSHUFB(MaskVals, V1, dl, DAG);
11815 // v16i8 shuffles - Prefer shuffles in the following order:
11816 // 1. [ssse3] 1 x pshufb
11817 // 2. [ssse3] 2 x pshufb + 1 x por
11818 // 3. [all] v8i16 shuffle + N x pextrw + rotate + pinsrw
11819 static SDValue LowerVECTOR_SHUFFLEv16i8(ShuffleVectorSDNode *SVOp,
11820 const X86Subtarget* Subtarget,
11821 SelectionDAG &DAG) {
11822 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
11823 SDValue V1 = SVOp->getOperand(0);
11824 SDValue V2 = SVOp->getOperand(1);
11826 ArrayRef<int> MaskVals = SVOp->getMask();
11828 // Promote splats to a larger type which usually leads to more efficient code.
11829 // FIXME: Is this true if pshufb is available?
11830 if (SVOp->isSplat())
11831 return PromoteSplat(SVOp, DAG);
11833 // If we have SSSE3, case 1 is generated when all result bytes come from
11834 // one of the inputs. Otherwise, case 2 is generated. If no SSSE3 is
11835 // present, fall back to case 3.
11837 // If SSSE3, use 1 pshufb instruction per vector with elements in the result.
11838 if (Subtarget->hasSSSE3()) {
11839 SmallVector<SDValue,16> pshufbMask;
11841 // If all result elements are from one input vector, then only translate
11842 // undef mask values to 0x80 (zero out result) in the pshufb mask.
11844 // Otherwise, we have elements from both input vectors, and must zero out
11845 // elements that come from V2 in the first mask, and V1 in the second mask
11846 // so that we can OR them together.
11847 for (unsigned i = 0; i != 16; ++i) {
11848 int EltIdx = MaskVals[i];
11849 if (EltIdx < 0 || EltIdx >= 16)
11851 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
11853 V1 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V1,
11854 DAG.getNode(ISD::BUILD_VECTOR, dl,
11855 MVT::v16i8, pshufbMask));
11857 // As PSHUFB will zero elements with negative indices, it's safe to ignore
11858 // the 2nd operand if it's undefined or zero.
11859 if (V2.getOpcode() == ISD::UNDEF ||
11860 ISD::isBuildVectorAllZeros(V2.getNode()))
11863 // Calculate the shuffle mask for the second input, shuffle it, and
11864 // OR it with the first shuffled input.
11865 pshufbMask.clear();
11866 for (unsigned i = 0; i != 16; ++i) {
11867 int EltIdx = MaskVals[i];
11868 EltIdx = (EltIdx < 16) ? 0x80 : EltIdx - 16;
11869 pshufbMask.push_back(DAG.getConstant(EltIdx, MVT::i8));
11871 V2 = DAG.getNode(X86ISD::PSHUFB, dl, MVT::v16i8, V2,
11872 DAG.getNode(ISD::BUILD_VECTOR, dl,
11873 MVT::v16i8, pshufbMask));
11874 return DAG.getNode(ISD::OR, dl, MVT::v16i8, V1, V2);
11877 // No SSSE3 - Calculate in place words and then fix all out of place words
11878 // with 0-16 extracts & inserts. Worst case is 16 bytes out of order from
11879 // the 16 different words that comprise the two doublequadword input vectors.
11880 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V1);
11881 V2 = DAG.getNode(ISD::BITCAST, dl, MVT::v8i16, V2);
11883 for (int i = 0; i != 8; ++i) {
11884 int Elt0 = MaskVals[i*2];
11885 int Elt1 = MaskVals[i*2+1];
11887 // This word of the result is all undef, skip it.
11888 if (Elt0 < 0 && Elt1 < 0)
11891 // This word of the result is already in the correct place, skip it.
11892 if ((Elt0 == i*2) && (Elt1 == i*2+1))
11895 SDValue Elt0Src = Elt0 < 16 ? V1 : V2;
11896 SDValue Elt1Src = Elt1 < 16 ? V1 : V2;
11899 // If Elt0 and Elt1 are defined, are consecutive, and can be loaded
11900 // together using a single extract, load it and store it.
11901 if ((Elt0 >= 0) && ((Elt0 + 1) == Elt1) && ((Elt0 & 1) == 0)) {
11902 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
11903 DAG.getIntPtrConstant(Elt1 / 2));
11904 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
11905 DAG.getIntPtrConstant(i));
11909 // If Elt1 is defined, extract it from the appropriate source. If the
11910 // source byte is not also odd, shift the extracted word left 8 bits;
11911 // otherwise clear the bottom 8 bits if we need to do an or.
11913 InsElt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16, Elt1Src,
11914 DAG.getIntPtrConstant(Elt1 / 2));
11915 if ((Elt1 & 1) == 0)
11916 InsElt = DAG.getNode(ISD::SHL, dl, MVT::i16, InsElt,
11918 TLI.getShiftAmountTy(InsElt.getValueType())));
11919 else if (Elt0 >= 0)
11920 InsElt = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt,
11921 DAG.getConstant(0xFF00, MVT::i16));
11923 // If Elt0 is defined, extract it from the appropriate source. If the
11924 // source byte is not also even, shift the extracted word right 8 bits. If
11925 // Elt1 was also defined, OR the extracted values together before
11926 // inserting them in the result.
11928 SDValue InsElt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i16,
11929 Elt0Src, DAG.getIntPtrConstant(Elt0 / 2));
11930 if ((Elt0 & 1) != 0)
11931 InsElt0 = DAG.getNode(ISD::SRL, dl, MVT::i16, InsElt0,
11933 TLI.getShiftAmountTy(InsElt0.getValueType())));
11934 else if (Elt1 >= 0)
11935 InsElt0 = DAG.getNode(ISD::AND, dl, MVT::i16, InsElt0,
11936 DAG.getConstant(0x00FF, MVT::i16));
11937 InsElt = Elt1 >= 0 ? DAG.getNode(ISD::OR, dl, MVT::i16, InsElt, InsElt0)
11940 NewV = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, MVT::v8i16, NewV, InsElt,
11941 DAG.getIntPtrConstant(i));
11943 return DAG.getNode(ISD::BITCAST, dl, MVT::v16i8, NewV);
11946 // v32i8 shuffles - Translate to VPSHUFB if possible.
11948 SDValue LowerVECTOR_SHUFFLEv32i8(ShuffleVectorSDNode *SVOp,
11949 const X86Subtarget *Subtarget,
11950 SelectionDAG &DAG) {
11951 MVT VT = SVOp->getSimpleValueType(0);
11952 SDValue V1 = SVOp->getOperand(0);
11953 SDValue V2 = SVOp->getOperand(1);
11955 SmallVector<int, 32> MaskVals(SVOp->getMask().begin(), SVOp->getMask().end());
11957 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
11958 bool V1IsAllZero = ISD::isBuildVectorAllZeros(V1.getNode());
11959 bool V2IsAllZero = ISD::isBuildVectorAllZeros(V2.getNode());
11961 // VPSHUFB may be generated if
11962 // (1) one of the input vectors is undefined or a zeroinitializer.
11963 // The mask value 0x80 puts 0 in the corresponding slot of the vector.
11964 // and (2) the mask indexes don't cross a 128-bit lane boundary.
11965 if (VT != MVT::v32i8 || !Subtarget->hasInt256() ||
11966 (!V2IsUndef && !V2IsAllZero && !V1IsAllZero))
11969 if (V1IsAllZero && !V2IsAllZero) {
11970 CommuteVectorShuffleMask(MaskVals, 32);
11973 return getPSHUFB(MaskVals, V1, dl, DAG);
11976 /// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4 wide
11977 /// ones, or rewriting v4i32 / v4f32 as 2 wide ones if possible. This can be
11978 /// done when every pair / quad of shuffle mask elements points to elements in
11979 /// the right sequence. e.g.
11980 /// vector_shuffle X, Y, <2, 3, | 10, 11, | 0, 1, | 14, 15>
11982 SDValue RewriteAsNarrowerShuffle(ShuffleVectorSDNode *SVOp,
11983 SelectionDAG &DAG) {
11984 MVT VT = SVOp->getSimpleValueType(0);
11986 unsigned NumElems = VT.getVectorNumElements();
11989 switch (VT.SimpleTy) {
11990 default: llvm_unreachable("Unexpected!");
11993 return SDValue(SVOp, 0);
11994 case MVT::v4f32: NewVT = MVT::v2f64; Scale = 2; break;
11995 case MVT::v4i32: NewVT = MVT::v2i64; Scale = 2; break;
11996 case MVT::v8i16: NewVT = MVT::v4i32; Scale = 2; break;
11997 case MVT::v16i8: NewVT = MVT::v4i32; Scale = 4; break;
11998 case MVT::v16i16: NewVT = MVT::v8i32; Scale = 2; break;
11999 case MVT::v32i8: NewVT = MVT::v8i32; Scale = 4; break;
12002 SmallVector<int, 8> MaskVec;
12003 for (unsigned i = 0; i != NumElems; i += Scale) {
12005 for (unsigned j = 0; j != Scale; ++j) {
12006 int EltIdx = SVOp->getMaskElt(i+j);
12010 StartIdx = (EltIdx / Scale);
12011 if (EltIdx != (int)(StartIdx*Scale + j))
12014 MaskVec.push_back(StartIdx);
12017 SDValue V1 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(0));
12018 SDValue V2 = DAG.getNode(ISD::BITCAST, dl, NewVT, SVOp->getOperand(1));
12019 return DAG.getVectorShuffle(NewVT, dl, V1, V2, &MaskVec[0]);
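// Following the example in the comment above this function: the v8i16 mask
// <2, 3, 10, 11, 0, 1, 14, 15> has Scale == 2, every pair is of the form
// <2k, 2k+1>, and so it is rewritten as the v4i32 shuffle mask <1, 5, 0, 7>.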
12022 /// getVZextMovL - Return a zero-extending vector move low node.
12024 static SDValue getVZextMovL(MVT VT, MVT OpVT,
12025 SDValue SrcOp, SelectionDAG &DAG,
12026 const X86Subtarget *Subtarget, SDLoc dl) {
12027 if (VT == MVT::v2f64 || VT == MVT::v4f32) {
12028 LoadSDNode *LD = nullptr;
12029 if (!isScalarLoadToVector(SrcOp.getNode(), &LD))
12030 LD = dyn_cast<LoadSDNode>(SrcOp);
12032 // movssrr and movsdrr do not clear top bits. Try to use movd, movq
12034 MVT ExtVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
12035 if ((ExtVT != MVT::i64 || Subtarget->is64Bit()) &&
12036 SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
12037 SrcOp.getOperand(0).getOpcode() == ISD::BITCAST &&
12038 SrcOp.getOperand(0).getOperand(0).getValueType() == ExtVT) {
12040 OpVT = (OpVT == MVT::v2f64) ? MVT::v2i64 : MVT::v4i32;
12041 return DAG.getNode(ISD::BITCAST, dl, VT,
12042 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
12043 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
12045 SrcOp.getOperand(0)
12051 return DAG.getNode(ISD::BITCAST, dl, VT,
12052 DAG.getNode(X86ISD::VZEXT_MOVL, dl, OpVT,
12053 DAG.getNode(ISD::BITCAST, dl,
12057 /// LowerVECTOR_SHUFFLE_256 - Handle all 256-bit wide vector shuffles
12058 /// which could not be matched by any known target specific shuffle
12060 LowerVECTOR_SHUFFLE_256(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
12062 SDValue NewOp = Compact8x32ShuffleNode(SVOp, DAG);
12063 if (NewOp.getNode())
12066 MVT VT = SVOp->getSimpleValueType(0);
12068 unsigned NumElems = VT.getVectorNumElements();
12069 unsigned NumLaneElems = NumElems / 2;
12072 MVT EltVT = VT.getVectorElementType();
12073 MVT NVT = MVT::getVectorVT(EltVT, NumLaneElems);
12076 SmallVector<int, 16> Mask;
12077 for (unsigned l = 0; l < 2; ++l) {
12078 // Build a shuffle mask for the output, discovering on the fly which
12079 // input vectors to use as shuffle operands (recorded in InputUsed).
12080 // If building a suitable shuffle vector proves too hard, then bail
12081 // out with UseBuildVector set.
12082 bool UseBuildVector = false;
12083 int InputUsed[2] = { -1, -1 }; // Not yet discovered.
12084 unsigned LaneStart = l * NumLaneElems;
12085 for (unsigned i = 0; i != NumLaneElems; ++i) {
12086 // The mask element. This indexes into the input.
12087 int Idx = SVOp->getMaskElt(i+LaneStart);
12089 // the mask element does not index into any input vector.
12090 Mask.push_back(-1);
12094 // The input vector this mask element indexes into.
12095 int Input = Idx / NumLaneElems;
12097 // Turn the index into an offset from the start of the input vector.
12098 Idx -= Input * NumLaneElems;
12100 // Find or create a shuffle vector operand to hold this input.
12102 for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
12103 if (InputUsed[OpNo] == Input)
12104 // This input vector is already an operand.
12106 if (InputUsed[OpNo] < 0) {
12107 // Create a new operand for this input vector.
12108 InputUsed[OpNo] = Input;
12113 if (OpNo >= array_lengthof(InputUsed)) {
12114 // More than two input vectors used! Give up on trying to create a
12115 // shuffle vector. Insert all elements into a BUILD_VECTOR instead.
12116 UseBuildVector = true;
12120 // Add the mask index for the new shuffle vector.
12121 Mask.push_back(Idx + OpNo * NumLaneElems);
12124 if (UseBuildVector) {
12125 SmallVector<SDValue, 16> SVOps;
12126 for (unsigned i = 0; i != NumLaneElems; ++i) {
12127 // The mask element. This indexes into the input.
12128 int Idx = SVOp->getMaskElt(i+LaneStart);
12130 SVOps.push_back(DAG.getUNDEF(EltVT));
12134 // The input vector this mask element indexes into.
12135 int Input = Idx / NumElems;
12137 // Turn the index into an offset from the start of the input vector.
12138 Idx -= Input * NumElems;
12140 // Extract the vector element by hand.
12141 SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
12142 SVOp->getOperand(Input),
12143 DAG.getIntPtrConstant(Idx)));
12146 // Construct the output using a BUILD_VECTOR.
12147 Output[l] = DAG.getNode(ISD::BUILD_VECTOR, dl, NVT, SVOps);
12148 } else if (InputUsed[0] < 0) {
12149 // No input vectors were used! The result is undefined.
12150 Output[l] = DAG.getUNDEF(NVT);
12152 SDValue Op0 = Extract128BitVector(SVOp->getOperand(InputUsed[0] / 2),
12153 (InputUsed[0] % 2) * NumLaneElems,
12155 // If only one input was used, use an undefined vector for the other.
12156 SDValue Op1 = (InputUsed[1] < 0) ? DAG.getUNDEF(NVT) :
12157 Extract128BitVector(SVOp->getOperand(InputUsed[1] / 2),
12158 (InputUsed[1] % 2) * NumLaneElems, DAG, dl);
12159 // At least one input vector was used. Create a new shuffle vector.
12160 Output[l] = DAG.getVectorShuffle(NVT, dl, Op0, Op1, &Mask[0]);
12166 // Concatenate the result back
12167 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, Output[0], Output[1]);
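// One illustrative case of the per-lane rebuild above: the v8i32 mask
// <0, 1, 2, 3, 12, 13, 14, 15> uses only V1's low half for the first output
// lane and only V2's high half for the second, so it becomes two v4i32
// shuffles of the extracted 128-bit halves (each with mask <0, 1, 2, 3>) that
// are glued back together with the CONCAT_VECTORS just above.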
12170 /// LowerVECTOR_SHUFFLE_128v4 - Handle all 128-bit wide vectors with
12171 /// 4 elements, and match them with several different shuffle types.
12173 LowerVECTOR_SHUFFLE_128v4(ShuffleVectorSDNode *SVOp, SelectionDAG &DAG) {
12174 SDValue V1 = SVOp->getOperand(0);
12175 SDValue V2 = SVOp->getOperand(1);
12177 MVT VT = SVOp->getSimpleValueType(0);
12179 assert(VT.is128BitVector() && "Unsupported vector size");
12181 std::pair<int, int> Locs[4];
12182 int Mask1[] = { -1, -1, -1, -1 };
12183 SmallVector<int, 8> PermMask(SVOp->getMask().begin(), SVOp->getMask().end());
12185 unsigned NumHi = 0;
12186 unsigned NumLo = 0;
12187 for (unsigned i = 0; i != 4; ++i) {
12188 int Idx = PermMask[i];
12190 Locs[i] = std::make_pair(-1, -1);
12192 assert(Idx < 8 && "Invalid VECTOR_SHUFFLE index!");
12194 Locs[i] = std::make_pair(0, NumLo);
12195 Mask1[NumLo] = Idx;
12198 Locs[i] = std::make_pair(1, NumHi);
12200 Mask1[2+NumHi] = Idx;
12206 if (NumLo <= 2 && NumHi <= 2) {
12207 // If no more than two elements come from either vector, this can be
12208 // implemented with two shuffles. The first shuffle gathers the elements.
12209 // The second shuffle, which takes the first shuffle as both of its
12210 // vector operands, puts the elements into the right order.
12211 V1 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12213 int Mask2[] = { -1, -1, -1, -1 };
12215 for (unsigned i = 0; i != 4; ++i)
12216 if (Locs[i].first != -1) {
12217 unsigned Idx = (i < 2) ? 0 : 4;
12218 Idx += Locs[i].first * 2 + Locs[i].second;
12222 return DAG.getVectorShuffle(VT, dl, V1, V1, &Mask2[0]);
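// Worked example for this branch (illustrative values): the 4-element mask
// <1, 4, 6, 3> gathers into Mask1 = <1, 3, 4, 6> (V1's elements 1 and 3 in
// the low half, V2's elements 0 and 2 in the high half), and the second
// shuffle of that temporary with itself uses Mask2 = <0, 2, 7, 5> to put each
// element into its final position.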
12225 if (NumLo == 3 || NumHi == 3) {
12226 // Otherwise, we must have three elements from one vector, call it X, and
12227 // one element from the other, call it Y. First, use a shufps to build an
12228 // intermediate vector with the one element from Y and the element from X
12229 // that will be in the same half in the final destination (the indexes don't
12230 // matter). Then, use a shufps to build the final vector, taking the half
12231 // containing the element from Y from the intermediate, and the other half
12234 // Normalize it so the 3 elements come from V1.
12235 CommuteVectorShuffleMask(PermMask, 4);
12239 // Find the element from V2.
12241 for (HiIndex = 0; HiIndex < 3; ++HiIndex) {
12242 int Val = PermMask[HiIndex];
12249 Mask1[0] = PermMask[HiIndex];
12251 Mask1[2] = PermMask[HiIndex^1];
12253 V2 = DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12255 if (HiIndex >= 2) {
12256 Mask1[0] = PermMask[0];
12257 Mask1[1] = PermMask[1];
12258 Mask1[2] = HiIndex & 1 ? 6 : 4;
12259 Mask1[3] = HiIndex & 1 ? 4 : 6;
12260 return DAG.getVectorShuffle(VT, dl, V1, V2, &Mask1[0]);
12263 Mask1[0] = HiIndex & 1 ? 2 : 0;
12264 Mask1[1] = HiIndex & 1 ? 0 : 2;
12265 Mask1[2] = PermMask[2];
12266 Mask1[3] = PermMask[3];
12271 return DAG.getVectorShuffle(VT, dl, V2, V1, &Mask1[0]);
12274 // Break it into (shuffle shuffle_hi, shuffle_lo).
12275 int LoMask[] = { -1, -1, -1, -1 };
12276 int HiMask[] = { -1, -1, -1, -1 };
12278 int *MaskPtr = LoMask;
12279 unsigned MaskIdx = 0;
12280 unsigned LoIdx = 0;
12281 unsigned HiIdx = 2;
12282 for (unsigned i = 0; i != 4; ++i) {
12289 int Idx = PermMask[i];
12291 Locs[i] = std::make_pair(-1, -1);
12292 } else if (Idx < 4) {
12293 Locs[i] = std::make_pair(MaskIdx, LoIdx);
12294 MaskPtr[LoIdx] = Idx;
12297 Locs[i] = std::make_pair(MaskIdx, HiIdx);
12298 MaskPtr[HiIdx] = Idx;
12303 SDValue LoShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &LoMask[0]);
12304 SDValue HiShuffle = DAG.getVectorShuffle(VT, dl, V1, V2, &HiMask[0]);
12305 int MaskOps[] = { -1, -1, -1, -1 };
12306 for (unsigned i = 0; i != 4; ++i)
12307 if (Locs[i].first != -1)
12308 MaskOps[i] = Locs[i].first * 4 + Locs[i].second;
12309 return DAG.getVectorShuffle(VT, dl, LoShuffle, HiShuffle, &MaskOps[0]);
12312 static bool MayFoldVectorLoad(SDValue V) {
12313 while (V.hasOneUse() && V.getOpcode() == ISD::BITCAST)
12314 V = V.getOperand(0);
12316 if (V.hasOneUse() && V.getOpcode() == ISD::SCALAR_TO_VECTOR)
12317 V = V.getOperand(0);
12318 if (V.hasOneUse() && V.getOpcode() == ISD::BUILD_VECTOR &&
12319 V.getNumOperands() == 2 && V.getOperand(1).getOpcode() == ISD::UNDEF)
12320 // BUILD_VECTOR (load), undef
12321 V = V.getOperand(0);
12323 return MayFoldLoad(V);
12327 SDValue getMOVDDup(SDValue &Op, SDLoc &dl, SDValue V1, SelectionDAG &DAG) {
12328 MVT VT = Op.getSimpleValueType();
12330 // Canonicalize to v2f64.
12331 V1 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, V1);
12332 return DAG.getNode(ISD::BITCAST, dl, VT,
12333 getTargetShuffleNode(X86ISD::MOVDDUP, dl, MVT::v2f64,
12338 SDValue getMOVLowToHigh(SDValue &Op, SDLoc &dl, SelectionDAG &DAG,
12340 SDValue V1 = Op.getOperand(0);
12341 SDValue V2 = Op.getOperand(1);
12342 MVT VT = Op.getSimpleValueType();
12344 assert(VT != MVT::v2i64 && "unsupported shuffle type");
12346 if (HasSSE2 && VT == MVT::v2f64)
12347 return getTargetShuffleNode(X86ISD::MOVLHPD, dl, VT, V1, V2, DAG);
12349 // v4f32 or v4i32: canonicalize to v4f32 (which is legal for SSE1)
12350 return DAG.getNode(ISD::BITCAST, dl, VT,
12351 getTargetShuffleNode(X86ISD::MOVLHPS, dl, MVT::v4f32,
12352 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V1),
12353 DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, V2), DAG));
12357 SDValue getMOVHighToLow(SDValue &Op, SDLoc &dl, SelectionDAG &DAG) {
12358 SDValue V1 = Op.getOperand(0);
12359 SDValue V2 = Op.getOperand(1);
12360 MVT VT = Op.getSimpleValueType();
12362 assert((VT == MVT::v4i32 || VT == MVT::v4f32) &&
12363 "unsupported shuffle type");
12365 if (V2.getOpcode() == ISD::UNDEF)
12369 return getTargetShuffleNode(X86ISD::MOVHLPS, dl, VT, V1, V2, DAG);
12373 SDValue getMOVLP(SDValue &Op, SDLoc &dl, SelectionDAG &DAG, bool HasSSE2) {
12374 SDValue V1 = Op.getOperand(0);
12375 SDValue V2 = Op.getOperand(1);
12376 MVT VT = Op.getSimpleValueType();
12377 unsigned NumElems = VT.getVectorNumElements();
12379 // Use MOVLPS and MOVLPD in case V1 or V2 are loads. During isel, the second
12380 // operand of these instructions is only memory, so check if there's a
12381 // potential load folding here, otherwise use SHUFPS or MOVSD to match the
12383 bool CanFoldLoad = false;
12385 // Trivial case, when V2 comes from a load.
12386 if (MayFoldVectorLoad(V2))
12387 CanFoldLoad = true;
12389 // When V1 is a load, it can be folded later into a store in isel, example:
12390 // (store (v4f32 (X86Movlps (load addr:$src1), VR128:$src2)), addr:$src1)
12392 // (MOVLPSmr addr:$src1, VR128:$src2)
12393 // So, recognize this potential and also use MOVLPS or MOVLPD
12394 else if (MayFoldVectorLoad(V1) && MayFoldIntoStore(Op))
12395 CanFoldLoad = true;
12397 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12399 if (HasSSE2 && NumElems == 2)
12400 return getTargetShuffleNode(X86ISD::MOVLPD, dl, VT, V1, V2, DAG);
12403 // If we don't care about the second element, proceed to use movss.
12404 if (SVOp->getMaskElt(1) != -1)
12405 return getTargetShuffleNode(X86ISD::MOVLPS, dl, VT, V1, V2, DAG);
12408 // movl and movlp will both match v2i64, but v2i64 is never matched by
12409 // movl earlier because we keep it strict to avoid interfering with the movlp
12410 // load-folding logic (see the code above the getMOVLP call). Match it here
12411 // instead; this is not pretty, but it will stay this way until all shuffle
12412 // matching is moved to x86-specific nodes. Note that for the first condition
12413 // all types are matched with movsd.
12415 // FIXME: isMOVLMask should be checked and matched before getMOVLP,
12416 // as to remove this logic from here, as much as possible
12417 if (NumElems == 2 || !isMOVLMask(SVOp->getMask(), VT))
12418 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12419 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12422 assert(VT != MVT::v4i32 && "unsupported shuffle type");
12424 // Invert the operand order and use SHUFPS to match it.
12425 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V2, V1,
12426 getShuffleSHUFImmediate(SVOp), DAG);
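/// NarrowVectorLoadToElement - Given a load of a whole vector, build a new
/// scalar load of only element 'Index', addressed at
/// (base + Index * element store size), so that a single element can be
/// folded into the consuming instruction.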
12429 static SDValue NarrowVectorLoadToElement(LoadSDNode *Load, unsigned Index,
12430 SelectionDAG &DAG) {
12432 MVT VT = Load->getSimpleValueType(0);
12433 MVT EVT = VT.getVectorElementType();
12434 SDValue Addr = Load->getOperand(1);
12435 SDValue NewAddr = DAG.getNode(
12436 ISD::ADD, dl, Addr.getSimpleValueType(), Addr,
12437 DAG.getConstant(Index * EVT.getStoreSize(), Addr.getSimpleValueType()));
12440 DAG.getLoad(EVT, dl, Load->getChain(), NewAddr,
12441 DAG.getMachineFunction().getMachineMemOperand(
12442 Load->getMemOperand(), 0, EVT.getStoreSize()));
12446 // It is only safe to call this function if isINSERTPSMask is true for
12447 // this shufflevector mask.
12448 static SDValue getINSERTPS(ShuffleVectorSDNode *SVOp, SDLoc &dl,
12449 SelectionDAG &DAG) {
12450 // Generate an insertps instruction when inserting an f32 from memory onto a
12451 // v4f32 or when copying a member from one v4f32 to another.
12452 // We also use it for transferring i32 from one register to another,
12453 // since it simply copies the same bits.
12454 // If we're transferring an i32 from memory to a specific element in a
12455 // register, we output a generic DAG that will match the PINSRD instruction.
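// As a reminder of how the insertps immediate built below is laid out:
// bits [7:6] select the source element, bits [5:4] select the destination
// element, and bits [3:0] are the zero mask (see the DestIndex << 4 and
// SrcIndex << 6 computations further down).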
12457 MVT VT = SVOp->getSimpleValueType(0);
12458 MVT EVT = VT.getVectorElementType();
12459 SDValue V1 = SVOp->getOperand(0);
12460 SDValue V2 = SVOp->getOperand(1);
12461 auto Mask = SVOp->getMask();
12462 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
12463 "unsupported vector type for insertps/pinsrd");
12465 auto FromV1Predicate = [](const int &i) { return i < 4 && i > -1; };
12466 auto FromV2Predicate = [](const int &i) { return i >= 4; };
12467 int FromV1 = std::count_if(Mask.begin(), Mask.end(), FromV1Predicate);
12471 unsigned DestIndex;
12475 DestIndex = std::find_if(Mask.begin(), Mask.end(), FromV1Predicate) -
12478 // If we have 1 element from each vector, we have to check if we're
12479 // changing V1's element's place. If so, we're done. Otherwise, we
12480 // should assume we're changing V2's element's place and behave accordingly.
12482 int FromV2 = std::count_if(Mask.begin(), Mask.end(), FromV2Predicate);
12483 assert(DestIndex <= INT32_MAX && "truncated destination index");
12484 if (FromV1 == FromV2 &&
12485 static_cast<int>(DestIndex) == Mask[DestIndex] % 4) {
12489 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12492 assert(std::count_if(Mask.begin(), Mask.end(), FromV2Predicate) == 1 &&
12493 "More than one element from V1 and from V2, or no elements from one "
12494 "of the vectors. This case should not have returned true from "
12499 std::find_if(Mask.begin(), Mask.end(), FromV2Predicate) - Mask.begin();
12502 // Get an index into the source vector in the range [0,4) (the mask is
12503 // in the range [0,8) because it can address V1 and V2)
12504 unsigned SrcIndex = Mask[DestIndex] % 4;
12505 if (MayFoldLoad(From)) {
12506 // Trivial case, when From comes from a load and is only used by the
12507 // shuffle. Make it use insertps from the vector that we need from that load.
12510 NarrowVectorLoadToElement(cast<LoadSDNode>(From), SrcIndex, DAG);
12511 if (!NewLoad.getNode())
12514 if (EVT == MVT::f32) {
12515 // Create this as a scalar to vector to match the instruction pattern.
12516 SDValue LoadScalarToVector =
12517 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, NewLoad);
12518 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4);
12519 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, LoadScalarToVector,
12521 } else { // EVT == MVT::i32
12522 // If we're getting an i32 from memory, use an INSERT_VECTOR_ELT
12523 // instruction, to match the PINSRD instruction, which loads an i32 to a
12524 // certain vector element.
12525 return DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, To, NewLoad,
12526 DAG.getConstant(DestIndex, MVT::i32));
12530 // Vector-element-to-vector
12531 SDValue InsertpsMask = DAG.getIntPtrConstant(DestIndex << 4 | SrcIndex << 6);
12532 return DAG.getNode(X86ISD::INSERTPS, dl, VT, To, From, InsertpsMask);
12535 // Reduce a vector shuffle to zext.
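// For example, with an expansion ratio of 2, the v8i16 shuffle mask
// <0, -1, 1, -1, 2, -1, 3, -1> (odd elements undef) is rewritten as
//   (v8i16 (bitcast (v4i32 (X86ISD::VZEXT %V1))))
// so the undef slots become the zero-extended high halves.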
12536 static SDValue LowerVectorIntExtend(SDValue Op, const X86Subtarget *Subtarget,
12537 SelectionDAG &DAG) {
12538 // PMOVZX is only available from SSE41.
12539 if (!Subtarget->hasSSE41())
12542 MVT VT = Op.getSimpleValueType();
12544 // Only AVX2 supports 256-bit vector integer extension.
12545 if (!Subtarget->hasInt256() && VT.is256BitVector())
12548 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12550 SDValue V1 = Op.getOperand(0);
12551 SDValue V2 = Op.getOperand(1);
12552 unsigned NumElems = VT.getVectorNumElements();
12554 // Extension is a unary operation, so V2 must be undef, and the element type
12555 // of the source vector must be smaller than i64.
12556 if (V2.getOpcode() != ISD::UNDEF || !VT.isInteger() ||
12557 VT.getVectorElementType() == MVT::i64)
12560 // Find the expansion ratio, e.g. expanding from i8 to i32 has a ratio of 4.
12561 unsigned Shift = 1; // Start from 2, i.e. 1 << 1.
12562 while ((1U << Shift) < NumElems) {
12563 if (SVOp->getMaskElt(1U << Shift) == 1)
12566 // The maximal ratio is 8, i.e. from i8 to i64.
12571 // Check the shuffle mask.
12572 unsigned Mask = (1U << Shift) - 1;
12573 for (unsigned i = 0; i != NumElems; ++i) {
12574 int EltIdx = SVOp->getMaskElt(i);
12575 if ((i & Mask) != 0 && EltIdx != -1)
12577 if ((i & Mask) == 0 && (unsigned)EltIdx != (i >> Shift))
12581 unsigned NBits = VT.getVectorElementType().getSizeInBits() << Shift;
12582 MVT NeVT = MVT::getIntegerVT(NBits);
12583 MVT NVT = MVT::getVectorVT(NeVT, NumElems >> Shift);
12585 if (!DAG.getTargetLoweringInfo().isTypeLegal(NVT))
12588 return DAG.getNode(ISD::BITCAST, DL, VT,
12589 DAG.getNode(X86ISD::VZEXT, DL, NVT, V1));
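/// NormalizeVectorShuffle - Canonicalize a generic shuffle before the target
/// matching below: fold all-zero shuffles, turn foldable-load splats into
/// broadcasts, recognize integer-extending shuffles, and rewrite v8i16/v16i8
/// (and their 256-bit counterparts) shuffles with wider elements when that is
/// profitable.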
12592 static SDValue NormalizeVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
12593 SelectionDAG &DAG) {
12594 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12595 MVT VT = Op.getSimpleValueType();
12597 SDValue V1 = Op.getOperand(0);
12598 SDValue V2 = Op.getOperand(1);
12600 if (isZeroShuffle(SVOp))
12601 return getZeroVector(VT, Subtarget, DAG, dl);
12603 // Handle splat operations
12604 if (SVOp->isSplat()) {
12605 // Use vbroadcast whenever the splat comes from a foldable load
12606 SDValue Broadcast = LowerVectorBroadcast(Op, Subtarget, DAG);
12607 if (Broadcast.getNode())
12611 // Check integer expanding shuffles.
12612 SDValue NewOp = LowerVectorIntExtend(Op, Subtarget, DAG);
12613 if (NewOp.getNode())
12616 // If the shuffle can be profitably rewritten as a narrower shuffle, then do it.
12618 if (VT == MVT::v8i16 || VT == MVT::v16i8 || VT == MVT::v16i16 ||
12619 VT == MVT::v32i8) {
12620 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12621 if (NewOp.getNode())
12622 return DAG.getNode(ISD::BITCAST, dl, VT, NewOp);
12623 } else if (VT.is128BitVector() && Subtarget->hasSSE2()) {
12624 // FIXME: Figure out a cleaner way to do this.
12625 if (ISD::isBuildVectorAllZeros(V2.getNode())) {
12626 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12627 if (NewOp.getNode()) {
12628 MVT NewVT = NewOp.getSimpleValueType();
12629 if (isCommutedMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(),
12630 NewVT, true, false))
12631 return getVZextMovL(VT, NewVT, NewOp.getOperand(0), DAG, Subtarget,
12634 } else if (ISD::isBuildVectorAllZeros(V1.getNode())) {
12635 SDValue NewOp = RewriteAsNarrowerShuffle(SVOp, DAG);
12636 if (NewOp.getNode()) {
12637 MVT NewVT = NewOp.getSimpleValueType();
12638 if (isMOVLMask(cast<ShuffleVectorSDNode>(NewOp)->getMask(), NewVT))
12639 return getVZextMovL(VT, NewVT, NewOp.getOperand(1), DAG, Subtarget,
12648 X86TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const {
12649 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
12650 SDValue V1 = Op.getOperand(0);
12651 SDValue V2 = Op.getOperand(1);
12652 MVT VT = Op.getSimpleValueType();
12654 unsigned NumElems = VT.getVectorNumElements();
12655 bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
12656 bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
12657 bool V1IsSplat = false;
12658 bool V2IsSplat = false;
12659 bool HasSSE2 = Subtarget->hasSSE2();
12660 bool HasFp256 = Subtarget->hasFp256();
12661 bool HasInt256 = Subtarget->hasInt256();
12662 MachineFunction &MF = DAG.getMachineFunction();
12664 MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
12666 // Check if we should use the experimental vector shuffle lowering. If so,
12667 // delegate completely to that code path.
12668 if (ExperimentalVectorShuffleLowering)
12669 return lowerVectorShuffle(Op, Subtarget, DAG);
12671 assert(VT.getSizeInBits() != 64 && "Can't lower MMX shuffles");
12673 if (V1IsUndef && V2IsUndef)
12674 return DAG.getUNDEF(VT);
12676 // When we create a shuffle node, the UNDEF operand is placed second, but in
12677 // some cases the first operand may later be transformed into UNDEF.
12678 // In that case we should just commute the node.
12680 return DAG.getCommutedVectorShuffle(*SVOp);
12682 // Vector shuffle lowering takes 3 steps:
12684 // 1) Normalize the input vectors. Here splats, zeroed vectors, profitable
12685 // narrowing and commutation of operands should be handled.
12686 // 2) Matching of shuffles with known shuffle masks to x86 target-specific nodes.
12688 // 3) Rewriting of unmatched masks into new generic shuffle operations,
12689 // so the shuffle can be broken into other shuffles and the legalizer can
12690 // try the lowering again.
12692 // The general idea is that no vector_shuffle operation should be left to
12693 // be matched during isel; all of them must be converted to a target-specific node.
12696 // Normalize the input vectors. Here splats, zeroed vectors, profitable
12697 // narrowing and commutation of operands should be handled. The actual code
12698 // doesn't include all of those, work in progress...
12699 SDValue NewOp = NormalizeVectorShuffle(Op, Subtarget, DAG);
12700 if (NewOp.getNode())
12703 SmallVector<int, 8> M(SVOp->getMask().begin(), SVOp->getMask().end());
12705 // NOTE: isPSHUFDMask can also match both masks below (unpckl_undef and
12706 // unpckh_undef). Only use pshufd if speed is more important than size.
12707 if (OptForSize && isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12708 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12709 if (OptForSize && isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12710 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12712 if (isMOVDDUPMask(M, VT) && Subtarget->hasSSE3() &&
12713 V2IsUndef && MayFoldVectorLoad(V1))
12714 return getMOVDDup(Op, dl, V1, DAG);
12716 if (isMOVHLPS_v_undef_Mask(M, VT))
12717 return getMOVHighToLow(Op, dl, DAG);
12719 // Used to match splats.
12720 if (HasSSE2 && isUNPCKHMask(M, VT, HasInt256) && V2IsUndef &&
12721 (VT == MVT::v2f64 || VT == MVT::v2i64))
12722 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12724 if (isPSHUFDMask(M, VT)) {
12725 // The mask matched by the if above can be selected into several different
12726 // instructions during isel, not only pshufd as the name suggests.
12727 // Sad but true; emulate that behavior for now...
12728 if (isMOVDDUPMask(M, VT) && ((VT == MVT::v4f32 || VT == MVT::v2i64)))
12729 return getTargetShuffleNode(X86ISD::MOVLHPS, dl, VT, V1, V1, DAG);
12731 unsigned TargetMask = getShuffleSHUFImmediate(SVOp);
12733 if (HasSSE2 && (VT == MVT::v4f32 || VT == MVT::v4i32))
12734 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1, TargetMask, DAG);
12736 if (HasFp256 && (VT == MVT::v4f32 || VT == MVT::v2f64))
12737 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1, TargetMask,
12740 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V1,
12744 if (isPALIGNRMask(M, VT, Subtarget))
12745 return getTargetShuffleNode(X86ISD::PALIGNR, dl, VT, V1, V2,
12746 getShufflePALIGNRImmediate(SVOp),
12749 if (isVALIGNMask(M, VT, Subtarget))
12750 return getTargetShuffleNode(X86ISD::VALIGN, dl, VT, V1, V2,
12751 getShuffleVALIGNImmediate(SVOp),
12754 // Check if this can be converted into a logical shift.
12755 bool isLeft = false;
12756 unsigned ShAmt = 0;
12758 bool isShift = HasSSE2 && isVectorShift(SVOp, DAG, isLeft, ShVal, ShAmt);
12759 if (isShift && ShVal.hasOneUse()) {
12760 // If the shifted value has multiple uses, it may be cheaper to use
12761 // v_set0 + movlhps or movhlps, etc.
12762 MVT EltVT = VT.getVectorElementType();
12763 ShAmt *= EltVT.getSizeInBits();
12764 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12767 if (isMOVLMask(M, VT)) {
12768 if (ISD::isBuildVectorAllZeros(V1.getNode()))
12769 return getVZextMovL(VT, VT, V2, DAG, Subtarget, dl);
12770 if (!isMOVLPMask(M, VT)) {
12771 if (HasSSE2 && (VT == MVT::v2i64 || VT == MVT::v2f64))
12772 return getTargetShuffleNode(X86ISD::MOVSD, dl, VT, V1, V2, DAG);
12774 if (VT == MVT::v4i32 || VT == MVT::v4f32)
12775 return getTargetShuffleNode(X86ISD::MOVSS, dl, VT, V1, V2, DAG);
12779 // FIXME: fold these into legal mask.
12780 if (isMOVLHPSMask(M, VT) && !isUNPCKLMask(M, VT, HasInt256))
12781 return getMOVLowToHigh(Op, dl, DAG, HasSSE2);
12783 if (isMOVHLPSMask(M, VT))
12784 return getMOVHighToLow(Op, dl, DAG);
12786 if (V2IsUndef && isMOVSHDUPMask(M, VT, Subtarget))
12787 return getTargetShuffleNode(X86ISD::MOVSHDUP, dl, VT, V1, DAG);
12789 if (V2IsUndef && isMOVSLDUPMask(M, VT, Subtarget))
12790 return getTargetShuffleNode(X86ISD::MOVSLDUP, dl, VT, V1, DAG);
12792 if (isMOVLPMask(M, VT))
12793 return getMOVLP(Op, dl, DAG, HasSSE2);
12795 if (ShouldXformToMOVHLPS(M, VT) ||
12796 ShouldXformToMOVLP(V1.getNode(), V2.getNode(), M, VT))
12797 return DAG.getCommutedVectorShuffle(*SVOp);
12800 // No better options. Use a vshldq / vsrldq.
12801 MVT EltVT = VT.getVectorElementType();
12802 ShAmt *= EltVT.getSizeInBits();
12803 return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this, dl);
12806 bool Commuted = false;
12807 // FIXME: This should also accept a bitcast of a splat? Be careful, not
12808 // 1,1,1,1 -> v8i16 though.
12809 BitVector UndefElements;
12810 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V1.getNode()))
12811 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12813 if (auto *BVOp = dyn_cast<BuildVectorSDNode>(V2.getNode()))
12814 if (BVOp->getConstantSplatNode(&UndefElements) && UndefElements.none())
12817 // Canonicalize the splat or undef, if present, to be on the RHS.
12818 if (!V2IsUndef && V1IsSplat && !V2IsSplat) {
12819 CommuteVectorShuffleMask(M, NumElems);
12821 std::swap(V1IsSplat, V2IsSplat);
12825 if (isCommutedMOVLMask(M, VT, V2IsSplat, V2IsUndef)) {
12826 // Shuffling the low element of V1 into undef; just return V1.
12829 // If V2 is a splat, the mask may be malformed such as <4,3,3,3>, which
12830 // the instruction selector will not match, so get a canonical MOVL with
12831 // swapped operands to undo the commute.
12832 return getMOVL(DAG, dl, VT, V2, V1);
12835 if (isUNPCKLMask(M, VT, HasInt256))
12836 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12838 if (isUNPCKHMask(M, VT, HasInt256))
12839 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12842 // Normalize the mask so all entries that point to V2 point to its first
12843 // element, then try to match unpck{h|l} again. If they match, return a
12844 // new vector_shuffle with the corrected mask.
12845 SmallVector<int, 8> NewMask(M.begin(), M.end());
12846 NormalizeMask(NewMask, NumElems);
12847 if (isUNPCKLMask(NewMask, VT, HasInt256, true))
12848 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12849 if (isUNPCKHMask(NewMask, VT, HasInt256, true))
12850 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12854 // Commute it back and try unpck* again.
12855 // FIXME: this seems wrong.
12856 CommuteVectorShuffleMask(M, NumElems);
12858 std::swap(V1IsSplat, V2IsSplat);
12860 if (isUNPCKLMask(M, VT, HasInt256))
12861 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V2, DAG);
12863 if (isUNPCKHMask(M, VT, HasInt256))
12864 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V2, DAG);
12867 // Normalize the node to match x86 shuffle ops if needed
12868 if (!V2IsUndef && (isSHUFPMask(M, VT, /* Commuted */ true)))
12869 return DAG.getCommutedVectorShuffle(*SVOp);
12871 // The checks below are all present in isShuffleMaskLegal, but they are
12872 // inlined here right now to enable us to directly emit target specific
12873 // nodes, and remove one by one until they don't return Op anymore.
12875 if (ShuffleVectorSDNode::isSplatMask(&M[0], VT) &&
12876 SVOp->getSplatIndex() == 0 && V2IsUndef) {
12877 if (VT == MVT::v2f64 || VT == MVT::v2i64)
12878 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12881 if (isPSHUFHWMask(M, VT, HasInt256))
12882 return getTargetShuffleNode(X86ISD::PSHUFHW, dl, VT, V1,
12883 getShufflePSHUFHWImmediate(SVOp),
12886 if (isPSHUFLWMask(M, VT, HasInt256))
12887 return getTargetShuffleNode(X86ISD::PSHUFLW, dl, VT, V1,
12888 getShufflePSHUFLWImmediate(SVOp),
12891 unsigned MaskValue;
12892 if (isBlendMask(M, VT, Subtarget->hasSSE41(), HasInt256, &MaskValue))
12893 return LowerVECTOR_SHUFFLEtoBlend(SVOp, MaskValue, Subtarget, DAG);
12895 if (isSHUFPMask(M, VT))
12896 return getTargetShuffleNode(X86ISD::SHUFP, dl, VT, V1, V2,
12897 getShuffleSHUFImmediate(SVOp), DAG);
12899 if (isUNPCKL_v_undef_Mask(M, VT, HasInt256))
12900 return getTargetShuffleNode(X86ISD::UNPCKL, dl, VT, V1, V1, DAG);
12901 if (isUNPCKH_v_undef_Mask(M, VT, HasInt256))
12902 return getTargetShuffleNode(X86ISD::UNPCKH, dl, VT, V1, V1, DAG);
12904 //===--------------------------------------------------------------------===//
12905 // Generate target-specific nodes for 128- or 256-bit shuffles that are only
12906 // supported in the AVX instruction set.
12909 // Handle VMOVDDUPY permutations
12910 if (V2IsUndef && isMOVDDUPYMask(M, VT, HasFp256))
12911 return getTargetShuffleNode(X86ISD::MOVDDUP, dl, VT, V1, DAG);
12913 // Handle VPERMILPS/D* permutations
12914 if (isVPERMILPMask(M, VT)) {
12915 if ((HasInt256 && VT == MVT::v8i32) || VT == MVT::v16i32)
12916 return getTargetShuffleNode(X86ISD::PSHUFD, dl, VT, V1,
12917 getShuffleSHUFImmediate(SVOp), DAG);
12918 return getTargetShuffleNode(X86ISD::VPERMILPI, dl, VT, V1,
12919 getShuffleSHUFImmediate(SVOp), DAG);
12923 if (VT.is512BitVector() && isINSERT64x4Mask(M, VT, &Idx))
12924 return Insert256BitVector(V1, Extract256BitVector(V2, 0, DAG, dl),
12925 Idx*(NumElems/2), DAG, dl);
12927 // Handle VPERM2F128/VPERM2I128 permutations
12928 if (isVPERM2X128Mask(M, VT, HasFp256))
12929 return getTargetShuffleNode(X86ISD::VPERM2X128, dl, VT, V1,
12930 V2, getShuffleVPERM2X128Immediate(SVOp), DAG);
12932 if (Subtarget->hasSSE41() && isINSERTPSMask(M, VT))
12933 return getINSERTPS(SVOp, dl, DAG);
12936 if (V2IsUndef && HasInt256 && isPermImmMask(M, VT, Imm8))
12937 return getTargetShuffleNode(X86ISD::VPERMI, dl, VT, V1, Imm8, DAG);
12939 if ((V2IsUndef && HasInt256 && VT.is256BitVector() && NumElems == 8) ||
12940 VT.is512BitVector()) {
12941 MVT MaskEltVT = MVT::getIntegerVT(VT.getVectorElementType().getSizeInBits());
12942 MVT MaskVectorVT = MVT::getVectorVT(MaskEltVT, NumElems);
12943 SmallVector<SDValue, 16> permclMask;
12944 for (unsigned i = 0; i != NumElems; ++i) {
12945 permclMask.push_back(DAG.getConstant((M[i]>=0) ? M[i] : 0, MaskEltVT));
12948 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, MaskVectorVT, permclMask);
12950 // The bitcast is for VPERMPS, since the mask is v8i32 but the node takes v8f32.
12951 return DAG.getNode(X86ISD::VPERMV, dl, VT,
12952 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V1);
12953 return DAG.getNode(X86ISD::VPERMV3, dl, VT, V1,
12954 DAG.getNode(ISD::BITCAST, dl, VT, Mask), V2);
12957 //===--------------------------------------------------------------------===//
12958 // Since no target specific shuffle was selected for this generic one,
12959 // lower it into other known shuffles. FIXME: this isn't true yet, but
12960 // this is the plan.
12963 // Handle v8i16 specifically since SSE can do byte extraction and insertion.
12964 if (VT == MVT::v8i16) {
12965 SDValue NewOp = LowerVECTOR_SHUFFLEv8i16(Op, Subtarget, DAG);
12966 if (NewOp.getNode())
12970 if (VT == MVT::v16i16 && HasInt256) {
12971 SDValue NewOp = LowerVECTOR_SHUFFLEv16i16(Op, DAG);
12972 if (NewOp.getNode())
12976 if (VT == MVT::v16i8) {
12977 SDValue NewOp = LowerVECTOR_SHUFFLEv16i8(SVOp, Subtarget, DAG);
12978 if (NewOp.getNode())
12982 if (VT == MVT::v32i8) {
12983 SDValue NewOp = LowerVECTOR_SHUFFLEv32i8(SVOp, Subtarget, DAG);
12984 if (NewOp.getNode())
12988 // Handle all 128-bit wide vectors with 4 elements, and match them with
12989 // several different shuffle types.
12990 if (NumElems == 4 && VT.is128BitVector())
12991 return LowerVECTOR_SHUFFLE_128v4(SVOp, DAG);
12993 // Handle general 256-bit shuffles
12994 if (VT.is256BitVector())
12995 return LowerVECTOR_SHUFFLE_256(SVOp, DAG);
13000 // This function assumes its argument is a BUILD_VECTOR of constants or
13001 // undef SDNodes, i.e. ISD::isBuildVectorOfConstantSDNodes(BuildVector) is true.
13003 static bool BUILD_VECTORtoBlendMask(BuildVectorSDNode *BuildVector,
13004 unsigned &MaskValue) {
13006 unsigned NumElems = BuildVector->getNumOperands();
13007 // There are 2 lanes if (NumElems > 8), and 1 lane otherwise.
13008 unsigned NumLanes = (NumElems - 1) / 8 + 1;
13009 unsigned NumElemsInLane = NumElems / NumLanes;
13011 // Blends for v16i16 must be symmetric between the two 128-bit lanes.
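// (The AVX2 VPBLENDW form reuses one 8-bit immediate for both 128-bit halves,
// so element i and element i + NumElemsInLane must make the same choice.)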
13012 for (unsigned i = 0; i < NumElemsInLane; ++i) {
13013 SDValue EltCond = BuildVector->getOperand(i);
13014 SDValue SndLaneEltCond =
13015 (NumLanes == 2) ? BuildVector->getOperand(i + NumElemsInLane) : EltCond;
13017 int Lane1Cond = -1, Lane2Cond = -1;
13018 if (isa<ConstantSDNode>(EltCond))
13019 Lane1Cond = !isZero(EltCond);
13020 if (isa<ConstantSDNode>(SndLaneEltCond))
13021 Lane2Cond = !isZero(SndLaneEltCond);
13023 if (Lane1Cond == Lane2Cond || Lane2Cond < 0)
13024 // Lane1Cond != 0, means we want the first argument.
13025 // Lane1Cond == 0, means we want the second argument.
13026 // The encoding of this argument is 0 for the first argument, 1
13027 // for the second. Therefore, invert the condition.
13028 MaskValue |= !Lane1Cond << i;
13029 else if (Lane1Cond < 0)
13030 MaskValue |= !Lane2Cond << i;
13037 /// \brief Try to lower a VSELECT instruction to an immediate-controlled blend node.
13039 static SDValue lowerVSELECTtoBLENDI(SDValue Op, const X86Subtarget *Subtarget,
13040 SelectionDAG &DAG) {
13041 SDValue Cond = Op.getOperand(0);
13042 SDValue LHS = Op.getOperand(1);
13043 SDValue RHS = Op.getOperand(2);
13045 MVT VT = Op.getSimpleValueType();
13046 MVT EltVT = VT.getVectorElementType();
13047 unsigned NumElems = VT.getVectorNumElements();
13049 // There is no blend with immediate in AVX-512.
13050 if (VT.is512BitVector())
13053 if (!Subtarget->hasSSE41() || EltVT == MVT::i8)
13055 if (!Subtarget->hasInt256() && VT == MVT::v16i16)
13058 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
13061 // Check the mask for BLEND and build the value.
13062 unsigned MaskValue = 0;
13063 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
13066 // Convert i32 vectors to floating point if the target does not have AVX2.
13067 // AVX2 introduced the VPBLENDD instruction for 128- and 256-bit vectors.
13069 if (EltVT == MVT::i64 || (EltVT == MVT::i32 && !Subtarget->hasInt256())) {
13070 BlendVT = MVT::getVectorVT(MVT::getFloatingPointVT(EltVT.getSizeInBits()),
13072 LHS = DAG.getNode(ISD::BITCAST, dl, VT, LHS);
13073 RHS = DAG.getNode(ISD::BITCAST, dl, VT, RHS);
13076 SDValue Ret = DAG.getNode(X86ISD::BLENDI, dl, BlendVT, LHS, RHS,
13077 DAG.getConstant(MaskValue, MVT::i32));
13078 return DAG.getNode(ISD::BITCAST, dl, VT, Ret);
13081 SDValue X86TargetLowering::LowerVSELECT(SDValue Op, SelectionDAG &DAG) const {
13082 // A vselect where all conditions and data are constants can be optimized into
13083 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
13084 if (ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(0).getNode()) &&
13085 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(1).getNode()) &&
13086 ISD::isBuildVectorOfConstantSDNodes(Op.getOperand(2).getNode()))
13089 SDValue BlendOp = lowerVSELECTtoBLENDI(Op, Subtarget, DAG);
13090 if (BlendOp.getNode())
13093 // Some types for vselect were previously set to Expand, not Legal or
13094 // Custom. Return an empty SDValue so we fall through to Expand, after
13095 // the Custom lowering phase.
13096 MVT VT = Op.getSimpleValueType();
13097 switch (VT.SimpleTy) {
13102 if (Subtarget->hasBWI() && Subtarget->hasVLX())
13107 // We couldn't create a "Blend with immediate" node.
13108 // This node should still be legal, but we'll have to emit a blendv* instruction.
13113 static SDValue LowerEXTRACT_VECTOR_ELT_SSE4(SDValue Op, SelectionDAG &DAG) {
13114 MVT VT = Op.getSimpleValueType();
13117 if (!Op.getOperand(0).getSimpleValueType().is128BitVector())
13120 if (VT.getSizeInBits() == 8) {
13121 SDValue Extract = DAG.getNode(X86ISD::PEXTRB, dl, MVT::i32,
13122 Op.getOperand(0), Op.getOperand(1));
13123 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
13124 DAG.getValueType(VT));
13125 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13128 if (VT.getSizeInBits() == 16) {
13129 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13130 // If Idx is 0, it's cheaper to do a move instead of a pextrw.
13132 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
13133 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13134 DAG.getNode(ISD::BITCAST, dl,
13137 Op.getOperand(1)));
13138 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, MVT::i32,
13139 Op.getOperand(0), Op.getOperand(1));
13140 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, MVT::i32, Extract,
13141 DAG.getValueType(VT));
13142 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13145 if (VT == MVT::f32) {
13146 // EXTRACTPS outputs to a GPR32 register which will require a movd to copy
13147 // the result back to FR32 register. It's only worth matching if the
13148 // result has a single use which is a store or a bitcast to i32. And in
13149 // the case of a store, it's not worth it if the index is a constant 0,
13150 // because a MOVSSmr can be used instead, which is smaller and faster.
13151 if (!Op.hasOneUse())
13153 SDNode *User = *Op.getNode()->use_begin();
13154 if ((User->getOpcode() != ISD::STORE ||
13155 (isa<ConstantSDNode>(Op.getOperand(1)) &&
13156 cast<ConstantSDNode>(Op.getOperand(1))->isNullValue())) &&
13157 (User->getOpcode() != ISD::BITCAST ||
13158 User->getValueType(0) != MVT::i32))
13160 SDValue Extract = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13161 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32,
13164 return DAG.getNode(ISD::BITCAST, dl, MVT::f32, Extract);
13167 if (VT == MVT::i32 || VT == MVT::i64) {
13168 // ExtractPS/pextrq works with constant index.
13169 if (isa<ConstantSDNode>(Op.getOperand(1)))
13175 /// Extract one bit from a mask vector, like v16i1 or v8i1.
13176 /// AVX-512 feature.
13178 X86TargetLowering::ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const {
13179 SDValue Vec = Op.getOperand(0);
13181 MVT VecVT = Vec.getSimpleValueType();
13182 SDValue Idx = Op.getOperand(1);
13183 MVT EltVT = Op.getSimpleValueType();
13185 assert((EltVT == MVT::i1) && "Unexpected operands in ExtractBitFromMaskVector");
13186 assert((VecVT.getVectorNumElements() <= 16 || Subtarget->hasBWI()) &&
13187 "Unexpected vector type in ExtractBitFromMaskVector");
13189 // A variable index can't be handled in mask registers;
13190 // extend the vector to VR512.
13191 if (!isa<ConstantSDNode>(Idx)) {
13192 MVT ExtVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13193 SDValue Ext = DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVT, Vec);
13194 SDValue Elt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
13195 ExtVT.getVectorElementType(), Ext, Idx);
13196 return DAG.getNode(ISD::TRUNCATE, dl, EltVT, Elt);
13199 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13200 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13201 if (!Subtarget->hasDQI() && (VecVT.getVectorNumElements() <= 8))
13202 rc = getRegClassFor(MVT::v16i1);
13203 unsigned MaxSift = rc->getSize()*8 - 1;
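// Shift the requested bit up to the MSB (discarding everything above it) and
// then all the way back down to bit 0, so element 0 of the mask register
// holds the original element IdxVal.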
13204 Vec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, Vec,
13205 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
13206 Vec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, Vec,
13207 DAG.getConstant(MaxSift, MVT::i8));
13208 return DAG.getNode(X86ISD::VEXTRACT, dl, MVT::i1, Vec,
13209 DAG.getIntPtrConstant(0));
13213 X86TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op,
13214 SelectionDAG &DAG) const {
13216 SDValue Vec = Op.getOperand(0);
13217 MVT VecVT = Vec.getSimpleValueType();
13218 SDValue Idx = Op.getOperand(1);
13220 if (Op.getSimpleValueType() == MVT::i1)
13221 return ExtractBitFromMaskVector(Op, DAG);
13223 if (!isa<ConstantSDNode>(Idx)) {
13224 if (VecVT.is512BitVector() ||
13225 (VecVT.is256BitVector() && Subtarget->hasInt256() &&
13226 VecVT.getVectorElementType().getSizeInBits() == 32)) {
13229 MVT::getIntegerVT(VecVT.getVectorElementType().getSizeInBits());
13230 MVT MaskVT = MVT::getVectorVT(MaskEltVT, VecVT.getSizeInBits() /
13231 MaskEltVT.getSizeInBits());
13233 Idx = DAG.getZExtOrTrunc(Idx, dl, MaskEltVT);
13234 SDValue Mask = DAG.getNode(X86ISD::VINSERT, dl, MaskVT,
13235 getZeroVector(MaskVT, Subtarget, DAG, dl),
13236 Idx, DAG.getConstant(0, getPointerTy()));
13237 SDValue Perm = DAG.getNode(X86ISD::VPERMV, dl, VecVT, Mask, Vec);
13238 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(),
13239 Perm, DAG.getConstant(0, getPointerTy()));
13244 // If this is a 256-bit vector result, first extract the 128-bit vector and
13245 // then extract the element from the 128-bit vector.
13246 if (VecVT.is256BitVector() || VecVT.is512BitVector()) {
13248 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13249 // Get the 128-bit vector.
13250 Vec = Extract128BitVector(Vec, IdxVal, DAG, dl);
13251 MVT EltVT = VecVT.getVectorElementType();
13253 unsigned ElemsPerChunk = 128 / EltVT.getSizeInBits();
13255 //if (IdxVal >= NumElems/2)
13256 // IdxVal -= NumElems/2;
13257 IdxVal -= (IdxVal/ElemsPerChunk)*ElemsPerChunk;
13258 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, Op.getValueType(), Vec,
13259 DAG.getConstant(IdxVal, MVT::i32));
13262 assert(VecVT.is128BitVector() && "Unexpected vector length");
13264 if (Subtarget->hasSSE41()) {
13265 SDValue Res = LowerEXTRACT_VECTOR_ELT_SSE4(Op, DAG);
13270 MVT VT = Op.getSimpleValueType();
13271 // TODO: handle v16i8.
13272 if (VT.getSizeInBits() == 16) {
13273 SDValue Vec = Op.getOperand(0);
13274 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13276 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i16,
13277 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i32,
13278 DAG.getNode(ISD::BITCAST, dl,
13280 Op.getOperand(1)));
13281 // Transform it so it matches pextrw, which produces a 32-bit result.
13282 MVT EltVT = MVT::i32;
13283 SDValue Extract = DAG.getNode(X86ISD::PEXTRW, dl, EltVT,
13284 Op.getOperand(0), Op.getOperand(1));
13285 SDValue Assert = DAG.getNode(ISD::AssertZext, dl, EltVT, Extract,
13286 DAG.getValueType(VT));
13287 return DAG.getNode(ISD::TRUNCATE, dl, VT, Assert);
13290 if (VT.getSizeInBits() == 32) {
13291 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13295 // SHUFPS the element to the lowest double word, then movss.
13296 int Mask[4] = { static_cast<int>(Idx), -1, -1, -1 };
13297 MVT VVT = Op.getOperand(0).getSimpleValueType();
13298 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13299 DAG.getUNDEF(VVT), Mask);
13300 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13301 DAG.getIntPtrConstant(0));
13304 if (VT.getSizeInBits() == 64) {
13305 // FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
13306 // FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
13307 // to match extract_elt for f64.
13308 unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
13312 // UNPCKHPD the element to the lowest double word, then movsd.
13313 // Note if the lower 64 bits of the result of the UNPCKHPD is then stored
13314 // to a f64mem, the whole operation is folded into a single MOVHPDmr.
13315 int Mask[2] = { 1, -1 };
13316 MVT VVT = Op.getOperand(0).getSimpleValueType();
13317 SDValue Vec = DAG.getVectorShuffle(VVT, dl, Op.getOperand(0),
13318 DAG.getUNDEF(VVT), Mask);
13319 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, VT, Vec,
13320 DAG.getIntPtrConstant(0));
13326 /// Insert one bit into a mask vector, like v16i1 or v8i1.
13327 /// AVX-512 feature.
13329 X86TargetLowering::InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const {
13331 SDValue Vec = Op.getOperand(0);
13332 SDValue Elt = Op.getOperand(1);
13333 SDValue Idx = Op.getOperand(2);
13334 MVT VecVT = Vec.getSimpleValueType();
13336 if (!isa<ConstantSDNode>(Idx)) {
13337 // Non-constant index. Extend source and destination,
13338 // insert element and then truncate the result.
13339 MVT ExtVecVT = (VecVT == MVT::v8i1 ? MVT::v8i64 : MVT::v16i32);
13340 MVT ExtEltVT = (VecVT == MVT::v8i1 ? MVT::i64 : MVT::i32);
13341 SDValue ExtOp = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, ExtVecVT,
13342 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtVecVT, Vec),
13343 DAG.getNode(ISD::ZERO_EXTEND, dl, ExtEltVT, Elt), Idx);
13344 return DAG.getNode(ISD::TRUNCATE, dl, VecVT, ExtOp);
13347 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13348 SDValue EltInVec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VecVT, Elt);
13349 if (Vec.getOpcode() == ISD::UNDEF)
13350 return DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13351 DAG.getConstant(IdxVal, MVT::i8));
13352 const TargetRegisterClass* rc = getRegClassFor(VecVT);
13353 unsigned MaxSift = rc->getSize()*8 - 1;
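// Isolate the incoming bit: shift it up to the MSB (discarding the rest of
// EltInVec) and back down to position IdxVal, then OR it into the existing
// mask vector.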
13354 EltInVec = DAG.getNode(X86ISD::VSHLI, dl, VecVT, EltInVec,
13355 DAG.getConstant(MaxSift, MVT::i8));
13356 EltInVec = DAG.getNode(X86ISD::VSRLI, dl, VecVT, EltInVec,
13357 DAG.getConstant(MaxSift - IdxVal, MVT::i8));
13358 return DAG.getNode(ISD::OR, dl, VecVT, Vec, EltInVec);
13361 SDValue X86TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op,
13362 SelectionDAG &DAG) const {
13363 MVT VT = Op.getSimpleValueType();
13364 MVT EltVT = VT.getVectorElementType();
13366 if (EltVT == MVT::i1)
13367 return InsertBitToMaskVector(Op, DAG);
13370 SDValue N0 = Op.getOperand(0);
13371 SDValue N1 = Op.getOperand(1);
13372 SDValue N2 = Op.getOperand(2);
13373 if (!isa<ConstantSDNode>(N2))
13375 auto *N2C = cast<ConstantSDNode>(N2);
13376 unsigned IdxVal = N2C->getZExtValue();
13378 // If the vector is wider than 128 bits, extract the 128-bit subvector, insert
13379 // into that, and then insert the subvector back into the result.
13380 if (VT.is256BitVector() || VT.is512BitVector()) {
13381 // Get the desired 128-bit vector half.
13382 SDValue V = Extract128BitVector(N0, IdxVal, DAG, dl);
13384 // Insert the element into the desired half.
13385 unsigned NumEltsIn128 = 128 / EltVT.getSizeInBits();
13386 unsigned IdxIn128 = IdxVal - (IdxVal / NumEltsIn128) * NumEltsIn128;
13388 V = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, V.getValueType(), V, N1,
13389 DAG.getConstant(IdxIn128, MVT::i32));
13391 // Insert the changed part back to the 256-bit vector
13392 return Insert128BitVector(N0, V, IdxVal, DAG, dl);
13394 assert(VT.is128BitVector() && "Only 128-bit vector types should be left!");
13396 if (Subtarget->hasSSE41()) {
13397 if (EltVT.getSizeInBits() == 8 || EltVT.getSizeInBits() == 16) {
13399 if (VT == MVT::v8i16) {
13400 Opc = X86ISD::PINSRW;
13402 assert(VT == MVT::v16i8);
13403 Opc = X86ISD::PINSRB;
13406 // Transform it so it matches pinsr{b,w}, which expects a GR32 as its second argument.
13408 if (N1.getValueType() != MVT::i32)
13409 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13410 if (N2.getValueType() != MVT::i32)
13411 N2 = DAG.getIntPtrConstant(IdxVal);
13412 return DAG.getNode(Opc, dl, VT, N0, N1, N2);
13415 if (EltVT == MVT::f32) {
13416 // Bits [7:6] of the constant are the source select. This will always be
13417 // zero here. The DAG Combiner may combine an extract_elt index into these
13419 // bits. For example (insert (extract, 3), 2) could be matched by putting
13421 // the '3' into bits [7:6] of X86ISD::INSERTPS.
13422 // Bits [5:4] of the constant are the destination select. This is the
13423 // value of the incoming immediate.
13424 // Bits [3:0] of the constant are the zero mask. The DAG Combiner may
13425 // combine either bitwise AND or insert of float 0.0 to set these bits.
13426 N2 = DAG.getIntPtrConstant(IdxVal << 4);
13427 // Create this as a scalar to vector.
13428 N1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4f32, N1);
13429 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N0, N1, N2);
13432 if (EltVT == MVT::i32 || EltVT == MVT::i64) {
13433 // PINSR* works with constant index.
13438 if (EltVT == MVT::i8)
13441 if (EltVT.getSizeInBits() == 16) {
13442 // Transform it so it matches pinsrw, which expects a 16-bit value in a GR32
13443 // as its second argument.
13444 if (N1.getValueType() != MVT::i32)
13445 N1 = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, N1);
13446 if (N2.getValueType() != MVT::i32)
13447 N2 = DAG.getIntPtrConstant(IdxVal);
13448 return DAG.getNode(X86ISD::PINSRW, dl, VT, N0, N1, N2);
13453 static SDValue LowerSCALAR_TO_VECTOR(SDValue Op, SelectionDAG &DAG) {
13455 MVT OpVT = Op.getSimpleValueType();
13457 // If this is a 256-bit vector result, first insert into a 128-bit
13458 // vector and then insert into the 256-bit vector.
13459 if (!OpVT.is128BitVector()) {
13460 // Insert into a 128-bit vector.
13461 unsigned SizeFactor = OpVT.getSizeInBits()/128;
13462 MVT VT128 = MVT::getVectorVT(OpVT.getVectorElementType(),
13463 OpVT.getVectorNumElements() / SizeFactor);
13465 Op = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT128, Op.getOperand(0));
13467 // Insert the 128-bit vector.
13468 return Insert128BitVector(DAG.getUNDEF(OpVT), Op, 0, DAG, dl);
13471 if (OpVT == MVT::v1i64 &&
13472 Op.getOperand(0).getValueType() == MVT::i64)
13473 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v1i64, Op.getOperand(0));
13475 SDValue AnyExt = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, Op.getOperand(0));
13476 assert(OpVT.is128BitVector() && "Expected an SSE type!");
13477 return DAG.getNode(ISD::BITCAST, dl, OpVT,
13478 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,AnyExt));
13481 // Lower a node with an EXTRACT_SUBVECTOR opcode. This may result in
13482 // a simple subregister reference or explicit instructions to grab
13483 // upper bits of a vector.
13484 static SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13485 SelectionDAG &DAG) {
13487 SDValue In = Op.getOperand(0);
13488 SDValue Idx = Op.getOperand(1);
13489 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13490 MVT ResVT = Op.getSimpleValueType();
13491 MVT InVT = In.getSimpleValueType();
13493 if (Subtarget->hasFp256()) {
13494 if (ResVT.is128BitVector() &&
13495 (InVT.is256BitVector() || InVT.is512BitVector()) &&
13496 isa<ConstantSDNode>(Idx)) {
13497 return Extract128BitVector(In, IdxVal, DAG, dl);
13499 if (ResVT.is256BitVector() && InVT.is512BitVector() &&
13500 isa<ConstantSDNode>(Idx)) {
13501 return Extract256BitVector(In, IdxVal, DAG, dl);
13507 // Lower a node with an INSERT_SUBVECTOR opcode. This may result in a
13508 // simple superregister reference or explicit instructions to insert
13509 // the upper bits of a vector.
13510 static SDValue LowerINSERT_SUBVECTOR(SDValue Op, const X86Subtarget *Subtarget,
13511 SelectionDAG &DAG) {
13512 if (!Subtarget->hasAVX())
13516 SDValue Vec = Op.getOperand(0);
13517 SDValue SubVec = Op.getOperand(1);
13518 SDValue Idx = Op.getOperand(2);
13520 if (!isa<ConstantSDNode>(Idx))
13523 unsigned IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
13524 MVT OpVT = Op.getSimpleValueType();
13525 MVT SubVecVT = SubVec.getSimpleValueType();
13527 // Fold two 16-byte subvector loads into one 32-byte load:
13528 // (insert_subvector (insert_subvector undef, (load addr), 0),
13529 // (load addr + 16), Elts/2)
13531 if ((IdxVal == OpVT.getVectorNumElements() / 2) &&
13532 Vec.getOpcode() == ISD::INSERT_SUBVECTOR &&
13533 OpVT.is256BitVector() && SubVecVT.is128BitVector() &&
13534 !Subtarget->isUnalignedMem32Slow()) {
13535 SDValue SubVec2 = Vec.getOperand(1);
13536 if (auto *Idx2 = dyn_cast<ConstantSDNode>(Vec.getOperand(2))) {
13537 if (Idx2->getZExtValue() == 0) {
13538 SDValue Ops[] = { SubVec2, SubVec };
13539 SDValue LD = EltsFromConsecutiveLoads(OpVT, Ops, dl, DAG, false);
13546 if ((OpVT.is256BitVector() || OpVT.is512BitVector()) &&
13547 SubVecVT.is128BitVector())
13548 return Insert128BitVector(Vec, SubVec, IdxVal, DAG, dl);
13550 if (OpVT.is512BitVector() && SubVecVT.is256BitVector())
13551 return Insert256BitVector(Vec, SubVec, IdxVal, DAG, dl);
13556 // ConstantPool, JumpTable, GlobalAddress, and ExternalSymbol are lowered as
13557 // their target counterpart wrapped in the X86ISD::Wrapper node. Suppose N is
13558 // one of the above mentioned nodes. It has to be wrapped because otherwise
13559 // Select(N) returns N. So the raw TargetGlobalAddress nodes, etc. can only
13560 // be used to form an addressing mode. These wrapped nodes will be selected into MOV32ri.
13563 X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
13564 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
13566 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13567 // global base reg.
13568 unsigned char OpFlag = 0;
13569 unsigned WrapperKind = X86ISD::Wrapper;
13570 CodeModel::Model M = DAG.getTarget().getCodeModel();
13572 if (Subtarget->isPICStyleRIPRel() &&
13573 (M == CodeModel::Small || M == CodeModel::Kernel))
13574 WrapperKind = X86ISD::WrapperRIP;
13575 else if (Subtarget->isPICStyleGOT())
13576 OpFlag = X86II::MO_GOTOFF;
13577 else if (Subtarget->isPICStyleStubPIC())
13578 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13580 SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
13581 CP->getAlignment(),
13582 CP->getOffset(), OpFlag);
13584 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13585 // With PIC, the address is actually $g + Offset.
13587 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13588 DAG.getNode(X86ISD::GlobalBaseReg,
13589 SDLoc(), getPointerTy()),
13596 SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) const {
13597 JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
13599 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13600 // global base reg.
13601 unsigned char OpFlag = 0;
13602 unsigned WrapperKind = X86ISD::Wrapper;
13603 CodeModel::Model M = DAG.getTarget().getCodeModel();
13605 if (Subtarget->isPICStyleRIPRel() &&
13606 (M == CodeModel::Small || M == CodeModel::Kernel))
13607 WrapperKind = X86ISD::WrapperRIP;
13608 else if (Subtarget->isPICStyleGOT())
13609 OpFlag = X86II::MO_GOTOFF;
13610 else if (Subtarget->isPICStyleStubPIC())
13611 OpFlag = X86II::MO_PIC_BASE_OFFSET;
13613 SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
13616 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13618 // With PIC, the address is actually $g + Offset.
13620 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13621 DAG.getNode(X86ISD::GlobalBaseReg,
13622 SDLoc(), getPointerTy()),
13629 X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const {
13630 const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
13632 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13633 // global base reg.
13634 unsigned char OpFlag = 0;
13635 unsigned WrapperKind = X86ISD::Wrapper;
13636 CodeModel::Model M = DAG.getTarget().getCodeModel();
13638 if (Subtarget->isPICStyleRIPRel() &&
13639 (M == CodeModel::Small || M == CodeModel::Kernel)) {
13640 if (Subtarget->isTargetDarwin() || Subtarget->isTargetELF())
13641 OpFlag = X86II::MO_GOTPCREL;
13642 WrapperKind = X86ISD::WrapperRIP;
13643 } else if (Subtarget->isPICStyleGOT()) {
13644 OpFlag = X86II::MO_GOT;
13645 } else if (Subtarget->isPICStyleStubPIC()) {
13646 OpFlag = X86II::MO_DARWIN_NONLAZY_PIC_BASE;
13647 } else if (Subtarget->isPICStyleStubNoDynamic()) {
13648 OpFlag = X86II::MO_DARWIN_NONLAZY;
13651 SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);
13654 Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13656 // With PIC, the address is actually $g + Offset.
13657 if (DAG.getTarget().getRelocationModel() == Reloc::PIC_ &&
13658 !Subtarget->is64Bit()) {
13659 Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13660 DAG.getNode(X86ISD::GlobalBaseReg,
13661 SDLoc(), getPointerTy()),
13665 // For symbols that require a load from a stub to get the address, emit the load.
13667 if (isGlobalStubReference(OpFlag))
13668 Result = DAG.getLoad(getPointerTy(), DL, DAG.getEntryNode(), Result,
13669 MachinePointerInfo::getGOT(), false, false, false, 0);
13675 X86TargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
13676 // Create the TargetBlockAddress node.
13677 unsigned char OpFlags =
13678 Subtarget->ClassifyBlockAddressReference();
13679 CodeModel::Model M = DAG.getTarget().getCodeModel();
13680 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
13681 int64_t Offset = cast<BlockAddressSDNode>(Op)->getOffset();
13683 SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy(), Offset,
13686 if (Subtarget->isPICStyleRIPRel() &&
13687 (M == CodeModel::Small || M == CodeModel::Kernel))
13688 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13690 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13692 // With PIC, the address is actually $g + Offset.
13693 if (isGlobalRelativeToPICBase(OpFlags)) {
13694 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13695 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13703 X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, SDLoc dl,
13704 int64_t Offset, SelectionDAG &DAG) const {
13705 // Create the TargetGlobalAddress node, folding in the constant
13706 // offset if it is legal.
13707 unsigned char OpFlags =
13708 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget());
13709 CodeModel::Model M = DAG.getTarget().getCodeModel();
13711 if (OpFlags == X86II::MO_NO_FLAG &&
13712 X86::isOffsetSuitableForCodeModel(Offset, M)) {
13713 // A direct static reference to a global.
13714 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
13717 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), 0, OpFlags);
13720 if (Subtarget->isPICStyleRIPRel() &&
13721 (M == CodeModel::Small || M == CodeModel::Kernel))
13722 Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
13724 Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
13726 // With PIC, the address is actually $g + Offset.
13727 if (isGlobalRelativeToPICBase(OpFlags)) {
13728 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
13729 DAG.getNode(X86ISD::GlobalBaseReg, dl, getPointerTy()),
13733 // For globals that require a load from a stub to get the address, emit the load.
13735 if (isGlobalStubReference(OpFlags))
13736 Result = DAG.getLoad(getPointerTy(), dl, DAG.getEntryNode(), Result,
13737 MachinePointerInfo::getGOT(), false, false, false, 0);
13739 // If there was a non-zero offset that we didn't fold, create an explicit
13740 // addition for it.
13742 Result = DAG.getNode(ISD::ADD, dl, getPointerTy(), Result,
13743 DAG.getConstant(Offset, getPointerTy()));
13749 X86TargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
13750 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
13751 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
13752 return LowerGlobalAddress(GV, SDLoc(Op), Offset, DAG);
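/// GetTLSADDR - Build the TLSADDR (or TLSBASEADDR, for local-dynamic) pseudo,
/// which is codegen'ed as a call to the runtime TLS resolver (__tls_get_addr
/// on ELF targets), and copy the resulting address out of ReturnReg.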
13756 GetTLSADDR(SelectionDAG &DAG, SDValue Chain, GlobalAddressSDNode *GA,
13757 SDValue *InFlag, const EVT PtrVT, unsigned ReturnReg,
13758 unsigned char OperandFlags, bool LocalDynamic = false) {
13759 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13760 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13762 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13763 GA->getValueType(0),
13767 X86ISD::NodeType CallType = LocalDynamic ? X86ISD::TLSBASEADDR
13771 SDValue Ops[] = { Chain, TGA, *InFlag };
13772 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13774 SDValue Ops[] = { Chain, TGA };
13775 Chain = DAG.getNode(CallType, dl, NodeTys, Ops);
13778 // TLSADDR will be codegen'ed as a call. Inform MFI that the function has calls.
13779 MFI->setAdjustsStack(true);
13780 MFI->setHasCalls(true);
13782 SDValue Flag = Chain.getValue(1);
13783 return DAG.getCopyFromReg(Chain, dl, ReturnReg, PtrVT, Flag);
13786 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
13788 LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13791 SDLoc dl(GA); // ? function entry point might be better
13792 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13793 DAG.getNode(X86ISD::GlobalBaseReg,
13794 SDLoc(), PtrVT), InFlag);
13795 InFlag = Chain.getValue(1);
13797 return GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX, X86II::MO_TLSGD);
13800 // Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
13802 LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13804 return GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT,
13805 X86::RAX, X86II::MO_TLSGD);
13808 static SDValue LowerToTLSLocalDynamicModel(GlobalAddressSDNode *GA,
13814 // Get the start address of the TLS block for this module.
13815 X86MachineFunctionInfo* MFI = DAG.getMachineFunction()
13816 .getInfo<X86MachineFunctionInfo>();
13817 MFI->incNumLocalDynamicTLSAccesses();
13821 Base = GetTLSADDR(DAG, DAG.getEntryNode(), GA, nullptr, PtrVT, X86::RAX,
13822 X86II::MO_TLSLD, /*LocalDynamic=*/true);
13825 SDValue Chain = DAG.getCopyToReg(DAG.getEntryNode(), dl, X86::EBX,
13826 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT), InFlag);
13827 InFlag = Chain.getValue(1);
13828 Base = GetTLSADDR(DAG, Chain, GA, &InFlag, PtrVT, X86::EAX,
13829 X86II::MO_TLSLDM, /*LocalDynamic=*/true);
13832 // Note: the CleanupLocalDynamicTLSPass will remove redundant computations of the TLS base address.
13836 unsigned char OperandFlags = X86II::MO_DTPOFF;
13837 unsigned WrapperKind = X86ISD::Wrapper;
13838 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
13839 GA->getValueType(0),
13840 GA->getOffset(), OperandFlags);
13841 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13843 // Add x@dtpoff with the base.
13844 return DAG.getNode(ISD::ADD, dl, PtrVT, Offset, Base);
13847 // Lower ISD::GlobalTLSAddress using the "initial exec" or "local exec" model.
13848 static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
13849 const EVT PtrVT, TLSModel::Model model,
13850 bool is64Bit, bool isPIC) {
13853 // Get the Thread Pointer, which is %gs:0 (32-bit) or %fs:0 (64-bit).
13854 Value *Ptr = Constant::getNullValue(Type::getInt8PtrTy(*DAG.getContext(),
13855 is64Bit ? 257 : 256));
13857 SDValue ThreadPointer =
13858 DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), DAG.getIntPtrConstant(0),
13859 MachinePointerInfo(Ptr), false, false, false, 0);
13861 unsigned char OperandFlags = 0;
13862 // Most TLS accesses are not RIP relative, even on x86-64. One exception is the initial-exec model.
13864 unsigned WrapperKind = X86ISD::Wrapper;
13865 if (model == TLSModel::LocalExec) {
13866 OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
13867 } else if (model == TLSModel::InitialExec) {
13869 OperandFlags = X86II::MO_GOTTPOFF;
13870 WrapperKind = X86ISD::WrapperRIP;
13872 OperandFlags = isPIC ? X86II::MO_GOTNTPOFF : X86II::MO_INDNTPOFF;
13875 llvm_unreachable("Unexpected model");
13878 // emit "addl x@ntpoff,%eax" (local exec)
13879 // or "addl x@indntpoff,%eax" (initial exec)
13880 // or "addl x@gotntpoff(%ebx) ,%eax" (initial exec, 32-bit pic)
13882 DAG.getTargetGlobalAddress(GA->getGlobal(), dl, GA->getValueType(0),
13883 GA->getOffset(), OperandFlags);
13884 SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
13886 if (model == TLSModel::InitialExec) {
13887 if (isPIC && !is64Bit) {
13888 Offset = DAG.getNode(ISD::ADD, dl, PtrVT,
13889 DAG.getNode(X86ISD::GlobalBaseReg, SDLoc(), PtrVT),
13893 Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
13894 MachinePointerInfo::getGOT(), false, false, false, 0);
13897 // The address of the thread-local variable is the sum of the thread
13898 // pointer and the offset of the variable.
13899 return DAG.getNode(ISD::ADD, dl, PtrVT, ThreadPointer, Offset);
13903 X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const {
13905 GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
13906 const GlobalValue *GV = GA->getGlobal();
13908 if (Subtarget->isTargetELF()) {
13909 TLSModel::Model model = DAG.getTarget().getTLSModel(GV);
13912 case TLSModel::GeneralDynamic:
13913 if (Subtarget->is64Bit())
13914 return LowerToTLSGeneralDynamicModel64(GA, DAG, getPointerTy());
13915 return LowerToTLSGeneralDynamicModel32(GA, DAG, getPointerTy());
13916 case TLSModel::LocalDynamic:
13917 return LowerToTLSLocalDynamicModel(GA, DAG, getPointerTy(),
13918 Subtarget->is64Bit());
13919 case TLSModel::InitialExec:
13920 case TLSModel::LocalExec:
13921 return LowerToTLSExecModel(
13922 GA, DAG, getPointerTy(), model, Subtarget->is64Bit(),
13923 DAG.getTarget().getRelocationModel() == Reloc::PIC_);
13925 llvm_unreachable("Unknown TLS model.");
13928 if (Subtarget->isTargetDarwin()) {
13929 // Darwin only has one model of TLS. Lower to that.
13930 unsigned char OpFlag = 0;
13931 unsigned WrapperKind = Subtarget->isPICStyleRIPRel() ?
13932 X86ISD::WrapperRIP : X86ISD::Wrapper;
13934 // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
13935 // global base reg.
13936 bool PIC32 = (DAG.getTarget().getRelocationModel() == Reloc::PIC_) &&
13937 !Subtarget->is64Bit();
13939 OpFlag = X86II::MO_TLVP_PIC_BASE;
13941 OpFlag = X86II::MO_TLVP;
13943 SDValue Result = DAG.getTargetGlobalAddress(GA->getGlobal(), DL,
13944 GA->getValueType(0),
13945 GA->getOffset(), OpFlag);
13946 SDValue Offset = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
13948 // With PIC32, the address is actually $g + Offset.
13950 Offset = DAG.getNode(ISD::ADD, DL, getPointerTy(),
13951 DAG.getNode(X86ISD::GlobalBaseReg,
13952 SDLoc(), getPointerTy()),
13955 // Lowering the machine ISD will make sure everything is in the right location.
13957 SDValue Chain = DAG.getEntryNode();
13958 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
13959 SDValue Args[] = { Chain, Offset };
13960 Chain = DAG.getNode(X86ISD::TLSCALL, DL, NodeTys, Args);
13962 // TLSCALL will be codegen'ed as a call. Inform MFI that the function has calls.
13963 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
13964 MFI->setAdjustsStack(true);
13966 // And our return value (the TLS address) is in the standard call return value register.
13968 unsigned Reg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
13969 return DAG.getCopyFromReg(Chain, DL, Reg, getPointerTy(),
13970 Chain.getValue(1));
13973 if (Subtarget->isTargetKnownWindowsMSVC() ||
13974 Subtarget->isTargetWindowsGNU()) {
13975 // Just use the implicit TLS architecture
13976 // Need to generate something similar to:
13977 // mov rdx, qword [gs:abs 58H]; Load pointer to ThreadLocalStorage
13979 // mov ecx, dword [rel _tls_index]: Load index (from C runtime)
13980 // mov rcx, qword [rdx+rcx*8]
13981 // mov eax, .tls$:tlsvar
13982 // [rax+rcx] contains the address
13983 // Windows 64bit: gs:0x58
13984 // Windows 32bit: fs:__tls_array
13987 SDValue Chain = DAG.getEntryNode();
13989 // Get the Thread Pointer, which is %fs:__tls_array (32-bit) or
13990 // %gs:0x58 (64-bit). On MinGW, __tls_array is not available, so directly
13991 // use its literal value of 0x2C.
13992 Value *Ptr = Constant::getNullValue(Subtarget->is64Bit()
13993 ? Type::getInt8PtrTy(*DAG.getContext(),
13995 : Type::getInt32PtrTy(*DAG.getContext(),
13999 Subtarget->is64Bit()
14000 ? DAG.getIntPtrConstant(0x58)
14001 : (Subtarget->isTargetWindowsGNU()
14002 ? DAG.getIntPtrConstant(0x2C)
14003 : DAG.getExternalSymbol("_tls_array", getPointerTy()));
14005 SDValue ThreadPointer =
14006 DAG.getLoad(getPointerTy(), dl, Chain, TlsArray,
14007 MachinePointerInfo(Ptr), false, false, false, 0);
14009 // Load the _tls_index variable
14010 SDValue IDX = DAG.getExternalSymbol("_tls_index", getPointerTy());
14011 if (Subtarget->is64Bit())
14012 IDX = DAG.getExtLoad(ISD::ZEXTLOAD, dl, getPointerTy(), Chain,
14013 IDX, MachinePointerInfo(), MVT::i32,
14014 false, false, false, 0);
14016 IDX = DAG.getLoad(getPointerTy(), dl, Chain, IDX, MachinePointerInfo(),
14017 false, false, false, 0);
14019 SDValue Scale = DAG.getConstant(Log2_64_Ceil(TD->getPointerSize()),
14021 IDX = DAG.getNode(ISD::SHL, dl, getPointerTy(), IDX, Scale);
14023 SDValue res = DAG.getNode(ISD::ADD, dl, getPointerTy(), ThreadPointer, IDX);
14024 res = DAG.getLoad(getPointerTy(), dl, Chain, res, MachinePointerInfo(),
14025 false, false, false, 0);
14027 // Get the offset of the start of the .tls section
14028 SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), dl,
14029 GA->getValueType(0),
14030 GA->getOffset(), X86II::MO_SECREL);
14031 SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), TGA);
14033 // The address of the thread local variable is the add of the thread
14034 // pointer with the offset of the variable.
14035 return DAG.getNode(ISD::ADD, dl, getPointerTy(), res, Offset);
14038 llvm_unreachable("TLS not implemented for this target.");
14041 /// LowerShiftParts - Lower SRA_PARTS and friends, which return two i32 values
14042 /// and take a 2 x i32 value to shift plus a shift amount.
14043 static SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) {
14044 assert(Op.getNumOperands() == 3 && "Not a double-shift!");
14045 MVT VT = Op.getSimpleValueType();
14046 unsigned VTBits = VT.getSizeInBits();
14048 bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
14049 SDValue ShOpLo = Op.getOperand(0);
14050 SDValue ShOpHi = Op.getOperand(1);
14051 SDValue ShAmt = Op.getOperand(2);
14052 // X86ISD::SHLD and X86ISD::SHRD have defined overflow behavior but the
14053 // generic ISD nodes haven't. Insert an AND to be safe; it's optimized away anyway.
14055 SDValue SafeShAmt = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
14056 DAG.getConstant(VTBits - 1, MVT::i8));
14057 SDValue Tmp1 = isSRA ? DAG.getNode(ISD::SRA, dl, VT, ShOpHi,
14058 DAG.getConstant(VTBits - 1, MVT::i8))
14059 : DAG.getConstant(0, VT);
14061 SDValue Tmp2, Tmp3;
14062 if (Op.getOpcode() == ISD::SHL_PARTS) {
14063 Tmp2 = DAG.getNode(X86ISD::SHLD, dl, VT, ShOpHi, ShOpLo, ShAmt);
14064 Tmp3 = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, SafeShAmt);
14066 Tmp2 = DAG.getNode(X86ISD::SHRD, dl, VT, ShOpLo, ShOpHi, ShAmt);
14067 Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, dl, VT, ShOpHi, SafeShAmt);
14070 // If the shift amount is larger than or equal to the width of a part, we can't
14071 // rely on the results of shld/shrd. Insert a test and select the appropriate
14072 // values for large shift amounts.
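//
// Illustrative sketch (not part of the original comment) for a 64-bit SRL_PARTS
// on a 32-bit target, where VTBits == 32:
//   small = shrd(lo, hi, amt);          // only valid when (amt & 32) == 0
//   big   = hi >> (amt & 31);
//   Lo    = (amt & 32) ? big : small;
//   Hi    = (amt & 32) ? 0   : big;     // for SRA_PARTS, 0 becomes hi >> 31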
14073 SDValue AndNode = DAG.getNode(ISD::AND, dl, MVT::i8, ShAmt,
14074 DAG.getConstant(VTBits, MVT::i8));
14075 SDValue Cond = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
14076 AndNode, DAG.getConstant(0, MVT::i8));
14079 SDValue CC = DAG.getConstant(X86::COND_NE, MVT::i8);
14080 SDValue Ops0[4] = { Tmp2, Tmp3, CC, Cond };
14081 SDValue Ops1[4] = { Tmp3, Tmp1, CC, Cond };
14083 if (Op.getOpcode() == ISD::SHL_PARTS) {
14084 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
14085 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
14087 Lo = DAG.getNode(X86ISD::CMOV, dl, VT, Ops0);
14088 Hi = DAG.getNode(X86ISD::CMOV, dl, VT, Ops1);
14091 SDValue Ops[2] = { Lo, Hi };
14092 return DAG.getMergeValues(Ops, dl);
14095 SDValue X86TargetLowering::LowerSINT_TO_FP(SDValue Op,
14096 SelectionDAG &DAG) const {
14097 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
14100 if (SrcVT.isVector()) {
14101 if (SrcVT.getVectorElementType() == MVT::i1) {
14102 MVT IntegerVT = MVT::getVectorVT(MVT::i32, SrcVT.getVectorNumElements());
14103 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14104 DAG.getNode(ISD::SIGN_EXTEND, dl, IntegerVT,
14105 Op.getOperand(0)));
14110 assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
14111 "Unknown SINT_TO_FP to lower!");
14113 // These are really Legal; return the operand so the caller accepts it as Legal.
14115 if (SrcVT == MVT::i32 && isScalarFPTypeInSSEReg(Op.getValueType()))
14117 if (SrcVT == MVT::i64 && isScalarFPTypeInSSEReg(Op.getValueType()) &&
14118 Subtarget->is64Bit()) {
14122 unsigned Size = SrcVT.getSizeInBits()/8;
14123 MachineFunction &MF = DAG.getMachineFunction();
14124 int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size, false);
14125 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14126 SDValue Chain = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14128 MachinePointerInfo::getFixedStack(SSFI),
14130 return BuildFILD(Op, SrcVT, Chain, StackSlot, DAG);
14133 SDValue X86TargetLowering::BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain,
14135 SelectionDAG &DAG) const {
14139 bool useSSE = isScalarFPTypeInSSEReg(Op.getValueType());
14141 Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Glue);
14143 Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
14145 unsigned ByteSize = SrcVT.getSizeInBits()/8;
14147 FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(StackSlot);
14148 MachineMemOperand *MMO;
14150 int SSFI = FI->getIndex();
14152 DAG.getMachineFunction()
14153 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14154 MachineMemOperand::MOLoad, ByteSize, ByteSize);
14156 MMO = cast<LoadSDNode>(StackSlot)->getMemOperand();
14157 StackSlot = StackSlot.getOperand(1);
14159 SDValue Ops[] = { Chain, StackSlot, DAG.getValueType(SrcVT) };
14160 SDValue Result = DAG.getMemIntrinsicNode(useSSE ? X86ISD::FILD_FLAG :
14162 Tys, Ops, SrcVT, MMO);
14165 Chain = Result.getValue(1);
14166 SDValue InFlag = Result.getValue(2);
14168 // FIXME: Currently the FST is flagged to the FILD_FLAG. This
14169 // shouldn't be necessary except that RFP cannot be live across
14170 // multiple blocks. When stackifier is fixed, they can be uncoupled.
14171 MachineFunction &MF = DAG.getMachineFunction();
14172 unsigned SSFISize = Op.getValueType().getSizeInBits()/8;
14173 int SSFI = MF.getFrameInfo()->CreateStackObject(SSFISize, SSFISize, false);
14174 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14175 Tys = DAG.getVTList(MVT::Other);
14177 Chain, Result, StackSlot, DAG.getValueType(Op.getValueType()), InFlag
14179 MachineMemOperand *MMO =
14180 DAG.getMachineFunction()
14181 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14182 MachineMemOperand::MOStore, SSFISize, SSFISize);
14184 Chain = DAG.getMemIntrinsicNode(X86ISD::FST, DL, Tys,
14185 Ops, Op.getValueType(), MMO);
14186 Result = DAG.getLoad(Op.getValueType(), DL, Chain, StackSlot,
14187 MachinePointerInfo::getFixedStack(SSFI),
14188 false, false, false, 0);
14194 // LowerUINT_TO_FP_i64 - 64-bit unsigned integer to double expansion.
14195 SDValue X86TargetLowering::LowerUINT_TO_FP_i64(SDValue Op,
14196 SelectionDAG &DAG) const {
14197 // This algorithm is not obvious. Here is what we're trying to output:
14200 punpckldq (c0), %xmm0 // c0: (uint4){ 0x43300000U, 0x45300000U, 0U, 0U }
14201 subpd (c1), %xmm0 // c1: (double2){ 0x1.0p52, 0x1.0p52 * 0x1.0p32 }
14203 haddpd %xmm0, %xmm0
14205 pshufd $0x4e, %xmm0, %xmm1
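// Why this works (illustrative note, not part of the original comment):
// 0x43300000 and 0x45300000 are the high words of the doubles 2^52 and 2^84.
// After the punpckldq, the two doubles have the values 2^52 + lo32 and
// 2^84 + hi32 * 2^32, both represented exactly. Subtracting the constant
// pair { 2^52, 2^84 } leaves exactly { lo32, hi32 * 2^32 }, and the
// horizontal add combines them into the final double.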
14211 LLVMContext *Context = DAG.getContext();
14213 // Build some magic constants.
14214 static const uint32_t CV0[] = { 0x43300000, 0x45300000, 0, 0 };
14215 Constant *C0 = ConstantDataVector::get(*Context, CV0);
14216 SDValue CPIdx0 = DAG.getConstantPool(C0, getPointerTy(), 16);
14218 SmallVector<Constant*,2> CV1;
14220 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
14221 APInt(64, 0x4330000000000000ULL))));
14223 ConstantFP::get(*Context, APFloat(APFloat::IEEEdouble,
14224 APInt(64, 0x4530000000000000ULL))));
14225 Constant *C1 = ConstantVector::get(CV1);
14226 SDValue CPIdx1 = DAG.getConstantPool(C1, getPointerTy(), 16);
14228 // Load the 64-bit value into an XMM register.
14229 SDValue XR1 = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v2i64,
14231 SDValue CLod0 = DAG.getLoad(MVT::v4i32, dl, DAG.getEntryNode(), CPIdx0,
14232 MachinePointerInfo::getConstantPool(),
14233 false, false, false, 16);
14234 SDValue Unpck1 = getUnpackl(DAG, dl, MVT::v4i32,
14235 DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, XR1),
14238 SDValue CLod1 = DAG.getLoad(MVT::v2f64, dl, CLod0.getValue(1), CPIdx1,
14239 MachinePointerInfo::getConstantPool(),
14240 false, false, false, 16);
14241 SDValue XR2F = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Unpck1);
14242 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, XR2F, CLod1);
14245 if (Subtarget->hasSSE3()) {
14246 // FIXME: The 'haddpd' instruction may be slower than 'movhlps + addsd'.
14247 Result = DAG.getNode(X86ISD::FHADD, dl, MVT::v2f64, Sub, Sub);
14249 SDValue S2F = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Sub);
14250 SDValue Shuffle = getTargetShuffleNode(X86ISD::PSHUFD, dl, MVT::v4i32,
14252 Result = DAG.getNode(ISD::FADD, dl, MVT::v2f64,
14253 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Shuffle),
14257 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, Result,
14258 DAG.getIntPtrConstant(0));
14261 // LowerUINT_TO_FP_i32 - 32-bit unsigned integer to float expansion.
14262 SDValue X86TargetLowering::LowerUINT_TO_FP_i32(SDValue Op,
14263 SelectionDAG &DAG) const {
14265 // FP constant to bias correct the final result.
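// Illustrative note (not part of the original comment): 0x4330000000000000 is
// the bit pattern of the double 2^52. OR'ing the zero-extended 32-bit input
// into the low mantissa bits produces the double 2^52 + x exactly, so
// subtracting the bias 2^52 afterwards recovers x as a double with no
// rounding error.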
14266 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
14269 // Load the 32-bit value into an XMM register.
14270 SDValue Load = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32,
14273 // Zero out the upper parts of the register.
14274 Load = getShuffleVectorZeroOrUndef(Load, 0, true, Subtarget, DAG);
14276 Load = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14277 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Load),
14278 DAG.getIntPtrConstant(0));
14280 // Or the load with the bias.
14281 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64,
14282 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14283 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14284 MVT::v2f64, Load)),
14285 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64,
14286 DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
14287 MVT::v2f64, Bias)));
14288 Or = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64,
14289 DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or),
14290 DAG.getIntPtrConstant(0));
14292 // Subtract the bias.
14293 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::f64, Or, Bias);
14295 // Handle final rounding.
14296 EVT DestVT = Op.getValueType();
14298 if (DestVT.bitsLT(MVT::f64))
14299 return DAG.getNode(ISD::FP_ROUND, dl, DestVT, Sub,
14300 DAG.getIntPtrConstant(0));
14301 if (DestVT.bitsGT(MVT::f64))
14302 return DAG.getNode(ISD::FP_EXTEND, dl, DestVT, Sub);
14304 // Handle final rounding.
14305 return Sub;
14308 static SDValue lowerUINT_TO_FP_vXi32(SDValue Op, SelectionDAG &DAG,
14309 const X86Subtarget &Subtarget) {
14310 // The algorithm is the following:
14311 // #ifdef __SSE4_1__
14312 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14313 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14314 // (uint4) 0x53000000, 0xaa);
14316 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14317 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14319 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14320 // return (float4) lo + fhi;
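//
// Why this works (illustrative note, not part of the original comment):
// 0x4b000000 is the float 2^23 and 0x53000000 is the float 2^39. Inserting the
// low/high 16-bit halves of v into their mantissas gives, exactly,
//   lo = 2^23 + (v & 0xffff)   and   hi = 2^39 + (v >> 16) * 2^16.
// Subtracting (2^39 + 2^23) from hi and then adding lo cancels both biases,
// leaving (v >> 16) * 2^16 + (v & 0xffff) == v, rounded only once at the end.
// That is why the add constant built below is 0xD3000080 == -(0x1.0p39f + 0x1.0p23f).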
14323 SDValue V = Op->getOperand(0);
14324 EVT VecIntVT = V.getValueType();
14325 bool Is128 = VecIntVT == MVT::v4i32;
14326 EVT VecFloatVT = Is128 ? MVT::v4f32 : MVT::v8f32;
14327 // If we convert to something else than the supported type, e.g., to v4f64,
14329 if (VecFloatVT != Op->getValueType(0))
14332 unsigned NumElts = VecIntVT.getVectorNumElements();
14333 assert((VecIntVT == MVT::v4i32 || VecIntVT == MVT::v8i32) &&
14334 "Unsupported custom type");
14335 assert(NumElts <= 8 && "The size of the constant array must be fixed");
14337 // In the #ifdef/#else code, we have in common:
14338 // - The vector of constants:
14344 // Create the splat vector for 0x4b000000.
14345 SDValue CstLow = DAG.getConstant(0x4b000000, MVT::i32);
14346 SDValue CstLowArray[] = {CstLow, CstLow, CstLow, CstLow,
14347 CstLow, CstLow, CstLow, CstLow};
14348 SDValue VecCstLow = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14349 makeArrayRef(&CstLowArray[0], NumElts));
14350 // Create the splat vector for 0x53000000.
14351 SDValue CstHigh = DAG.getConstant(0x53000000, MVT::i32);
14352 SDValue CstHighArray[] = {CstHigh, CstHigh, CstHigh, CstHigh,
14353 CstHigh, CstHigh, CstHigh, CstHigh};
14354 SDValue VecCstHigh = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14355 makeArrayRef(&CstHighArray[0], NumElts));
14357 // Create the right shift.
14358 SDValue CstShift = DAG.getConstant(16, MVT::i32);
14359 SDValue CstShiftArray[] = {CstShift, CstShift, CstShift, CstShift,
14360 CstShift, CstShift, CstShift, CstShift};
14361 SDValue VecCstShift = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT,
14362 makeArrayRef(&CstShiftArray[0], NumElts));
14363 SDValue HighShift = DAG.getNode(ISD::SRL, DL, VecIntVT, V, VecCstShift);
14366 if (Subtarget.hasSSE41()) {
14367 EVT VecI16VT = Is128 ? MVT::v8i16 : MVT::v16i16;
14368 // uint4 lo = _mm_blend_epi16( v, (uint4) 0x4b000000, 0xaa);
14369 SDValue VecCstLowBitcast =
14370 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstLow);
14371 SDValue VecBitcast = DAG.getNode(ISD::BITCAST, DL, VecI16VT, V);
14372 // Low will be bitcasted right away, so do not bother bitcasting back to its original type.
14374 Low = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecBitcast,
14375 VecCstLowBitcast, DAG.getConstant(0xaa, MVT::i32));
14376 // uint4 hi = _mm_blend_epi16( _mm_srli_epi32(v,16),
14377 // (uint4) 0x53000000, 0xaa);
14378 SDValue VecCstHighBitcast =
14379 DAG.getNode(ISD::BITCAST, DL, VecI16VT, VecCstHigh);
14380 SDValue VecShiftBitcast =
14381 DAG.getNode(ISD::BITCAST, DL, VecI16VT, HighShift);
14382 // High will be bitcasted right away, so do not bother bitcasting back to
14383 // its original type.
14384 High = DAG.getNode(X86ISD::BLENDI, DL, VecI16VT, VecShiftBitcast,
14385 VecCstHighBitcast, DAG.getConstant(0xaa, MVT::i32));
14387 SDValue CstMask = DAG.getConstant(0xffff, MVT::i32);
14388 SDValue VecCstMask = DAG.getNode(ISD::BUILD_VECTOR, DL, VecIntVT, CstMask,
14389 CstMask, CstMask, CstMask);
14390 // uint4 lo = (v & (uint4) 0xffff) | (uint4) 0x4b000000;
14391 SDValue LowAnd = DAG.getNode(ISD::AND, DL, VecIntVT, V, VecCstMask);
14392 Low = DAG.getNode(ISD::OR, DL, VecIntVT, LowAnd, VecCstLow);
14394 // uint4 hi = (v >> 16) | (uint4) 0x53000000;
14395 High = DAG.getNode(ISD::OR, DL, VecIntVT, HighShift, VecCstHigh);
14398 // Create the vector constant for -(0x1.0p39f + 0x1.0p23f).
14399 SDValue CstFAdd = DAG.getConstantFP(
14400 APFloat(APFloat::IEEEsingle, APInt(32, 0xD3000080)), MVT::f32);
14401 SDValue CstFAddArray[] = {CstFAdd, CstFAdd, CstFAdd, CstFAdd,
14402 CstFAdd, CstFAdd, CstFAdd, CstFAdd};
14403 SDValue VecCstFAdd = DAG.getNode(ISD::BUILD_VECTOR, DL, VecFloatVT,
14404 makeArrayRef(&CstFAddArray[0], NumElts));
14406 // float4 fhi = (float4) hi - (0x1.0p39f + 0x1.0p23f);
14407 SDValue HighBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, High);
14409 DAG.getNode(ISD::FADD, DL, VecFloatVT, HighBitcast, VecCstFAdd);
14410 // return (float4) lo + fhi;
14411 SDValue LowBitcast = DAG.getNode(ISD::BITCAST, DL, VecFloatVT, Low);
14412 return DAG.getNode(ISD::FADD, DL, VecFloatVT, LowBitcast, FHigh);
14415 SDValue X86TargetLowering::lowerUINT_TO_FP_vec(SDValue Op,
14416 SelectionDAG &DAG) const {
14417 SDValue N0 = Op.getOperand(0);
14418 MVT SVT = N0.getSimpleValueType();
14421 switch (SVT.SimpleTy) {
14423 llvm_unreachable("Custom UINT_TO_FP is not supported!");
14428 MVT NVT = MVT::getVectorVT(MVT::i32, SVT.getVectorNumElements());
14429 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(),
14430 DAG.getNode(ISD::ZERO_EXTEND, dl, NVT, N0));
14434 return lowerUINT_TO_FP_vXi32(Op, DAG, *Subtarget);
14436 llvm_unreachable(nullptr);
14439 SDValue X86TargetLowering::LowerUINT_TO_FP(SDValue Op,
14440 SelectionDAG &DAG) const {
14441 SDValue N0 = Op.getOperand(0);
14444 if (Op.getValueType().isVector())
14445 return lowerUINT_TO_FP_vec(Op, DAG);
14447 // Since UINT_TO_FP is legal (it's marked custom), dag combiner won't
14448 // optimize it to a SINT_TO_FP when the sign bit is known zero. Perform
14449 // the optimization here.
14450 if (DAG.SignBitIsZero(N0))
14451 return DAG.getNode(ISD::SINT_TO_FP, dl, Op.getValueType(), N0);
14453 MVT SrcVT = N0.getSimpleValueType();
14454 MVT DstVT = Op.getSimpleValueType();
14455 if (SrcVT == MVT::i64 && DstVT == MVT::f64 && X86ScalarSSEf64)
14456 return LowerUINT_TO_FP_i64(Op, DAG);
14457 if (SrcVT == MVT::i32 && X86ScalarSSEf64)
14458 return LowerUINT_TO_FP_i32(Op, DAG);
14459 if (Subtarget->is64Bit() && SrcVT == MVT::i64 && DstVT == MVT::f32)
14462 // Make a 64-bit buffer, and use it to build an FILD.
14463 SDValue StackSlot = DAG.CreateStackTemporary(MVT::i64);
14464 if (SrcVT == MVT::i32) {
14465 SDValue WordOff = DAG.getConstant(4, getPointerTy());
14466 SDValue OffsetSlot = DAG.getNode(ISD::ADD, dl,
14467 getPointerTy(), StackSlot, WordOff);
14468 SDValue Store1 = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14469 StackSlot, MachinePointerInfo(),
14471 SDValue Store2 = DAG.getStore(Store1, dl, DAG.getConstant(0, MVT::i32),
14472 OffsetSlot, MachinePointerInfo(),
14474 SDValue Fild = BuildFILD(Op, MVT::i64, Store2, StackSlot, DAG);
14478 assert(SrcVT == MVT::i64 && "Unexpected type in UINT_TO_FP");
14479 SDValue Store = DAG.getStore(DAG.getEntryNode(), dl, Op.getOperand(0),
14480 StackSlot, MachinePointerInfo(),
14482 // For i64 source, we need to add the appropriate power of 2 if the input
14483 // was negative. This is the same as the optimization in
14484 // DAGTypeLegalizer::ExpandIntOp_UINT_TO_FP, and for it to be safe here,
14485 // we must be careful to do the computation in x87 extended precision, not
14486 // in SSE. (The generic code can't know it's OK to do this, or how to.)
14487 int SSFI = cast<FrameIndexSDNode>(StackSlot)->getIndex();
14488 MachineMemOperand *MMO =
14489 DAG.getMachineFunction()
14490 .getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14491 MachineMemOperand::MOLoad, 8, 8);
14493 SDVTList Tys = DAG.getVTList(MVT::f80, MVT::Other);
14494 SDValue Ops[] = { Store, StackSlot, DAG.getValueType(MVT::i64) };
14495 SDValue Fild = DAG.getMemIntrinsicNode(X86ISD::FILD, dl, Tys, Ops,
14498 APInt FF(32, 0x5F800000ULL);
14500 // Check whether the sign bit is set.
14501 SDValue SignSet = DAG.getSetCC(dl,
14502 getSetCCResultType(*DAG.getContext(), MVT::i64),
14503 Op.getOperand(0), DAG.getConstant(0, MVT::i64),
14506 // Build a 64 bit pair (0, FF) in the constant pool, with FF in the lo bits.
14507 SDValue FudgePtr = DAG.getConstantPool(
14508 ConstantInt::get(*DAG.getContext(), FF.zext(64)),
14511 // Get a pointer to FF if the sign bit was set, or to 0 otherwise.
14512 SDValue Zero = DAG.getIntPtrConstant(0);
14513 SDValue Four = DAG.getIntPtrConstant(4);
14514 SDValue Offset = DAG.getNode(ISD::SELECT, dl, Zero.getValueType(), SignSet,
14516 FudgePtr = DAG.getNode(ISD::ADD, dl, getPointerTy(), FudgePtr, Offset);
14518 // Load the value out, extending it from f32 to f80.
14519 // FIXME: Avoid the extend by constructing the right constant pool?
14520 SDValue Fudge = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::f80, DAG.getEntryNode(),
14521 FudgePtr, MachinePointerInfo::getConstantPool(),
14522 MVT::f32, false, false, false, 4);
14523 // Extend everything to 80 bits to force it to be done on x87.
14524 SDValue Add = DAG.getNode(ISD::FADD, dl, MVT::f80, Fild, Fudge);
14525 return DAG.getNode(ISD::FP_ROUND, dl, DstVT, Add, DAG.getIntPtrConstant(0));
14528 std::pair<SDValue,SDValue>
14529 X86TargetLowering:: FP_TO_INTHelper(SDValue Op, SelectionDAG &DAG,
14530 bool IsSigned, bool IsReplace) const {
14533 EVT DstTy = Op.getValueType();
14535 if (!IsSigned && !isIntegerTypeFTOL(DstTy)) {
14536 assert(DstTy == MVT::i32 && "Unexpected FP_TO_UINT");
14540 assert(DstTy.getSimpleVT() <= MVT::i64 &&
14541 DstTy.getSimpleVT() >= MVT::i16 &&
14542 "Unknown FP_TO_INT to lower!");
14544 // These are really Legal.
14545 if (DstTy == MVT::i32 &&
14546 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14547 return std::make_pair(SDValue(), SDValue());
14548 if (Subtarget->is64Bit() &&
14549 DstTy == MVT::i64 &&
14550 isScalarFPTypeInSSEReg(Op.getOperand(0).getValueType()))
14551 return std::make_pair(SDValue(), SDValue());
14553 // We lower FP->int64 either into FISTP64 followed by a load from a temporary
14554 // stack slot, or into the FTOL runtime function.
14555 MachineFunction &MF = DAG.getMachineFunction();
14556 unsigned MemSize = DstTy.getSizeInBits()/8;
14557 int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14558 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14561 if (!IsSigned && isIntegerTypeFTOL(DstTy))
14562 Opc = X86ISD::WIN_FTOL;
14564 switch (DstTy.getSimpleVT().SimpleTy) {
14565 default: llvm_unreachable("Invalid FP_TO_SINT to lower!");
14566 case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
14567 case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
14568 case MVT::i64: Opc = X86ISD::FP_TO_INT64_IN_MEM; break;
14571 SDValue Chain = DAG.getEntryNode();
14572 SDValue Value = Op.getOperand(0);
14573 EVT TheVT = Op.getOperand(0).getValueType();
14574 // FIXME This causes a redundant load/store if the SSE-class value is already
14575 // in memory, such as if it is on the callstack.
14576 if (isScalarFPTypeInSSEReg(TheVT)) {
14577 assert(DstTy == MVT::i64 && "Invalid FP_TO_SINT to lower!");
14578 Chain = DAG.getStore(Chain, DL, Value, StackSlot,
14579 MachinePointerInfo::getFixedStack(SSFI),
14581 SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
14583 Chain, StackSlot, DAG.getValueType(TheVT)
14586 MachineMemOperand *MMO =
14587 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14588 MachineMemOperand::MOLoad, MemSize, MemSize);
14589 Value = DAG.getMemIntrinsicNode(X86ISD::FLD, DL, Tys, Ops, DstTy, MMO);
14590 Chain = Value.getValue(1);
14591 SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize, false);
14592 StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
14595 MachineMemOperand *MMO =
14596 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
14597 MachineMemOperand::MOStore, MemSize, MemSize);
14599 if (Opc != X86ISD::WIN_FTOL) {
14600 // Build the FP_TO_INT*_IN_MEM
14601 SDValue Ops[] = { Chain, Value, StackSlot };
14602 SDValue FIST = DAG.getMemIntrinsicNode(Opc, DL, DAG.getVTList(MVT::Other),
14604 return std::make_pair(FIST, StackSlot);
14606 SDValue ftol = DAG.getNode(X86ISD::WIN_FTOL, DL,
14607 DAG.getVTList(MVT::Other, MVT::Glue),
14609 SDValue eax = DAG.getCopyFromReg(ftol, DL, X86::EAX,
14610 MVT::i32, ftol.getValue(1));
14611 SDValue edx = DAG.getCopyFromReg(eax.getValue(1), DL, X86::EDX,
14612 MVT::i32, eax.getValue(2));
14613 SDValue Ops[] = { eax, edx };
14614 SDValue pair = IsReplace
14615 ? DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops)
14616 : DAG.getMergeValues(Ops, DL);
14617 return std::make_pair(pair, SDValue());
14621 static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
14622 const X86Subtarget *Subtarget) {
14623 MVT VT = Op->getSimpleValueType(0);
14624 SDValue In = Op->getOperand(0);
14625 MVT InVT = In.getSimpleValueType();
14628 // Optimize vectors in AVX mode:
14631 // Use vpunpcklwd for 4 lower elements v8i16 -> v4i32.
14632 // Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
14633 // Concat upper and lower parts.
14636 // Use vpunpckldq for 4 lower elements v4i32 -> v2i64.
14637 // Use vpunpckhdq for 4 upper elements v4i32 -> v2i64.
14638 // Concat upper and lower parts.
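//
// Illustrative example (not part of the original comment), zero-extending
// v8i16 -> v8i32 without AVX2:
//   lo = vpunpcklwd(v, zero)   // elements 0..3, each widened to i32
//   hi = vpunpckhwd(v, zero)   // elements 4..7, each widened to i32
//   result = concat(bitcast<v4i32>(lo), bitcast<v4i32>(hi))
// For ANY_EXTEND the zero vector is replaced by undef.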
14641 if (((VT != MVT::v16i16) || (InVT != MVT::v16i8)) &&
14642 ((VT != MVT::v8i32) || (InVT != MVT::v8i16)) &&
14643 ((VT != MVT::v4i64) || (InVT != MVT::v4i32)))
14646 if (Subtarget->hasInt256())
14647 return DAG.getNode(X86ISD::VZEXT, dl, VT, In);
14649 SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
14650 SDValue Undef = DAG.getUNDEF(InVT);
14651 bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
14652 SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14653 SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
14655 MVT HVT = MVT::getVectorVT(VT.getVectorElementType(),
14656 VT.getVectorNumElements()/2);
14658 OpLo = DAG.getNode(ISD::BITCAST, dl, HVT, OpLo);
14659 OpHi = DAG.getNode(ISD::BITCAST, dl, HVT, OpHi);
14661 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
14664 static SDValue LowerZERO_EXTEND_AVX512(SDValue Op,
14665 SelectionDAG &DAG) {
14666 MVT VT = Op->getSimpleValueType(0);
14667 SDValue In = Op->getOperand(0);
14668 MVT InVT = In.getSimpleValueType();
14670 unsigned int NumElts = VT.getVectorNumElements();
14671 if (NumElts != 8 && NumElts != 16)
14674 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1)
14675 return DAG.getNode(X86ISD::VZEXT, DL, VT, In);
14677 EVT ExtVT = (NumElts == 8)? MVT::v8i64 : MVT::v16i32;
14678 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14679 // Now only the mask-extension case remains.
14680 assert(InVT.getVectorElementType() == MVT::i1);
14681 SDValue Cst = DAG.getTargetConstant(1, ExtVT.getScalarType());
14682 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14683 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
14684 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14685 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14686 MachinePointerInfo::getConstantPool(),
14687 false, false, false, Alignment);
14689 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, DL, ExtVT, In, Ld);
14690 if (VT.is512BitVector())
14692 return DAG.getNode(X86ISD::VTRUNC, DL, VT, Brcst);
14695 static SDValue LowerANY_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14696 SelectionDAG &DAG) {
14697 if (Subtarget->hasFp256()) {
14698 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14706 static SDValue LowerZERO_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
14707 SelectionDAG &DAG) {
14709 MVT VT = Op.getSimpleValueType();
14710 SDValue In = Op.getOperand(0);
14711 MVT SVT = In.getSimpleValueType();
14713 if (VT.is512BitVector() || SVT.getVectorElementType() == MVT::i1)
14714 return LowerZERO_EXTEND_AVX512(Op, DAG);
14716 if (Subtarget->hasFp256()) {
14717 SDValue Res = LowerAVXExtend(Op, DAG, Subtarget);
14722 assert(!VT.is256BitVector() || !SVT.is128BitVector() ||
14723 VT.getVectorNumElements() != SVT.getVectorNumElements());
14727 SDValue X86TargetLowering::LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const {
14729 MVT VT = Op.getSimpleValueType();
14730 SDValue In = Op.getOperand(0);
14731 MVT InVT = In.getSimpleValueType();
14733 if (VT == MVT::i1) {
14734 assert((InVT.isInteger() && (InVT.getSizeInBits() <= 64)) &&
14735 "Invalid scalar TRUNCATE operation");
14736 if (InVT.getSizeInBits() >= 32)
14738 In = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, In);
14739 return DAG.getNode(ISD::TRUNCATE, DL, VT, In);
14741 assert(VT.getVectorNumElements() == InVT.getVectorNumElements() &&
14742 "Invalid TRUNCATE operation");
14744 if (InVT.is512BitVector() || VT.getVectorElementType() == MVT::i1) {
14745 if (VT.getVectorElementType().getSizeInBits() >=8)
14746 return DAG.getNode(X86ISD::VTRUNC, DL, VT, In);
14748 assert(VT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
14749 unsigned NumElts = InVT.getVectorNumElements();
14750 assert ((NumElts == 8 || NumElts == 16) && "Unexpected vector type");
14751 if (InVT.getSizeInBits() < 512) {
14752 MVT ExtVT = (NumElts == 16)? MVT::v16i32 : MVT::v8i64;
14753 In = DAG.getNode(ISD::SIGN_EXTEND, DL, ExtVT, In);
14757 SDValue Cst = DAG.getTargetConstant(1, InVT.getVectorElementType());
14758 const Constant *C = (dyn_cast<ConstantSDNode>(Cst))->getConstantIntValue();
14759 SDValue CP = DAG.getConstantPool(C, getPointerTy());
14760 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
14761 SDValue Ld = DAG.getLoad(Cst.getValueType(), DL, DAG.getEntryNode(), CP,
14762 MachinePointerInfo::getConstantPool(),
14763 false, false, false, Alignment);
14764 SDValue OneV = DAG.getNode(X86ISD::VBROADCAST, DL, InVT, Ld);
14765 SDValue And = DAG.getNode(ISD::AND, DL, InVT, OneV, In);
14766 return DAG.getNode(X86ISD::TESTM, DL, VT, And, And);
14769 if ((VT == MVT::v4i32) && (InVT == MVT::v4i64)) {
14770 // On AVX2, v4i64 -> v4i32 becomes VPERMD.
14771 if (Subtarget->hasInt256()) {
14772 static const int ShufMask[] = {0, 2, 4, 6, -1, -1, -1, -1};
14773 In = DAG.getNode(ISD::BITCAST, DL, MVT::v8i32, In);
14774 In = DAG.getVectorShuffle(MVT::v8i32, DL, In, DAG.getUNDEF(MVT::v8i32),
14776 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, In,
14777 DAG.getIntPtrConstant(0));
14780 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14781 DAG.getIntPtrConstant(0));
14782 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14783 DAG.getIntPtrConstant(2));
14784 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14785 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14786 static const int ShufMask[] = {0, 2, 4, 6};
14787 return DAG.getVectorShuffle(VT, DL, OpLo, OpHi, ShufMask);
14790 if ((VT == MVT::v8i16) && (InVT == MVT::v8i32)) {
14791 // On AVX2, v8i32 -> v8i16 becomes PSHUFB.
14792 if (Subtarget->hasInt256()) {
14793 In = DAG.getNode(ISD::BITCAST, DL, MVT::v32i8, In);
14795 SmallVector<SDValue,32> pshufbMask;
14796 for (unsigned i = 0; i < 2; ++i) {
14797 pshufbMask.push_back(DAG.getConstant(0x0, MVT::i8));
14798 pshufbMask.push_back(DAG.getConstant(0x1, MVT::i8));
14799 pshufbMask.push_back(DAG.getConstant(0x4, MVT::i8));
14800 pshufbMask.push_back(DAG.getConstant(0x5, MVT::i8));
14801 pshufbMask.push_back(DAG.getConstant(0x8, MVT::i8));
14802 pshufbMask.push_back(DAG.getConstant(0x9, MVT::i8));
14803 pshufbMask.push_back(DAG.getConstant(0xc, MVT::i8));
14804 pshufbMask.push_back(DAG.getConstant(0xd, MVT::i8));
14805 for (unsigned j = 0; j < 8; ++j)
14806 pshufbMask.push_back(DAG.getConstant(0x80, MVT::i8));
14808 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v32i8, pshufbMask);
14809 In = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v32i8, In, BV);
14810 In = DAG.getNode(ISD::BITCAST, DL, MVT::v4i64, In);
14812 static const int ShufMask[] = {0, 2, -1, -1};
14813 In = DAG.getVectorShuffle(MVT::v4i64, DL, In, DAG.getUNDEF(MVT::v4i64),
14815 In = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v2i64, In,
14816 DAG.getIntPtrConstant(0));
14817 return DAG.getNode(ISD::BITCAST, DL, VT, In);
14820 SDValue OpLo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14821 DAG.getIntPtrConstant(0));
14823 SDValue OpHi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, MVT::v4i32, In,
14824 DAG.getIntPtrConstant(4));
14826 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpLo);
14827 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, OpHi);
14829 // The PSHUFB mask:
14830 static const int ShufMask1[] = {0, 1, 4, 5, 8, 9, 12, 13,
14831 -1, -1, -1, -1, -1, -1, -1, -1};
14833 SDValue Undef = DAG.getUNDEF(MVT::v16i8);
14834 OpLo = DAG.getVectorShuffle(MVT::v16i8, DL, OpLo, Undef, ShufMask1);
14835 OpHi = DAG.getVectorShuffle(MVT::v16i8, DL, OpHi, Undef, ShufMask1);
14837 OpLo = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpLo);
14838 OpHi = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, OpHi);
14840 // The MOVLHPS Mask:
14841 static const int ShufMask2[] = {0, 1, 4, 5};
14842 SDValue res = DAG.getVectorShuffle(MVT::v4i32, DL, OpLo, OpHi, ShufMask2);
14843 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, res);
14846 // Handle truncation of V256 to V128 using shuffles.
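// Illustrative example (not part of the original comment): truncating
// v16i16 -> v16i8 bitcasts the input to v32i8, shuffles out the even bytes
// {0, 2, 4, ..., 30} (the low byte of each i16 on little-endian x86), and
// then extracts the low 128-bit subvector.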
14847 if (!VT.is128BitVector() || !InVT.is256BitVector())
14850 assert(Subtarget->hasFp256() && "256-bit vector without AVX!");
14852 unsigned NumElems = VT.getVectorNumElements();
14853 MVT NVT = MVT::getVectorVT(VT.getVectorElementType(), NumElems * 2);
14855 SmallVector<int, 16> MaskVec(NumElems * 2, -1);
14856 // Prepare truncation shuffle mask
14857 for (unsigned i = 0; i != NumElems; ++i)
14858 MaskVec[i] = i * 2;
14859 SDValue V = DAG.getVectorShuffle(NVT, DL,
14860 DAG.getNode(ISD::BITCAST, DL, NVT, In),
14861 DAG.getUNDEF(NVT), &MaskVec[0]);
14862 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V,
14863 DAG.getIntPtrConstant(0));
14866 SDValue X86TargetLowering::LowerFP_TO_SINT(SDValue Op,
14867 SelectionDAG &DAG) const {
14868 assert(!Op.getSimpleValueType().isVector());
14870 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14871 /*IsSigned=*/ true, /*IsReplace=*/ false);
14872 SDValue FIST = Vals.first, StackSlot = Vals.second;
14873 // If FP_TO_INTHelper failed, the node is actually supposed to be Legal.
14874 if (!FIST.getNode()) return Op;
14876 if (StackSlot.getNode())
14877 // Load the result.
14878 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14879 FIST, StackSlot, MachinePointerInfo(),
14880 false, false, false, 0);
14882 // The node is the result.
14886 SDValue X86TargetLowering::LowerFP_TO_UINT(SDValue Op,
14887 SelectionDAG &DAG) const {
14888 std::pair<SDValue,SDValue> Vals = FP_TO_INTHelper(Op, DAG,
14889 /*IsSigned=*/ false, /*IsReplace=*/ false);
14890 SDValue FIST = Vals.first, StackSlot = Vals.second;
14891 assert(FIST.getNode() && "Unexpected failure");
14893 if (StackSlot.getNode())
14894 // Load the result.
14895 return DAG.getLoad(Op.getValueType(), SDLoc(Op),
14896 FIST, StackSlot, MachinePointerInfo(),
14897 false, false, false, 0);
14899 // The node is the result.
14903 static SDValue LowerFP_EXTEND(SDValue Op, SelectionDAG &DAG) {
14905 MVT VT = Op.getSimpleValueType();
14906 SDValue In = Op.getOperand(0);
14907 MVT SVT = In.getSimpleValueType();
14909 assert(SVT == MVT::v2f32 && "Only customize MVT::v2f32 type legalization!");
14911 return DAG.getNode(X86ISD::VFPEXT, DL, VT,
14912 DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v4f32,
14913 In, DAG.getUNDEF(SVT)));
14916 /// The only differences between FABS and FNEG are the mask and the logic op.
14917 /// FNEG also has a folding opportunity for FNEG(FABS(x)).
14918 static SDValue LowerFABSorFNEG(SDValue Op, SelectionDAG &DAG) {
14919 assert((Op.getOpcode() == ISD::FABS || Op.getOpcode() == ISD::FNEG) &&
14920 "Wrong opcode for lowering FABS or FNEG.");
14922 bool IsFABS = (Op.getOpcode() == ISD::FABS);
14924 // If this is a FABS and it has an FNEG user, bail out to fold the combination
14925 // into an FNABS. We'll lower the FABS after that if it is still in use.
14927 for (SDNode *User : Op->uses())
14928 if (User->getOpcode() == ISD::FNEG)
14931 SDValue Op0 = Op.getOperand(0);
14932 bool IsFNABS = !IsFABS && (Op0.getOpcode() == ISD::FABS);
14935 MVT VT = Op.getSimpleValueType();
14936 // Assume scalar op for initialization; update for vector if needed.
14937 // Note that there are no scalar bitwise logical SSE/AVX instructions, so we
14938 // generate a 16-byte vector constant and logic op even for the scalar case.
14939 // Using a 16-byte mask allows folding the load of the mask with
14940 // the logic op, so it can save (~4 bytes) on code size.
14942 unsigned NumElts = VT == MVT::f64 ? 2 : 4;
14943 // FIXME: Use function attribute "OptimizeForSize" and/or CodeGenOpt::Level to
14944 // decide if we should generate a 16-byte constant mask when we only need 4 or
14945 // 8 bytes for the scalar case.
14946 if (VT.isVector()) {
14947 EltVT = VT.getVectorElementType();
14948 NumElts = VT.getVectorNumElements();
14951 unsigned EltBits = EltVT.getSizeInBits();
14952 LLVMContext *Context = DAG.getContext();
14953 // For FABS, mask is 0x7f...; for FNEG, mask is 0x80...
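// Illustrative example (not part of the original comment), for an f32 element:
//   FABS(x)  = x & 0x7fffffff   (clear the sign bit)
//   FNEG(x)  = x ^ 0x80000000   (flip the sign bit)
//   FNABS(x) = x | 0x80000000   (set the sign bit; the FNEG(FABS(x)) fold)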
14955 IsFABS ? APInt::getSignedMaxValue(EltBits) : APInt::getSignBit(EltBits);
14956 Constant *C = ConstantInt::get(*Context, MaskElt);
14957 C = ConstantVector::getSplat(NumElts, C);
14958 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14959 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy());
14960 unsigned Alignment = cast<ConstantPoolSDNode>(CPIdx)->getAlignment();
14961 SDValue Mask = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
14962 MachinePointerInfo::getConstantPool(),
14963 false, false, false, Alignment);
14965 if (VT.isVector()) {
14966 // For a vector, cast operands to a vector type, perform the logic op,
14967 // and cast the result back to the original value type.
14968 MVT VecVT = MVT::getVectorVT(MVT::i64, VT.getSizeInBits() / 64);
14969 SDValue MaskCasted = DAG.getNode(ISD::BITCAST, dl, VecVT, Mask);
14970 SDValue Operand = IsFNABS ?
14971 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0.getOperand(0)) :
14972 DAG.getNode(ISD::BITCAST, dl, VecVT, Op0);
14973 unsigned BitOp = IsFABS ? ISD::AND : IsFNABS ? ISD::OR : ISD::XOR;
14974 return DAG.getNode(ISD::BITCAST, dl, VT,
14975 DAG.getNode(BitOp, dl, VecVT, Operand, MaskCasted));
14978 // If not vector, then scalar.
14979 unsigned BitOp = IsFABS ? X86ISD::FAND : IsFNABS ? X86ISD::FOR : X86ISD::FXOR;
14980 SDValue Operand = IsFNABS ? Op0.getOperand(0) : Op0;
14981 return DAG.getNode(BitOp, dl, VT, Operand, Mask);
14984 static SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) {
14985 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
14986 LLVMContext *Context = DAG.getContext();
14987 SDValue Op0 = Op.getOperand(0);
14988 SDValue Op1 = Op.getOperand(1);
14990 MVT VT = Op.getSimpleValueType();
14991 MVT SrcVT = Op1.getSimpleValueType();
14993 // If second operand is smaller, extend it first.
14994 if (SrcVT.bitsLT(VT)) {
14995 Op1 = DAG.getNode(ISD::FP_EXTEND, dl, VT, Op1);
14998 // And if it is bigger, shrink it first.
14999 if (SrcVT.bitsGT(VT)) {
15000 Op1 = DAG.getNode(ISD::FP_ROUND, dl, VT, Op1, DAG.getIntPtrConstant(1));
15004 // At this point the operands and the result should have the same
15005 // type, and that won't be f80 since that is not custom lowered.
15007 const fltSemantics &Sem =
15008 VT == MVT::f64 ? APFloat::IEEEdouble : APFloat::IEEEsingle;
15009 const unsigned SizeInBits = VT.getSizeInBits();
15011 SmallVector<Constant *, 4> CV(
15012 VT == MVT::f64 ? 2 : 4,
15013 ConstantFP::get(*Context, APFloat(Sem, APInt(SizeInBits, 0))));
15015 // First, clear all bits but the sign bit from the second operand (sign).
15016 CV[0] = ConstantFP::get(*Context,
15017 APFloat(Sem, APInt::getHighBitsSet(SizeInBits, 1)));
15018 Constant *C = ConstantVector::get(CV);
15019 SDValue CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
15020 SDValue Mask1 = DAG.getLoad(SrcVT, dl, DAG.getEntryNode(), CPIdx,
15021 MachinePointerInfo::getConstantPool(),
15022 false, false, false, 16);
15023 SDValue SignBit = DAG.getNode(X86ISD::FAND, dl, SrcVT, Op1, Mask1);
15025 // Next, clear the sign bit from the first operand (magnitude).
15026 // If it's a constant, we can clear it here.
15027 if (ConstantFPSDNode *Op0CN = dyn_cast<ConstantFPSDNode>(Op0)) {
15028 APFloat APF = Op0CN->getValueAPF();
15029 // If the magnitude is a positive zero, the sign bit alone is enough.
15030 if (APF.isPosZero())
15033 CV[0] = ConstantFP::get(*Context, APF);
15035 CV[0] = ConstantFP::get(
15037 APFloat(Sem, APInt::getLowBitsSet(SizeInBits, SizeInBits - 1)));
15039 C = ConstantVector::get(CV);
15040 CPIdx = DAG.getConstantPool(C, TLI.getPointerTy(), 16);
15041 SDValue Val = DAG.getLoad(VT, dl, DAG.getEntryNode(), CPIdx,
15042 MachinePointerInfo::getConstantPool(),
15043 false, false, false, 16);
15044 // If the magnitude operand wasn't a constant, we need to AND out the sign.
15045 if (!isa<ConstantFPSDNode>(Op0))
15046 Val = DAG.getNode(X86ISD::FAND, dl, VT, Op0, Val);
15048 // OR the magnitude value with the sign bit.
15049 return DAG.getNode(X86ISD::FOR, dl, VT, Val, SignBit);
15052 static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
15053 SDValue N0 = Op.getOperand(0);
15055 MVT VT = Op.getSimpleValueType();
15057 // Lower ISD::FGETSIGN to (AND (X86ISD::FGETSIGNx86 ...) 1).
15058 SDValue xFGETSIGN = DAG.getNode(X86ISD::FGETSIGNx86, dl, VT, N0,
15059 DAG.getConstant(1, VT));
15060 return DAG.getNode(ISD::AND, dl, VT, xFGETSIGN, DAG.getConstant(1, VT));
15063 // Check whether an OR'd tree is PTEST-able.
15064 static SDValue LowerVectorAllZeroTest(SDValue Op, const X86Subtarget *Subtarget,
15065 SelectionDAG &DAG) {
15066 assert(Op.getOpcode() == ISD::OR && "Only check OR'd tree.");
15068 if (!Subtarget->hasSSE41())
15071 if (!Op->hasOneUse())
15074 SDNode *N = Op.getNode();
15077 SmallVector<SDValue, 8> Opnds;
15078 DenseMap<SDValue, unsigned> VecInMap;
15079 SmallVector<SDValue, 8> VecIns;
15080 EVT VT = MVT::Other;
15082 // Recognize a special case where a vector is cast into a wide integer to test whether it is all zeros.
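// Illustrative example (not part of the original comment): a pattern such as
//   (or (or (extractelt v, 0), (extractelt v, 1)),
//       (or (extractelt v, 2), (extractelt v, 3))) == 0
// over all lanes of a v4i32 can be emitted as a single PTEST of v with itself.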
15084 Opnds.push_back(N->getOperand(0));
15085 Opnds.push_back(N->getOperand(1));
15087 for (unsigned Slot = 0, e = Opnds.size(); Slot < e; ++Slot) {
15088 SmallVectorImpl<SDValue>::const_iterator I = Opnds.begin() + Slot;
15089 // BFS traverse all OR'd operands.
15090 if (I->getOpcode() == ISD::OR) {
15091 Opnds.push_back(I->getOperand(0));
15092 Opnds.push_back(I->getOperand(1));
15093 // Re-evaluate the number of nodes to be traversed.
15094 e += 2; // 2 more nodes (LHS and RHS) are pushed.
15098 // Quit if this is not an EXTRACT_VECTOR_ELT.
15099 if (I->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
15102 // Quit if the index is not a constant.
15103 SDValue Idx = I->getOperand(1);
15104 if (!isa<ConstantSDNode>(Idx))
15107 SDValue ExtractedFromVec = I->getOperand(0);
15108 DenseMap<SDValue, unsigned>::iterator M = VecInMap.find(ExtractedFromVec);
15109 if (M == VecInMap.end()) {
15110 VT = ExtractedFromVec.getValueType();
15111 // Quit if not 128/256-bit vector.
15112 if (!VT.is128BitVector() && !VT.is256BitVector())
15114 // Quit if not the same type.
15115 if (VecInMap.begin() != VecInMap.end() &&
15116 VT != VecInMap.begin()->first.getValueType())
15118 M = VecInMap.insert(std::make_pair(ExtractedFromVec, 0)).first;
15119 VecIns.push_back(ExtractedFromVec);
15121 M->second |= 1U << cast<ConstantSDNode>(Idx)->getZExtValue();
15124 assert((VT.is128BitVector() || VT.is256BitVector()) &&
15125 "Not extracted from 128-/256-bit vector.");
15127 unsigned FullMask = (1U << VT.getVectorNumElements()) - 1U;
15129 for (DenseMap<SDValue, unsigned>::const_iterator
15130 I = VecInMap.begin(), E = VecInMap.end(); I != E; ++I) {
15131 // Quit if not all elements are used.
15132 if (I->second != FullMask)
15136 EVT TestVT = VT.is128BitVector() ? MVT::v2i64 : MVT::v4i64;
15138 // Cast all vectors into TestVT for PTEST.
15139 for (unsigned i = 0, e = VecIns.size(); i < e; ++i)
15140 VecIns[i] = DAG.getNode(ISD::BITCAST, DL, TestVT, VecIns[i]);
15142 // If more than one full vector is evaluated, OR them together before the PTEST.
15143 for (unsigned Slot = 0, e = VecIns.size(); e - Slot > 1; Slot += 2, e += 1) {
15144 // Each iteration will OR 2 nodes and append the result until there is only
15145 // 1 node left, i.e. the final OR'd value of all vectors.
15146 SDValue LHS = VecIns[Slot];
15147 SDValue RHS = VecIns[Slot + 1];
15148 VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
15151 return DAG.getNode(X86ISD::PTEST, DL, MVT::i32,
15152 VecIns.back(), VecIns.back());
15155 /// \brief return true if \c Op has a use that doesn't just read flags.
15156 static bool hasNonFlagsUse(SDValue Op) {
15157 for (SDNode::use_iterator UI = Op->use_begin(), UE = Op->use_end(); UI != UE;
15159 SDNode *User = *UI;
15160 unsigned UOpNo = UI.getOperandNo();
15161 if (User->getOpcode() == ISD::TRUNCATE && User->hasOneUse()) {
15162 // Look past the truncate.
15163 UOpNo = User->use_begin().getOperandNo();
15164 User = *User->use_begin();
15167 if (User->getOpcode() != ISD::BRCOND && User->getOpcode() != ISD::SETCC &&
15168 !(User->getOpcode() == ISD::SELECT && UOpNo == 0))
15174 /// Emit nodes that will be selected as "test Op0,Op0", or something
15176 SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
15177 SelectionDAG &DAG) const {
15178 if (Op.getValueType() == MVT::i1) {
15179 SDValue ExtOp = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i8, Op);
15180 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, ExtOp,
15181 DAG.getConstant(0, MVT::i8));
15183 // CF and OF aren't always set the way we want. Determine which
15184 // of these we need.
15185 bool NeedCF = false;
15186 bool NeedOF = false;
15189 case X86::COND_A: case X86::COND_AE:
15190 case X86::COND_B: case X86::COND_BE:
15193 case X86::COND_G: case X86::COND_GE:
15194 case X86::COND_L: case X86::COND_LE:
15195 case X86::COND_O: case X86::COND_NO: {
15196 // Check if we really need to set the
15197 // Overflow flag. If NoSignedWrap is present
15198 // that is not actually needed.
15199 switch (Op->getOpcode()) {
15204 const BinaryWithFlagsSDNode *BinNode =
15205 cast<BinaryWithFlagsSDNode>(Op.getNode());
15206 if (BinNode->hasNoSignedWrap())
15216 // See if we can use the EFLAGS value from the operand instead of
15217 // doing a separate TEST. TEST always sets OF and CF to 0, so unless
15218 // we prove that the arithmetic won't overflow, we can't use OF or CF.
15219 if (Op.getResNo() != 0 || NeedOF || NeedCF) {
15220 // Emit a CMP with 0, which is the TEST pattern.
15221 //if (Op.getValueType() == MVT::i1)
15222 // return DAG.getNode(X86ISD::CMP, dl, MVT::i1, Op,
15223 // DAG.getConstant(0, MVT::i1));
15224 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15225 DAG.getConstant(0, Op.getValueType()));
15227 unsigned Opcode = 0;
15228 unsigned NumOperands = 0;
15230 // Truncate operations may prevent the merge of the SETCC instruction
15231 // and the arithmetic instruction before it. Attempt to truncate the operands
15232 // of the arithmetic instruction and use a reduced bit-width instruction.
15233 bool NeedTruncation = false;
15234 SDValue ArithOp = Op;
15235 if (Op->getOpcode() == ISD::TRUNCATE && Op->hasOneUse()) {
15236 SDValue Arith = Op->getOperand(0);
15237 // Both the trunc and the arithmetic op need to have one user each.
15238 if (Arith->hasOneUse())
15239 switch (Arith.getOpcode()) {
15246 NeedTruncation = true;
15252 // NOTICE: In the code below we use ArithOp to hold the arithmetic operation
15253 // which may be the result of a CAST. We use the variable 'Op', which is the
15254 // non-casted variable when we check for possible users.
15255 switch (ArithOp.getOpcode()) {
15257 // Due to an isel shortcoming, be conservative if this add is likely to be
15258 // selected as part of a load-modify-store instruction. When the root node
15259 // in a match is a store, isel doesn't know how to remap non-chain non-flag
15260 // uses of other nodes in the match, such as the ADD in this case. This
15261 // leads to the ADD being left around and reselected, with the result being
15262 // two adds in the output. Alas, even if none of our users are stores, that
15263 // doesn't prove we're O.K. Ergo, if we have any parents that aren't
15264 // CopyToReg or SETCC, eschew INC/DEC. A better fix seems to require
15265 // climbing the DAG back to the root, and it doesn't seem to be worth the effort.
15267 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15268 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15269 if (UI->getOpcode() != ISD::CopyToReg &&
15270 UI->getOpcode() != ISD::SETCC &&
15271 UI->getOpcode() != ISD::STORE)
15274 if (ConstantSDNode *C =
15275 dyn_cast<ConstantSDNode>(ArithOp.getNode()->getOperand(1))) {
15276 // An add of one will be selected as an INC.
15277 if (C->getAPIntValue() == 1 && !Subtarget->slowIncDec()) {
15278 Opcode = X86ISD::INC;
15283 // An add of negative one (subtract of one) will be selected as a DEC.
15284 if (C->getAPIntValue().isAllOnesValue() && !Subtarget->slowIncDec()) {
15285 Opcode = X86ISD::DEC;
15291 // Otherwise use a regular EFLAGS-setting add.
15292 Opcode = X86ISD::ADD;
15297 // If we have a constant logical shift that's only used in a comparison
15298 // against zero turn it into an equivalent AND. This allows turning it into
15299 // a TEST instruction later.
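// Illustrative example (not part of the original comment), for an i32 value:
//   (x >> 16) == 0  becomes  (x & 0xFFFF0000) == 0
//   (x << 16) == 0  becomes  (x & 0x0000FFFF) == 0
// which the selector can then emit as a single TEST against an immediate.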
15300 if ((X86CC == X86::COND_E || X86CC == X86::COND_NE) && Op->hasOneUse() &&
15301 isa<ConstantSDNode>(Op->getOperand(1)) && !hasNonFlagsUse(Op)) {
15302 EVT VT = Op.getValueType();
15303 unsigned BitWidth = VT.getSizeInBits();
15304 unsigned ShAmt = Op->getConstantOperandVal(1);
15305 if (ShAmt >= BitWidth) // Avoid undefined shifts.
15307 APInt Mask = ArithOp.getOpcode() == ISD::SRL
15308 ? APInt::getHighBitsSet(BitWidth, BitWidth - ShAmt)
15309 : APInt::getLowBitsSet(BitWidth, BitWidth - ShAmt);
15310 if (!Mask.isSignedIntN(32)) // Avoid large immediates.
15312 SDValue New = DAG.getNode(ISD::AND, dl, VT, Op->getOperand(0),
15313 DAG.getConstant(Mask, VT));
15314 DAG.ReplaceAllUsesWith(Op, New);
15320 // If the primary 'and' result isn't used, don't bother using X86ISD::AND,
15321 // because a TEST instruction will be better.
15322 if (!hasNonFlagsUse(Op))
15328 // Due to the ISEL shortcoming noted above, be conservative if this op is
15329 // likely to be selected as part of a load-modify-store instruction.
15330 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
15331 UE = Op.getNode()->use_end(); UI != UE; ++UI)
15332 if (UI->getOpcode() == ISD::STORE)
15335 // Otherwise use a regular EFLAGS-setting instruction.
15336 switch (ArithOp.getOpcode()) {
15337 default: llvm_unreachable("unexpected operator!");
15338 case ISD::SUB: Opcode = X86ISD::SUB; break;
15339 case ISD::XOR: Opcode = X86ISD::XOR; break;
15340 case ISD::AND: Opcode = X86ISD::AND; break;
15342 if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
15343 SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG);
15344 if (EFLAGS.getNode())
15347 Opcode = X86ISD::OR;
15361 return SDValue(Op.getNode(), 1);
15367 // If we found that truncation is beneficial, perform the truncation and use it.
15369 if (NeedTruncation) {
15370 EVT VT = Op.getValueType();
15371 SDValue WideVal = Op->getOperand(0);
15372 EVT WideVT = WideVal.getValueType();
15373 unsigned ConvertedOp = 0;
15374 // Use a target machine opcode to prevent further DAGCombine
15375 // optimizations that may separate the arithmetic operations
15376 // from the setcc node.
15377 switch (WideVal.getOpcode()) {
15379 case ISD::ADD: ConvertedOp = X86ISD::ADD; break;
15380 case ISD::SUB: ConvertedOp = X86ISD::SUB; break;
15381 case ISD::AND: ConvertedOp = X86ISD::AND; break;
15382 case ISD::OR: ConvertedOp = X86ISD::OR; break;
15383 case ISD::XOR: ConvertedOp = X86ISD::XOR; break;
15387 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
15388 if (TLI.isOperationLegal(WideVal.getOpcode(), WideVT)) {
15389 SDValue V0 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(0));
15390 SDValue V1 = DAG.getNode(ISD::TRUNCATE, dl, VT, WideVal.getOperand(1));
15391 Op = DAG.getNode(ConvertedOp, dl, VT, V0, V1);
15397 // Emit a CMP with 0, which is the TEST pattern.
15398 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op,
15399 DAG.getConstant(0, Op.getValueType()));
15401 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
15402 SmallVector<SDValue, 4> Ops;
15403 for (unsigned i = 0; i != NumOperands; ++i)
15404 Ops.push_back(Op.getOperand(i));
15406 SDValue New = DAG.getNode(Opcode, dl, VTs, Ops);
15407 DAG.ReplaceAllUsesWith(Op, New);
15408 return SDValue(New.getNode(), 1);
15411 /// Emit nodes that will be selected as "cmp Op0,Op1", or something
15413 SDValue X86TargetLowering::EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
15414 SDLoc dl, SelectionDAG &DAG) const {
15415 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op1)) {
15416 if (C->getAPIntValue() == 0)
15417 return EmitTest(Op0, X86CC, dl, DAG);
15419 if (Op0.getValueType() == MVT::i1)
15420 llvm_unreachable("Unexpected comparison operation for MVT::i1 operands");
15423 if ((Op0.getValueType() == MVT::i8 || Op0.getValueType() == MVT::i16 ||
15424 Op0.getValueType() == MVT::i32 || Op0.getValueType() == MVT::i64)) {
15425 // Do the comparison at i32 if it's smaller, besides the Atom case.
15426 // This avoids subregister aliasing issues. Keep the smaller reference
15427 // if we're optimizing for size, however, as that'll allow better folding
15428 // of memory operations.
15429 if (Op0.getValueType() != MVT::i32 && Op0.getValueType() != MVT::i64 &&
15430 !DAG.getMachineFunction().getFunction()->hasFnAttribute(
15431 Attribute::MinSize) &&
15432 !Subtarget->isAtom()) {
15433 unsigned ExtendOp =
15434 isX86CCUnsigned(X86CC) ? ISD::ZERO_EXTEND : ISD::SIGN_EXTEND;
15435 Op0 = DAG.getNode(ExtendOp, dl, MVT::i32, Op0);
15436 Op1 = DAG.getNode(ExtendOp, dl, MVT::i32, Op1);
15438 // Use SUB instead of CMP to enable CSE between SUB and CMP.
15439 SDVTList VTs = DAG.getVTList(Op0.getValueType(), MVT::i32);
15440 SDValue Sub = DAG.getNode(X86ISD::SUB, dl, VTs,
15442 return SDValue(Sub.getNode(), 1);
15444 return DAG.getNode(X86ISD::CMP, dl, MVT::i32, Op0, Op1);
15447 /// Convert a comparison if required by the subtarget.
15448 SDValue X86TargetLowering::ConvertCmpIfNecessary(SDValue Cmp,
15449 SelectionDAG &DAG) const {
15450 // If the subtarget does not support the FUCOMI instruction, floating-point
15451 // comparisons have to be converted.
15452 if (Subtarget->hasCMov() ||
15453 Cmp.getOpcode() != X86ISD::CMP ||
15454 !Cmp.getOperand(0).getValueType().isFloatingPoint() ||
15455 !Cmp.getOperand(1).getValueType().isFloatingPoint())
15458 // The instruction selector will select an FUCOM instruction instead of
15459 // FUCOMI, which writes the comparison result to FPSW instead of EFLAGS. Hence
15460 // build an SDNode sequence that transfers the result from FPSW into EFLAGS:
15461 // (X86sahf (trunc (srl (X86fp_stsw (trunc (X86cmp ...)), 8))))
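// Roughly (illustrative only), the selected assembly for this sequence is:
//   fucom            ; compare, result goes to FPSW
//   fnstsw %ax       ; copy FPSW into AX
//   sahf             ; load AH (the condition bits) into EFLAGS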
15463 SDValue TruncFPSW = DAG.getNode(ISD::TRUNCATE, dl, MVT::i16, Cmp);
15464 SDValue FNStSW = DAG.getNode(X86ISD::FNSTSW16r, dl, MVT::i16, TruncFPSW);
15465 SDValue Srl = DAG.getNode(ISD::SRL, dl, MVT::i16, FNStSW,
15466 DAG.getConstant(8, MVT::i8));
15467 SDValue TruncSrl = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Srl);
15468 return DAG.getNode(X86ISD::SAHF, dl, MVT::i32, TruncSrl);
15471 /// The minimum architected relative accuracy is 2^-12. We need one
15472 /// Newton-Raphson step to have a good float result (24 bits of precision).
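/// For reference (not part of the original comment), one Newton-Raphson step
/// for the reciprocal square root of 'a' from estimate x0 is:
///   x1 = x0 * (1.5 - 0.5 * a * x0 * x0)
/// The refinement itself is emitted by the generic combiner; this hook only
/// returns the initial FRSQRT estimate and the step count.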
15473 SDValue X86TargetLowering::getRsqrtEstimate(SDValue Op,
15474 DAGCombinerInfo &DCI,
15475 unsigned &RefinementSteps,
15476 bool &UseOneConstNR) const {
15477 // FIXME: We should use instruction latency models to calculate the cost of
15478 // each potential sequence, but this is very hard to do reliably because
15479 // at least Intel's Core* chips have variable timing based on the number of
15480 // significant digits in the divisor and/or sqrt operand.
15481 if (!Subtarget->useSqrtEst())
15484 EVT VT = Op.getValueType();
15486 // SSE1 has rsqrtss and rsqrtps.
15487 // TODO: Add support for AVX512 (v16f32).
15488 // It is likely not profitable to do this for f64 because a double-precision
15489 // rsqrt estimate with refinement on x86 prior to FMA requires at least 16
15490 // instructions: convert to single, rsqrtss, convert back to double, refine
15491 // (3 steps = at least 13 insts). If an 'rsqrtsd' variant was added to the ISA
15492 // along with FMA, this could be a throughput win.
15493 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15494 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15495 RefinementSteps = 1;
15496 UseOneConstNR = false;
15497 return DCI.DAG.getNode(X86ISD::FRSQRT, SDLoc(Op), VT, Op);
15502 /// The minimum architected relative accuracy is 2^-12. We need one
15503 /// Newton-Raphson step to have a good float result (24 bits of precision).
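/// For reference (not part of the original comment), one Newton-Raphson step
/// for the reciprocal of 'a' from estimate x0 is:
///   x1 = x0 * (2 - a * x0)
/// As above, the refinement is emitted by the generic combiner around the
/// FRCP estimate returned here.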
15504 SDValue X86TargetLowering::getRecipEstimate(SDValue Op,
15505 DAGCombinerInfo &DCI,
15506 unsigned &RefinementSteps) const {
15507 // FIXME: We should use instruction latency models to calculate the cost of
15508 // each potential sequence, but this is very hard to do reliably because
15509 // at least Intel's Core* chips have variable timing based on the number of
15510 // significant digits in the divisor.
15511 if (!Subtarget->useReciprocalEst())
15514 EVT VT = Op.getValueType();
15516 // SSE1 has rcpss and rcpps. AVX adds a 256-bit variant for rcpps.
15517 // TODO: Add support for AVX512 (v16f32).
15518 // It is likely not profitable to do this for f64 because a double-precision
15519 // reciprocal estimate with refinement on x86 prior to FMA requires
15520 // 15 instructions: convert to single, rcpss, convert back to double, refine
15521 // (3 steps = 12 insts). If an 'rcpsd' variant was added to the ISA
15522 // along with FMA, this could be a throughput win.
15523 if ((Subtarget->hasSSE1() && (VT == MVT::f32 || VT == MVT::v4f32)) ||
15524 (Subtarget->hasAVX() && VT == MVT::v8f32)) {
15525 RefinementSteps = ReciprocalEstimateRefinementSteps;
15526 return DCI.DAG.getNode(X86ISD::FRCP, SDLoc(Op), VT, Op);
15531 static bool isAllOnes(SDValue V) {
15532 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V);
15533 return C && C->isAllOnesValue();
15536 /// LowerToBT - Result of 'and' is compared against zero. Turn it into a BT node
15537 /// if it's possible.
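/// For example (a sketch of the transform), ((x >> 3) & 1) != 0 becomes
/// (X86ISD::BT x, 3), and the result is read from the carry flag with
/// COND_B (or COND_AE for the == 0 form).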
15538 SDValue X86TargetLowering::LowerToBT(SDValue And, ISD::CondCode CC,
15539 SDLoc dl, SelectionDAG &DAG) const {
15540 SDValue Op0 = And.getOperand(0);
15541 SDValue Op1 = And.getOperand(1);
15542 if (Op0.getOpcode() == ISD::TRUNCATE)
15543 Op0 = Op0.getOperand(0);
15544 if (Op1.getOpcode() == ISD::TRUNCATE)
15545 Op1 = Op1.getOperand(0);
15548 if (Op1.getOpcode() == ISD::SHL)
15549 std::swap(Op0, Op1);
15550 if (Op0.getOpcode() == ISD::SHL) {
15551 if (ConstantSDNode *And00C = dyn_cast<ConstantSDNode>(Op0.getOperand(0)))
15552 if (And00C->getZExtValue() == 1) {
15553 // If we looked past a truncate, check that it's only truncating away known zeros.
15555 unsigned BitWidth = Op0.getValueSizeInBits();
15556 unsigned AndBitWidth = And.getValueSizeInBits();
15557 if (BitWidth > AndBitWidth) {
15559 DAG.computeKnownBits(Op0, Zeros, Ones);
15560 if (Zeros.countLeadingOnes() < BitWidth - AndBitWidth)
15564 RHS = Op0.getOperand(1);
15566 } else if (Op1.getOpcode() == ISD::Constant) {
15567 ConstantSDNode *AndRHS = cast<ConstantSDNode>(Op1);
15568 uint64_t AndRHSVal = AndRHS->getZExtValue();
15569 SDValue AndLHS = Op0;
15571 if (AndRHSVal == 1 && AndLHS.getOpcode() == ISD::SRL) {
15572 LHS = AndLHS.getOperand(0);
15573 RHS = AndLHS.getOperand(1);
15576 // Use BT if the immediate can't be encoded in a TEST instruction.
15577 if (!isUInt<32>(AndRHSVal) && isPowerOf2_64(AndRHSVal)) {
15579 RHS = DAG.getConstant(Log2_64_Ceil(AndRHSVal), LHS.getValueType());
15583 if (LHS.getNode()) {
15584 // If LHS is i8, promote it to i32 with any_extend. There is no i8 BT
15585 // instruction. Since the shift amount is in-range-or-undefined, we know
15586 // that doing a bittest on the i32 value is ok. We extend to i32 because
15587 // the encoding for the i16 version is larger than the i32 version.
15588 // Also promote i16 to i32 for performance / code size reasons.
15589 if (LHS.getValueType() == MVT::i8 ||
15590 LHS.getValueType() == MVT::i16)
15591 LHS = DAG.getNode(ISD::ANY_EXTEND, dl, MVT::i32, LHS);
15593 // If the operand types disagree, extend the shift amount to match. Since
15594 // BT ignores high bits (like shifts) we can use anyextend.
15595 if (LHS.getValueType() != RHS.getValueType())
15596 RHS = DAG.getNode(ISD::ANY_EXTEND, dl, LHS.getValueType(), RHS);
15598 SDValue BT = DAG.getNode(X86ISD::BT, dl, MVT::i32, LHS, RHS);
15599 X86::CondCode Cond = CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B;
15600 return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
15601 DAG.getConstant(Cond, MVT::i8), BT);
15607 /// \brief Turns an ISD::CondCode into a value suitable for an SSE floating-point compare.
15609 static int translateX86FSETCC(ISD::CondCode SetCCOpcode, SDValue &Op0,
15614 // SSE Condition code mapping: 0 - EQ, 1 - LT, 2 - LE, 3 - UNORD, 4 - NEQ, 5 - NLT, 6 - NLE, 7 - ORD.
15623 switch (SetCCOpcode) {
15624 default: llvm_unreachable("Unexpected SETCC condition");
15626 case ISD::SETEQ: SSECC = 0; break;
15628 case ISD::SETGT: Swap = true; // Fallthrough
15630 case ISD::SETOLT: SSECC = 1; break;
15632 case ISD::SETGE: Swap = true; // Fallthrough
15634 case ISD::SETOLE: SSECC = 2; break;
15635 case ISD::SETUO: SSECC = 3; break;
15637 case ISD::SETNE: SSECC = 4; break;
15638 case ISD::SETULE: Swap = true; // Fallthrough
15639 case ISD::SETUGE: SSECC = 5; break;
15640 case ISD::SETULT: Swap = true; // Fallthrough
15641 case ISD::SETUGT: SSECC = 6; break;
15642 case ISD::SETO: SSECC = 7; break;
15644 case ISD::SETONE: SSECC = 8; break;
15647 std::swap(Op0, Op1);
15652 // Lower256IntVSETCC - Break a 256-bit integer VSETCC into two new 128-bit
15653 // ones, and then concatenate the result back.
15654 static SDValue Lower256IntVSETCC(SDValue Op, SelectionDAG &DAG) {
15655 MVT VT = Op.getSimpleValueType();
15657 assert(VT.is256BitVector() && Op.getOpcode() == ISD::SETCC &&
15658 "Unsupported value type for operation");
15660 unsigned NumElems = VT.getVectorNumElements();
15662 SDValue CC = Op.getOperand(2);
15664 // Extract the LHS vectors
15665 SDValue LHS = Op.getOperand(0);
15666 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
15667 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
15669 // Extract the RHS vectors
15670 SDValue RHS = Op.getOperand(1);
15671 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
15672 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
15674 // Issue the operation on the smaller types and concatenate the result back
15675 MVT EltVT = VT.getVectorElementType();
15676 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
15677 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
15678 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1, CC),
15679 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2, CC));
15682 static SDValue LowerIntVSETCC_AVX512(SDValue Op, SelectionDAG &DAG,
15683 const X86Subtarget *Subtarget) {
15684 SDValue Op0 = Op.getOperand(0);
15685 SDValue Op1 = Op.getOperand(1);
15686 SDValue CC = Op.getOperand(2);
15687 MVT VT = Op.getSimpleValueType();
15690 assert(Op0.getValueType().getVectorElementType().getSizeInBits() >= 8 &&
15691 Op.getValueType().getScalarType() == MVT::i1 &&
15692 "Cannot set masked compare for this operation");
15694 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15696 bool Unsigned = false;
15699 switch (SetCCOpcode) {
15700 default: llvm_unreachable("Unexpected SETCC condition");
15701 case ISD::SETNE: SSECC = 4; break;
15702 case ISD::SETEQ: Opc = X86ISD::PCMPEQM; break;
15703 case ISD::SETUGT: SSECC = 6; Unsigned = true; break;
15704 case ISD::SETLT: Swap = true; //fall-through
15705 case ISD::SETGT: Opc = X86ISD::PCMPGTM; break;
15706 case ISD::SETULT: SSECC = 1; Unsigned = true; break;
15707 case ISD::SETUGE: SSECC = 5; Unsigned = true; break; //NLT
15708 case ISD::SETGE: Swap = true; SSECC = 2; break; // LE + swap
15709 case ISD::SETULE: Unsigned = true; //fall-through
15710 case ISD::SETLE: SSECC = 2; break;
15714 std::swap(Op0, Op1);
15716 return DAG.getNode(Opc, dl, VT, Op0, Op1);
15717 Opc = Unsigned ? X86ISD::CMPMU: X86ISD::CMPM;
15718 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15719 DAG.getConstant(SSECC, MVT::i8));
15722 /// \brief Try to turn a VSETULT into a VSETULE by modifying its second
15723 /// operand \p Op1. If non-trivial (for example because it's not constant)
15724 /// return an empty value.
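/// For example, (x <u <8, 8, 8, 8>) can be rewritten as (x <=u <7, 7, 7, 7>),
/// provided no constant element would underflow when decremented.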
15725 static SDValue ChangeVSETULTtoVSETULE(SDLoc dl, SDValue Op1, SelectionDAG &DAG)
15727 BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Op1.getNode());
15731 MVT VT = Op1.getSimpleValueType();
15732 MVT EVT = VT.getVectorElementType();
15733 unsigned n = VT.getVectorNumElements();
15734 SmallVector<SDValue, 8> ULTOp1;
15736 for (unsigned i = 0; i < n; ++i) {
15737 ConstantSDNode *Elt = dyn_cast<ConstantSDNode>(BV->getOperand(i));
15738 if (!Elt || Elt->isOpaque() || Elt->getValueType(0) != EVT)
15741 // Avoid underflow.
15742 APInt Val = Elt->getAPIntValue();
15746 ULTOp1.push_back(DAG.getConstant(Val - 1, EVT));
15749 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, ULTOp1);
15752 static SDValue LowerVSETCC(SDValue Op, const X86Subtarget *Subtarget,
15753 SelectionDAG &DAG) {
15754 SDValue Op0 = Op.getOperand(0);
15755 SDValue Op1 = Op.getOperand(1);
15756 SDValue CC = Op.getOperand(2);
15757 MVT VT = Op.getSimpleValueType();
15758 ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
15759 bool isFP = Op.getOperand(1).getSimpleValueType().isFloatingPoint();
15764 MVT EltVT = Op0.getSimpleValueType().getVectorElementType();
15765 assert(EltVT == MVT::f32 || EltVT == MVT::f64);
15768 unsigned SSECC = translateX86FSETCC(SetCCOpcode, Op0, Op1);
15769 unsigned Opc = X86ISD::CMPP;
15770 if (Subtarget->hasAVX512() && VT.getVectorElementType() == MVT::i1) {
15771 assert(VT.getVectorNumElements() <= 16);
15772 Opc = X86ISD::CMPM;
15774 // In the two special cases we can't handle, emit two comparisons.
15777 unsigned CombineOpc;
15778 if (SetCCOpcode == ISD::SETUEQ) {
15779 CC0 = 3; CC1 = 0; CombineOpc = ISD::OR;
15781 assert(SetCCOpcode == ISD::SETONE);
15782 CC0 = 7; CC1 = 4; CombineOpc = ISD::AND;
15785 SDValue Cmp0 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15786 DAG.getConstant(CC0, MVT::i8));
15787 SDValue Cmp1 = DAG.getNode(Opc, dl, VT, Op0, Op1,
15788 DAG.getConstant(CC1, MVT::i8));
15789 return DAG.getNode(CombineOpc, dl, VT, Cmp0, Cmp1);
15791 // Handle all other FP comparisons here.
15792 return DAG.getNode(Opc, dl, VT, Op0, Op1,
15793 DAG.getConstant(SSECC, MVT::i8));
15796 // Break 256-bit integer vector compare into smaller ones.
15797 if (VT.is256BitVector() && !Subtarget->hasInt256())
15798 return Lower256IntVSETCC(Op, DAG);
15800 bool MaskResult = (VT.getVectorElementType() == MVT::i1);
15801 EVT OpVT = Op1.getValueType();
15802 if (Subtarget->hasAVX512()) {
15803 if (Op1.getValueType().is512BitVector() ||
15804 (Subtarget->hasBWI() && Subtarget->hasVLX()) ||
15805 (MaskResult && OpVT.getVectorElementType().getSizeInBits() >= 32))
15806 return LowerIntVSETCC_AVX512(Op, DAG, Subtarget);
15808 // In the AVX-512 architecture, setcc returns a mask with i1 elements,
15809 // but there is no compare instruction for i8 and i16 elements in KNL.
15810 // We are not talking about 512-bit operands in this case; these
15811 // types are illegal.
15813 (OpVT.getVectorElementType().getSizeInBits() < 32 &&
15814 OpVT.getVectorElementType().getSizeInBits() >= 8))
15815 return DAG.getNode(ISD::TRUNCATE, dl, VT,
15816 DAG.getNode(ISD::SETCC, dl, OpVT, Op0, Op1, CC));
15819 // We are handling one of the integer comparisons here. Since SSE only has
15820 // GT and EQ comparisons for integers, swapping operands and multiple
15821 // operations may be required for some comparisons.
15823 bool Swap = false, Invert = false, FlipSigns = false, MinMax = false;
15824 bool Subus = false;
15826 switch (SetCCOpcode) {
15827 default: llvm_unreachable("Unexpected SETCC condition");
15828 case ISD::SETNE: Invert = true;
15829 case ISD::SETEQ: Opc = X86ISD::PCMPEQ; break;
15830 case ISD::SETLT: Swap = true;
15831 case ISD::SETGT: Opc = X86ISD::PCMPGT; break;
15832 case ISD::SETGE: Swap = true;
15833 case ISD::SETLE: Opc = X86ISD::PCMPGT;
15834 Invert = true; break;
15835 case ISD::SETULT: Swap = true;
15836 case ISD::SETUGT: Opc = X86ISD::PCMPGT;
15837 FlipSigns = true; break;
15838 case ISD::SETUGE: Swap = true;
15839 case ISD::SETULE: Opc = X86ISD::PCMPGT;
15840 FlipSigns = true; Invert = true; break;
15843 // Special case: Use min/max operations for SETULE/SETUGE
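// This relies on the identities (x <=u y) == (umin(x, y) == x) and
// (x >=u y) == (umax(x, y) == x): emit the min/max here and compare its
// result against Op0 with PCMPEQ further down.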
15844 MVT VET = VT.getVectorElementType();
15846 (Subtarget->hasSSE41() && (VET >= MVT::i8 && VET <= MVT::i32))
15847 || (Subtarget->hasSSE2() && (VET == MVT::i8));
15850 switch (SetCCOpcode) {
15852 case ISD::SETULE: Opc = X86ISD::UMIN; MinMax = true; break;
15853 case ISD::SETUGE: Opc = X86ISD::UMAX; MinMax = true; break;
15856 if (MinMax) { Swap = false; Invert = false; FlipSigns = false; }
15859 bool hasSubus = Subtarget->hasSSE2() && (VET == MVT::i8 || VET == MVT::i16);
15860 if (!MinMax && hasSubus) {
15861 // As another special case, use PSUBUS[BW] when it's profitable. E.g. for Op0 u<= Op1:
15863 // t = psubus Op0, Op1
15864 // pcmpeq t, <0..0>
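// Since psubus saturates at zero, (psubus x, y) is zero exactly when x <=u y,
// so the saturating subtract plus a compare-with-zero implements the unsigned
// comparison without flipping sign bits.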
15865 switch (SetCCOpcode) {
15867 case ISD::SETULT: {
15868 // If the comparison is against a constant we can turn this into a
15869 // setule. With psubus, setule does not require a swap. This is
15870 // beneficial because the constant in the register is no longer
15871 // clobbered as the destination, so it can be hoisted out of a loop.
15872 // Only do this pre-AVX since vpcmp* is no longer destructive.
15873 if (Subtarget->hasAVX())
15875 SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
15876 if (ULEOp1.getNode()) {
15878 Subus = true; Invert = false; Swap = false;
15882 // Psubus is better than flip-sign because it requires no inversion.
15883 case ISD::SETUGE: Subus = true; Invert = false; Swap = true; break;
15884 case ISD::SETULE: Subus = true; Invert = false; Swap = false; break;
15888 Opc = X86ISD::SUBUS;
15894 std::swap(Op0, Op1);
15896 // Check that the operation in question is available (most are plain SSE2,
15897 // but PCMPGTQ and PCMPEQQ have different requirements).
15898 if (VT == MVT::v2i64) {
15899 if (Opc == X86ISD::PCMPGT && !Subtarget->hasSSE42()) {
15900 assert(Subtarget->hasSSE2() && "Don't know how to lower!");
15902 // First cast everything to the right type.
15903 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15904 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15906 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15907 // bits of the inputs before performing those operations. The lower
15908 // compare is always unsigned.
15911 SB = DAG.getConstant(0x80000000U, MVT::v4i32);
15913 SDValue Sign = DAG.getConstant(0x80000000U, MVT::i32);
15914 SDValue Zero = DAG.getConstant(0x00000000U, MVT::i32);
15915 SB = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v4i32,
15916 Sign, Zero, Sign, Zero);
15918 Op0 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op0, SB);
15919 Op1 = DAG.getNode(ISD::XOR, dl, MVT::v4i32, Op1, SB);
15921 // Emulate PCMPGTQ with (hi1 > hi2) | ((hi1 == hi2) & (lo1 > lo2))
15922 SDValue GT = DAG.getNode(X86ISD::PCMPGT, dl, MVT::v4i32, Op0, Op1);
15923 SDValue EQ = DAG.getNode(X86ISD::PCMPEQ, dl, MVT::v4i32, Op0, Op1);
15925 // Create masks for only the low parts/high parts of the 64 bit integers.
15926 static const int MaskHi[] = { 1, 1, 3, 3 };
15927 static const int MaskLo[] = { 0, 0, 2, 2 };
15928 SDValue EQHi = DAG.getVectorShuffle(MVT::v4i32, dl, EQ, EQ, MaskHi);
15929 SDValue GTLo = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskLo);
15930 SDValue GTHi = DAG.getVectorShuffle(MVT::v4i32, dl, GT, GT, MaskHi);
15932 SDValue Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, EQHi, GTLo);
15933 Result = DAG.getNode(ISD::OR, dl, MVT::v4i32, Result, GTHi);
15936 Result = DAG.getNOT(dl, Result, MVT::v4i32);
15938 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
15941 if (Opc == X86ISD::PCMPEQ && !Subtarget->hasSSE41()) {
15942 // If pcmpeqq is missing but pcmpeqd is available synthesize pcmpeqq with
15943 // pcmpeqd + pshufd + pand.
15944 assert(Subtarget->hasSSE2() && !FlipSigns && "Don't know how to lower!");
15946 // First cast everything to the right type.
15947 Op0 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op0);
15948 Op1 = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op1);
15951 SDValue Result = DAG.getNode(Opc, dl, MVT::v4i32, Op0, Op1);
15953 // Make sure the lower and upper halves are both all-ones.
15954 static const int Mask[] = { 1, 0, 3, 2 };
15955 SDValue Shuf = DAG.getVectorShuffle(MVT::v4i32, dl, Result, Result, Mask);
15956 Result = DAG.getNode(ISD::AND, dl, MVT::v4i32, Result, Shuf);
15959 Result = DAG.getNOT(dl, Result, MVT::v4i32);
15961 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
15965 // Since SSE has no unsigned integer comparisons, we need to flip the sign
15966 // bits of the inputs before performing those operations.
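// Flipping the sign bit maps the unsigned ordering onto the signed one. For
// example, with i8 elements, 200 ^ 0x80 = 72 and 100 ^ 0x80 = -28 (signed),
// and the signed compare 72 > -28 matches the unsigned compare 200 > 100.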
15968 EVT EltVT = VT.getVectorElementType();
15969 SDValue SB = DAG.getConstant(APInt::getSignBit(EltVT.getSizeInBits()), VT);
15970 Op0 = DAG.getNode(ISD::XOR, dl, VT, Op0, SB);
15971 Op1 = DAG.getNode(ISD::XOR, dl, VT, Op1, SB);
15974 SDValue Result = DAG.getNode(Opc, dl, VT, Op0, Op1);
15976 // If the logical-not of the result is required, perform that now.
15978 Result = DAG.getNOT(dl, Result, VT);
15981 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Op0, Result);
15984 Result = DAG.getNode(X86ISD::PCMPEQ, dl, VT, Result,
15985 getZeroVector(VT, Subtarget, DAG, dl));
15990 SDValue X86TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {
15992 MVT VT = Op.getSimpleValueType();
15994 if (VT.isVector()) return LowerVSETCC(Op, Subtarget, DAG);
15996 assert(((!Subtarget->hasAVX512() && VT == MVT::i8) || (VT == MVT::i1))
15997 && "SetCC type must be 8-bit or 1-bit integer");
15998 SDValue Op0 = Op.getOperand(0);
15999 SDValue Op1 = Op.getOperand(1);
16001 ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
16003 // Optimize to BT if possible.
16004 // Lower (X & (1 << N)) == 0 to BT(X, N).
16005 // Lower ((X >>u N) & 1) != 0 to BT(X, N).
16006 // Lower ((X >>s N) & 1) != 0 to BT(X, N).
16007 if (Op0.getOpcode() == ISD::AND && Op0.hasOneUse() &&
16008 Op1.getOpcode() == ISD::Constant &&
16009 cast<ConstantSDNode>(Op1)->isNullValue() &&
16010 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16011 SDValue NewSetCC = LowerToBT(Op0, CC, dl, DAG);
16012 if (NewSetCC.getNode()) {
16014 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, NewSetCC);
16019 // Look for X == 0, X == 1, X != 0, or X != 1. We can simplify some forms of these.
16021 if (Op1.getOpcode() == ISD::Constant &&
16022 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1 ||
16023 cast<ConstantSDNode>(Op1)->isNullValue()) &&
16024 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16026 // If the input is a setcc, then reuse the input setcc or use a new one with
16027 // the inverted condition.
16028 if (Op0.getOpcode() == X86ISD::SETCC) {
16029 X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
16030 bool Invert = (CC == ISD::SETNE) ^
16031 cast<ConstantSDNode>(Op1)->isNullValue();
16035 CCode = X86::GetOppositeBranchCondition(CCode);
16036 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
16037 DAG.getConstant(CCode, MVT::i8),
16038 Op0.getOperand(1));
16040 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
16044 if ((Op0.getValueType() == MVT::i1) && (Op1.getOpcode() == ISD::Constant) &&
16045 (cast<ConstantSDNode>(Op1)->getZExtValue() == 1) &&
16046 (CC == ISD::SETEQ || CC == ISD::SETNE)) {
16048 ISD::CondCode NewCC = ISD::getSetCCInverse(CC, true);
16049 return DAG.getSetCC(dl, VT, Op0, DAG.getConstant(0, MVT::i1), NewCC);
16052 bool isFP = Op1.getSimpleValueType().isFloatingPoint();
16053 unsigned X86CC = TranslateX86CC(CC, isFP, Op0, Op1, DAG);
16054 if (X86CC == X86::COND_INVALID)
16057 SDValue EFLAGS = EmitCmp(Op0, Op1, X86CC, dl, DAG);
16058 EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
16059 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
16060 DAG.getConstant(X86CC, MVT::i8), EFLAGS);
16062 return DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, SetCC);
16066 // isX86LogicalCmp - Return true if opcode is an X86 logical comparison.
16067 static bool isX86LogicalCmp(SDValue Op) {
16068 unsigned Opc = Op.getNode()->getOpcode();
16069 if (Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI ||
16070 Opc == X86ISD::SAHF)
16072 if (Op.getResNo() == 1 &&
16073 (Opc == X86ISD::ADD ||
16074 Opc == X86ISD::SUB ||
16075 Opc == X86ISD::ADC ||
16076 Opc == X86ISD::SBB ||
16077 Opc == X86ISD::SMUL ||
16078 Opc == X86ISD::UMUL ||
16079 Opc == X86ISD::INC ||
16080 Opc == X86ISD::DEC ||
16081 Opc == X86ISD::OR ||
16082 Opc == X86ISD::XOR ||
16083 Opc == X86ISD::AND))
16086 if (Op.getResNo() == 2 && Opc == X86ISD::UMUL)
16092 static bool isTruncWithZeroHighBitsInput(SDValue V, SelectionDAG &DAG) {
16093 if (V.getOpcode() != ISD::TRUNCATE)
16096 SDValue VOp0 = V.getOperand(0);
16097 unsigned InBits = VOp0.getValueSizeInBits();
16098 unsigned Bits = V.getValueSizeInBits();
16099 return DAG.MaskedValueIsZero(VOp0, APInt::getHighBitsSet(InBits,InBits-Bits));
16102 SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
16103 bool addTest = true;
16104 SDValue Cond = Op.getOperand(0);
16105 SDValue Op1 = Op.getOperand(1);
16106 SDValue Op2 = Op.getOperand(2);
16108 EVT VT = Op1.getValueType();
16111 // Lower fp selects into a CMP/AND/ANDN/OR sequence when the necessary SSE ops
16112 // are available. Otherwise fp cmovs get lowered into a less efficient branch
16113 // sequence later on.
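// The emitted sequence uses the all-ones/all-zeros mask produced by the
// compare: roughly (mask & Op1) | (~mask & Op2), built below from FSETCC,
// FAND, FANDN and FOR (a sketch; the AVX-512 path instead feeds the i1 mask
// directly into a SELECT node).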
16114 if (Cond.getOpcode() == ISD::SETCC &&
16115 ((Subtarget->hasSSE2() && (VT == MVT::f32 || VT == MVT::f64)) ||
16116 (Subtarget->hasSSE1() && VT == MVT::f32)) &&
16117 VT == Cond.getOperand(0).getValueType() && Cond->hasOneUse()) {
16118 SDValue CondOp0 = Cond.getOperand(0), CondOp1 = Cond.getOperand(1);
16119 int SSECC = translateX86FSETCC(
16120 cast<CondCodeSDNode>(Cond.getOperand(2))->get(), CondOp0, CondOp1);
16123 if (Subtarget->hasAVX512()) {
16124 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CondOp0, CondOp1,
16125 DAG.getConstant(SSECC, MVT::i8));
16126 return DAG.getNode(X86ISD::SELECT, DL, VT, Cmp, Op1, Op2);
16128 SDValue Cmp = DAG.getNode(X86ISD::FSETCC, DL, VT, CondOp0, CondOp1,
16129 DAG.getConstant(SSECC, MVT::i8));
16130 SDValue AndN = DAG.getNode(X86ISD::FANDN, DL, VT, Cmp, Op2);
16131 SDValue And = DAG.getNode(X86ISD::FAND, DL, VT, Cmp, Op1);
16132 return DAG.getNode(X86ISD::FOR, DL, VT, AndN, And);
16136 if (Cond.getOpcode() == ISD::SETCC) {
16137 SDValue NewCond = LowerSETCC(Cond, DAG);
16138 if (NewCond.getNode())
16142 // (select (x == 0), -1, y) -> (sign_bit (x - 1)) | y
16143 // (select (x == 0), y, -1) -> ~(sign_bit (x - 1)) | y
16144 // (select (x != 0), y, -1) -> (sign_bit (x - 1)) | y
16145 // (select (x != 0), -1, y) -> ~(sign_bit (x - 1)) | y
16146 if (Cond.getOpcode() == X86ISD::SETCC &&
16147 Cond.getOperand(1).getOpcode() == X86ISD::CMP &&
16148 isZero(Cond.getOperand(1).getOperand(1))) {
16149 SDValue Cmp = Cond.getOperand(1);
16151 unsigned CondCode =cast<ConstantSDNode>(Cond.getOperand(0))->getZExtValue();
16153 if ((isAllOnes(Op1) || isAllOnes(Op2)) &&
16154 (CondCode == X86::COND_E || CondCode == X86::COND_NE)) {
16155 SDValue Y = isAllOnes(Op2) ? Op1 : Op2;
16157 SDValue CmpOp0 = Cmp.getOperand(0);
16158 // Apply further optimizations for special cases
16159 // (select (x != 0), -1, 0) -> neg & sbb
16160 // (select (x == 0), 0, -1) -> neg & sbb
16161 if (ConstantSDNode *YC = dyn_cast<ConstantSDNode>(Y))
16162 if (YC->isNullValue() &&
16163 (isAllOnes(Op1) == (CondCode == X86::COND_NE))) {
16164 SDVTList VTs = DAG.getVTList(CmpOp0.getValueType(), MVT::i32);
16165 SDValue Neg = DAG.getNode(X86ISD::SUB, DL, VTs,
16166 DAG.getConstant(0, CmpOp0.getValueType()),
16168 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16169 DAG.getConstant(X86::COND_B, MVT::i8),
16170 SDValue(Neg.getNode(), 1));
16174 Cmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32,
16175 CmpOp0, DAG.getConstant(1, CmpOp0.getValueType()));
16176 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16178 SDValue Res = // Res = 0 or -1.
16179 DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16180 DAG.getConstant(X86::COND_B, MVT::i8), Cmp);
16182 if (isAllOnes(Op1) != (CondCode == X86::COND_E))
16183 Res = DAG.getNOT(DL, Res, Res.getValueType());
16185 ConstantSDNode *N2C = dyn_cast<ConstantSDNode>(Op2);
16186 if (!N2C || !N2C->isNullValue())
16187 Res = DAG.getNode(ISD::OR, DL, Res.getValueType(), Res, Y);
16192 // Look past (and (setcc_carry (cmp ...)), 1).
16193 if (Cond.getOpcode() == ISD::AND &&
16194 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16195 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16196 if (C && C->getAPIntValue() == 1)
16197 Cond = Cond.getOperand(0);
16200 // If condition flag is set by a X86ISD::CMP, then use it as the condition
16201 // setting operand in place of the X86ISD::SETCC.
16202 unsigned CondOpcode = Cond.getOpcode();
16203 if (CondOpcode == X86ISD::SETCC ||
16204 CondOpcode == X86ISD::SETCC_CARRY) {
16205 CC = Cond.getOperand(0);
16207 SDValue Cmp = Cond.getOperand(1);
16208 unsigned Opc = Cmp.getOpcode();
16209 MVT VT = Op.getSimpleValueType();
16211 bool IllegalFPCMov = false;
16212 if (VT.isFloatingPoint() && !VT.isVector() &&
16213 !isScalarFPTypeInSSEReg(VT)) // FPStack?
16214 IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSExtValue());
16216 if ((isX86LogicalCmp(Cmp) && !IllegalFPCMov) ||
16217 Opc == X86ISD::BT) { // FIXME
16221 } else if (CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16222 CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16223 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16224 Cond.getOperand(0).getValueType() != MVT::i8)) {
16225 SDValue LHS = Cond.getOperand(0);
16226 SDValue RHS = Cond.getOperand(1);
16227 unsigned X86Opcode;
16230 switch (CondOpcode) {
16231 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16232 case ISD::SADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16233 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16234 case ISD::SSUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16235 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16236 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16237 default: llvm_unreachable("unexpected overflowing operator");
16239 if (CondOpcode == ISD::UMULO)
16240 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16243 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16245 SDValue X86Op = DAG.getNode(X86Opcode, DL, VTs, LHS, RHS);
16247 if (CondOpcode == ISD::UMULO)
16248 Cond = X86Op.getValue(2);
16250 Cond = X86Op.getValue(1);
16252 CC = DAG.getConstant(X86Cond, MVT::i8);
16257 // Look past the truncate if the high bits are known zero.
16258 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16259 Cond = Cond.getOperand(0);
16261 // We know the result of AND is compared against zero. Try to match it to BT.
16263 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16264 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, DL, DAG);
16265 if (NewSetCC.getNode()) {
16266 CC = NewSetCC.getOperand(0);
16267 Cond = NewSetCC.getOperand(1);
16274 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16275 Cond = EmitTest(Cond, X86::COND_NE, DL, DAG);
16278 // a < b ? -1 : 0 -> RES = ~setcc_carry
16279 // a < b ? 0 : -1 -> RES = setcc_carry
16280 // a >= b ? -1 : 0 -> RES = setcc_carry
16281 // a >= b ? 0 : -1 -> RES = ~setcc_carry
16282 if (Cond.getOpcode() == X86ISD::SUB) {
16283 Cond = ConvertCmpIfNecessary(Cond, DAG);
16284 unsigned CondCode = cast<ConstantSDNode>(CC)->getZExtValue();
16286 if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
16287 (isAllOnes(Op1) || isAllOnes(Op2)) && (isZero(Op1) || isZero(Op2))) {
16288 SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
16289 DAG.getConstant(X86::COND_B, MVT::i8), Cond);
16290 if (isAllOnes(Op1) != (CondCode == X86::COND_B))
16291 return DAG.getNOT(DL, Res, Res.getValueType());
16296 // X86 doesn't have an i8 cmov. If both operands are the result of a truncate,
16297 // widen the cmov and push the truncate through. This avoids introducing a new
16298 // branch during isel and doesn't add any extensions.
16299 if (Op.getValueType() == MVT::i8 &&
16300 Op1.getOpcode() == ISD::TRUNCATE && Op2.getOpcode() == ISD::TRUNCATE) {
16301 SDValue T1 = Op1.getOperand(0), T2 = Op2.getOperand(0);
16302 if (T1.getValueType() == T2.getValueType() &&
16303 // Blacklist CopyFromReg to avoid partial register stalls.
16304 T1.getOpcode() != ISD::CopyFromReg && T2.getOpcode()!=ISD::CopyFromReg){
16305 SDVTList VTs = DAG.getVTList(T1.getValueType(), MVT::Glue);
16306 SDValue Cmov = DAG.getNode(X86ISD::CMOV, DL, VTs, T2, T1, CC, Cond);
16307 return DAG.getNode(ISD::TRUNCATE, DL, Op.getValueType(), Cmov);
16311 // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
16312 // condition is true.
16313 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
16314 SDValue Ops[] = { Op2, Op1, CC, Cond };
16315 return DAG.getNode(X86ISD::CMOV, DL, VTs, Ops);
16318 static SDValue LowerSIGN_EXTEND_AVX512(SDValue Op, const X86Subtarget *Subtarget,
16319 SelectionDAG &DAG) {
16320 MVT VT = Op->getSimpleValueType(0);
16321 SDValue In = Op->getOperand(0);
16322 MVT InVT = In.getSimpleValueType();
16323 MVT VTElt = VT.getVectorElementType();
16324 MVT InVTElt = InVT.getVectorElementType();
16328 if ((InVTElt == MVT::i1) &&
16329 (((Subtarget->hasBWI() && Subtarget->hasVLX() &&
16330 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() <= 16)) ||
16332 ((Subtarget->hasBWI() && VT.is512BitVector() &&
16333 VTElt.getSizeInBits() <= 16)) ||
16335 ((Subtarget->hasDQI() && Subtarget->hasVLX() &&
16336 VT.getSizeInBits() <= 256 && VTElt.getSizeInBits() >= 32)) ||
16338 ((Subtarget->hasDQI() && VT.is512BitVector() &&
16339 VTElt.getSizeInBits() >= 32))))
16340 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16342 unsigned int NumElts = VT.getVectorNumElements();
16344 if (NumElts != 8 && NumElts != 16)
16347 if (VT.is512BitVector() && InVT.getVectorElementType() != MVT::i1) {
16348 if (In.getOpcode() == X86ISD::VSEXT || In.getOpcode() == X86ISD::VZEXT)
16349 return DAG.getNode(In.getOpcode(), dl, VT, In.getOperand(0));
16350 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16353 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16354 assert (InVT.getVectorElementType() == MVT::i1 && "Unexpected vector type");
16356 MVT ExtVT = (NumElts == 8) ? MVT::v8i64 : MVT::v16i32;
16357 Constant *C = ConstantInt::get(*DAG.getContext(),
16358 APInt::getAllOnesValue(ExtVT.getScalarType().getSizeInBits()));
16360 SDValue CP = DAG.getConstantPool(C, TLI.getPointerTy());
16361 unsigned Alignment = cast<ConstantPoolSDNode>(CP)->getAlignment();
16362 SDValue Ld = DAG.getLoad(ExtVT.getScalarType(), dl, DAG.getEntryNode(), CP,
16363 MachinePointerInfo::getConstantPool(),
16364 false, false, false, Alignment);
16365 SDValue Brcst = DAG.getNode(X86ISD::VBROADCASTM, dl, ExtVT, In, Ld);
16366 if (VT.is512BitVector())
16368 return DAG.getNode(X86ISD::VTRUNC, dl, VT, Brcst);
16371 static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget *Subtarget,
16372 SelectionDAG &DAG) {
16373 MVT VT = Op->getSimpleValueType(0);
16374 SDValue In = Op->getOperand(0);
16375 MVT InVT = In.getSimpleValueType();
16378 if (VT.is512BitVector() || InVT.getVectorElementType() == MVT::i1)
16379 return LowerSIGN_EXTEND_AVX512(Op, Subtarget, DAG);
16381 if ((VT != MVT::v4i64 || InVT != MVT::v4i32) &&
16382 (VT != MVT::v8i32 || InVT != MVT::v8i16) &&
16383 (VT != MVT::v16i16 || InVT != MVT::v16i8))
16386 if (Subtarget->hasInt256())
16387 return DAG.getNode(X86ISD::VSEXT, dl, VT, In);
16389 // Optimize vectors in AVX mode:
16390 // Sign extend v8i16 to v8i32 and v4i32 to v4i64.
16393 // Divide the input vector into two parts;
16394 // for v4i32 the shuffle masks will be { 0, 1, -1, -1 } and { 2, 3, -1, -1 },
16395 // use the vpmovsx instruction to extend v4i32 -> v2i64 and v8i16 -> v4i32,
16396 // then concat the vectors back to the original VT.
16398 unsigned NumElems = InVT.getVectorNumElements();
16399 SDValue Undef = DAG.getUNDEF(InVT);
16401 SmallVector<int,8> ShufMask1(NumElems, -1);
16402 for (unsigned i = 0; i != NumElems/2; ++i)
16405 SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask1[0]);
16407 SmallVector<int,8> ShufMask2(NumElems, -1);
16408 for (unsigned i = 0; i != NumElems/2; ++i)
16409 ShufMask2[i] = i + NumElems/2;
16411 SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, &ShufMask2[0]);
16413 MVT HalfVT = MVT::getVectorVT(VT.getScalarType(),
16414 VT.getVectorNumElements()/2);
16416 OpLo = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpLo);
16417 OpHi = DAG.getNode(X86ISD::VSEXT, dl, HalfVT, OpHi);
16419 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
16422 // Lower vector extended loads using a shuffle. If SSSE3 is not available we
16423 // may emit an illegal shuffle but the expansion is still better than scalar
16424 // code. We generate X86ISD::VSEXT for SEXTLOADs if it's available, otherwise
16425 // we'll emit a shuffle and an arithmetic shift.
16426 // FIXME: Is the expansion actually better than scalar code? It doesn't seem so.
16427 // TODO: It is possible to support ZExt by zeroing the undef values during
16428 // the shuffle phase or after the shuffle.
16429 static SDValue LowerExtendedLoad(SDValue Op, const X86Subtarget *Subtarget,
16430 SelectionDAG &DAG) {
16431 MVT RegVT = Op.getSimpleValueType();
16432 assert(RegVT.isVector() && "We only custom lower vector sext loads.");
16433 assert(RegVT.isInteger() &&
16434 "We only custom lower integer vector sext loads.");
16436 // Nothing useful we can do without SSE2 shuffles.
16437 assert(Subtarget->hasSSE2() && "We only custom lower sext loads with SSE2.");
16439 LoadSDNode *Ld = cast<LoadSDNode>(Op.getNode());
16441 EVT MemVT = Ld->getMemoryVT();
16442 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16443 unsigned RegSz = RegVT.getSizeInBits();
16445 ISD::LoadExtType Ext = Ld->getExtensionType();
16447 assert((Ext == ISD::EXTLOAD || Ext == ISD::SEXTLOAD)
16448 && "Only anyext and sext are currently implemented.");
16449 assert(MemVT != RegVT && "Cannot extend to the same type");
16450 assert(MemVT.isVector() && "Must load a vector from memory");
16452 unsigned NumElems = RegVT.getVectorNumElements();
16453 unsigned MemSz = MemVT.getSizeInBits();
16454 assert(RegSz > MemSz && "Register size must be greater than the mem size");
16456 if (Ext == ISD::SEXTLOAD && RegSz == 256 && !Subtarget->hasInt256()) {
16457 // The only way in which we have a legal 256-bit vector result but not the
16458 // integer 256-bit operations needed to directly lower a sextload is if we
16459 // have AVX1 but not AVX2. In that case, we can always emit a sextload to
16460 // a 128-bit vector and a normal sign_extend to 256-bits that should get
16461 // correctly legalized. We do this late to allow the canonical form of
16462 // sextload to persist throughout the rest of the DAG combiner -- it wants
16463 // to fold together any extensions it can, and so will fuse a sign_extend
16464 // of an sextload into a sextload targeting a wider value.
16466 if (MemSz == 128) {
16467 // Just switch this to a normal load.
16468 assert(TLI.isTypeLegal(MemVT) && "If the memory type is a 128-bit type, "
16469 "it must be a legal 128-bit vector "
16471 Load = DAG.getLoad(MemVT, dl, Ld->getChain(), Ld->getBasePtr(),
16472 Ld->getPointerInfo(), Ld->isVolatile(), Ld->isNonTemporal(),
16473 Ld->isInvariant(), Ld->getAlignment());
16475 assert(MemSz < 128 &&
16476 "Can't extend a type wider than 128 bits to a 256 bit vector!");
16477 // Do an sext load to a 128-bit vector type. We want to use the same
16478 // number of elements, but elements half as wide. This will end up being
16479 // recursively lowered by this routine, but will succeed as we definitely
16480 // have all the necessary features if we're using AVX1.
16482 EVT::getIntegerVT(*DAG.getContext(), RegVT.getScalarSizeInBits() / 2);
16483 EVT HalfVecVT = EVT::getVectorVT(*DAG.getContext(), HalfEltVT, NumElems);
16485 DAG.getExtLoad(Ext, dl, HalfVecVT, Ld->getChain(), Ld->getBasePtr(),
16486 Ld->getPointerInfo(), MemVT, Ld->isVolatile(),
16487 Ld->isNonTemporal(), Ld->isInvariant(),
16488 Ld->getAlignment());
16491 // Replace chain users with the new chain.
16492 assert(Load->getNumValues() == 2 && "Loads must carry a chain!");
16493 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), Load.getValue(1));
16495 // Finally, do a normal sign-extend to the desired register.
16496 return DAG.getSExtOrTrunc(Load, dl, RegVT);
16499 // All sizes must be a power of two.
16500 assert(isPowerOf2_32(RegSz * MemSz * NumElems) &&
16501 "Non-power-of-two elements are not custom lowered!");
16503 // Attempt to load the original value using scalar loads.
16504 // Find the largest scalar type that divides the total loaded size.
16505 MVT SclrLoadTy = MVT::i8;
16506 for (MVT Tp : MVT::integer_valuetypes()) {
16507 if (TLI.isTypeLegal(Tp) && ((MemSz % Tp.getSizeInBits()) == 0)) {
16512 // On 32-bit systems, we can't save 64-bit integers. Try bitcasting to F64.
16513 if (TLI.isTypeLegal(MVT::f64) && SclrLoadTy.getSizeInBits() < 64 &&
16515 SclrLoadTy = MVT::f64;
16517 // Calculate the number of scalar loads that we need to perform
16518 // in order to load our vector from memory.
16519 unsigned NumLoads = MemSz / SclrLoadTy.getSizeInBits();
16521 assert((Ext != ISD::SEXTLOAD || NumLoads == 1) &&
16522 "Can only lower sext loads with a single scalar load!");
16524 unsigned loadRegZize = RegSz;
16525 if (Ext == ISD::SEXTLOAD && RegSz == 256)
16528 // Represent our vector as a sequence of elements which are the
16529 // largest scalar that we can load.
16530 EVT LoadUnitVecVT = EVT::getVectorVT(
16531 *DAG.getContext(), SclrLoadTy, loadRegZize / SclrLoadTy.getSizeInBits());
16533 // Represent the data using the same element type that is stored in
16534 // memory. In practice, we ''widen'' MemVT.
16536 EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
16537 loadRegZize / MemVT.getScalarType().getSizeInBits());
16539 assert(WideVecVT.getSizeInBits() == LoadUnitVecVT.getSizeInBits() &&
16540 "Invalid vector type");
16542 // We can't shuffle using an illegal type.
16543 assert(TLI.isTypeLegal(WideVecVT) &&
16544 "We only lower types that form legal widened vector types");
16546 SmallVector<SDValue, 8> Chains;
16547 SDValue Ptr = Ld->getBasePtr();
16548 SDValue Increment =
16549 DAG.getConstant(SclrLoadTy.getSizeInBits() / 8, TLI.getPointerTy());
16550 SDValue Res = DAG.getUNDEF(LoadUnitVecVT);
16552 for (unsigned i = 0; i < NumLoads; ++i) {
16553 // Perform a single load.
16554 SDValue ScalarLoad =
16555 DAG.getLoad(SclrLoadTy, dl, Ld->getChain(), Ptr, Ld->getPointerInfo(),
16556 Ld->isVolatile(), Ld->isNonTemporal(), Ld->isInvariant(),
16557 Ld->getAlignment());
16558 Chains.push_back(ScalarLoad.getValue(1));
16559 // Create the first element type using SCALAR_TO_VECTOR in order to avoid
16560 // another round of DAGCombining.
16562 Res = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, LoadUnitVecVT, ScalarLoad);
16564 Res = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, LoadUnitVecVT, Res,
16565 ScalarLoad, DAG.getIntPtrConstant(i));
16567 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
16570 SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
16572 // Bitcast the loaded value to a vector of the original element type, in
16573 // the size of the target vector type.
16574 SDValue SlicedVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Res);
16575 unsigned SizeRatio = RegSz / MemSz;
16577 if (Ext == ISD::SEXTLOAD) {
16578 // If we have SSE4.1, we can directly emit a VSEXT node.
16579 if (Subtarget->hasSSE41()) {
16580 SDValue Sext = DAG.getNode(X86ISD::VSEXT, dl, RegVT, SlicedVec);
16581 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16585 // Otherwise we'll shuffle the small elements in the high bits of the
16586 // larger type and perform an arithmetic shift. If the shift is not legal
16587 // it's better to scalarize.
16588 assert(TLI.isOperationLegalOrCustom(ISD::SRA, RegVT) &&
16589 "We can't implement a sext load without an arithmetic right shift!");
16591 // Redistribute the loaded elements into the different locations.
16592 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16593 for (unsigned i = 0; i != NumElems; ++i)
16594 ShuffleVec[i * SizeRatio + SizeRatio - 1] = i;
16596 SDValue Shuff = DAG.getVectorShuffle(
16597 WideVecVT, dl, SlicedVec, DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16599 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16601 // Build the arithmetic shift.
16602 unsigned Amt = RegVT.getVectorElementType().getSizeInBits() -
16603 MemVT.getVectorElementType().getSizeInBits();
16605 DAG.getNode(ISD::SRA, dl, RegVT, Shuff, DAG.getConstant(Amt, RegVT));
16607 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16611 // Redistribute the loaded elements into the different locations.
16612 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
16613 for (unsigned i = 0; i != NumElems; ++i)
16614 ShuffleVec[i * SizeRatio] = i;
16616 SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, SlicedVec,
16617 DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
16619 // Bitcast to the requested type.
16620 Shuff = DAG.getNode(ISD::BITCAST, dl, RegVT, Shuff);
16621 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), TF);
16625 // isAndOrOfSetCCs - Return true if node is an ISD::AND or
16626 // ISD::OR of two X86ISD::SETCC nodes, each of which has no other use apart
16627 // from the AND / OR.
16628 static bool isAndOrOfSetCCs(SDValue Op, unsigned &Opc) {
16629 Opc = Op.getOpcode();
16630 if (Opc != ISD::OR && Opc != ISD::AND)
16632 return (Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16633 Op.getOperand(0).hasOneUse() &&
16634 Op.getOperand(1).getOpcode() == X86ISD::SETCC &&
16635 Op.getOperand(1).hasOneUse());
16638 // isXor1OfSetCC - Return true if node is an ISD::XOR of an X86ISD::SETCC and
16639 // 1, and that the SETCC node has a single use.
16640 static bool isXor1OfSetCC(SDValue Op) {
16641 if (Op.getOpcode() != ISD::XOR)
16643 ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
16644 if (N1C && N1C->getAPIntValue() == 1) {
16645 return Op.getOperand(0).getOpcode() == X86ISD::SETCC &&
16646 Op.getOperand(0).hasOneUse();
16651 SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
16652 bool addTest = true;
16653 SDValue Chain = Op.getOperand(0);
16654 SDValue Cond = Op.getOperand(1);
16655 SDValue Dest = Op.getOperand(2);
16658 bool Inverted = false;
16660 if (Cond.getOpcode() == ISD::SETCC) {
16661 // Check for setcc([su]{add,sub,mul}o == 0).
16662 if (cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETEQ &&
16663 isa<ConstantSDNode>(Cond.getOperand(1)) &&
16664 cast<ConstantSDNode>(Cond.getOperand(1))->isNullValue() &&
16665 Cond.getOperand(0).getResNo() == 1 &&
16666 (Cond.getOperand(0).getOpcode() == ISD::SADDO ||
16667 Cond.getOperand(0).getOpcode() == ISD::UADDO ||
16668 Cond.getOperand(0).getOpcode() == ISD::SSUBO ||
16669 Cond.getOperand(0).getOpcode() == ISD::USUBO ||
16670 Cond.getOperand(0).getOpcode() == ISD::SMULO ||
16671 Cond.getOperand(0).getOpcode() == ISD::UMULO)) {
16673 Cond = Cond.getOperand(0);
16675 SDValue NewCond = LowerSETCC(Cond, DAG);
16676 if (NewCond.getNode())
16681 // FIXME: LowerXALUO doesn't handle these!!
16682 else if (Cond.getOpcode() == X86ISD::ADD ||
16683 Cond.getOpcode() == X86ISD::SUB ||
16684 Cond.getOpcode() == X86ISD::SMUL ||
16685 Cond.getOpcode() == X86ISD::UMUL)
16686 Cond = LowerXALUO(Cond, DAG);
16689 // Look past (and (setcc_carry (cmp ...)), 1).
16690 if (Cond.getOpcode() == ISD::AND &&
16691 Cond.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY) {
16692 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Cond.getOperand(1));
16693 if (C && C->getAPIntValue() == 1)
16694 Cond = Cond.getOperand(0);
16697 // If condition flag is set by a X86ISD::CMP, then use it as the condition
16698 // setting operand in place of the X86ISD::SETCC.
16699 unsigned CondOpcode = Cond.getOpcode();
16700 if (CondOpcode == X86ISD::SETCC ||
16701 CondOpcode == X86ISD::SETCC_CARRY) {
16702 CC = Cond.getOperand(0);
16704 SDValue Cmp = Cond.getOperand(1);
16705 unsigned Opc = Cmp.getOpcode();
16706 // FIXME: WHY THE SPECIAL CASING OF LogicalCmp??
16707 if (isX86LogicalCmp(Cmp) || Opc == X86ISD::BT) {
16711 switch (cast<ConstantSDNode>(CC)->getZExtValue()) {
16715 // These can only come from an arithmetic instruction with overflow,
16716 // e.g. SADDO, UADDO.
16717 Cond = Cond.getNode()->getOperand(1);
16723 CondOpcode = Cond.getOpcode();
16724 if (CondOpcode == ISD::UADDO || CondOpcode == ISD::SADDO ||
16725 CondOpcode == ISD::USUBO || CondOpcode == ISD::SSUBO ||
16726 ((CondOpcode == ISD::UMULO || CondOpcode == ISD::SMULO) &&
16727 Cond.getOperand(0).getValueType() != MVT::i8)) {
16728 SDValue LHS = Cond.getOperand(0);
16729 SDValue RHS = Cond.getOperand(1);
16730 unsigned X86Opcode;
16733 // Keep this in sync with LowerXALUO, otherwise we might create redundant
16734 // instructions that can't be removed afterwards (i.e. X86ISD::ADD and
16736 switch (CondOpcode) {
16737 case ISD::UADDO: X86Opcode = X86ISD::ADD; X86Cond = X86::COND_B; break;
16739 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16741 X86Opcode = X86ISD::INC; X86Cond = X86::COND_O;
16744 X86Opcode = X86ISD::ADD; X86Cond = X86::COND_O; break;
16745 case ISD::USUBO: X86Opcode = X86ISD::SUB; X86Cond = X86::COND_B; break;
16747 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
16749 X86Opcode = X86ISD::DEC; X86Cond = X86::COND_O;
16752 X86Opcode = X86ISD::SUB; X86Cond = X86::COND_O; break;
16753 case ISD::UMULO: X86Opcode = X86ISD::UMUL; X86Cond = X86::COND_O; break;
16754 case ISD::SMULO: X86Opcode = X86ISD::SMUL; X86Cond = X86::COND_O; break;
16755 default: llvm_unreachable("unexpected overflowing operator");
16758 X86Cond = X86::GetOppositeBranchCondition((X86::CondCode)X86Cond);
16759 if (CondOpcode == ISD::UMULO)
16760 VTs = DAG.getVTList(LHS.getValueType(), LHS.getValueType(),
16763 VTs = DAG.getVTList(LHS.getValueType(), MVT::i32);
16765 SDValue X86Op = DAG.getNode(X86Opcode, dl, VTs, LHS, RHS);
16767 if (CondOpcode == ISD::UMULO)
16768 Cond = X86Op.getValue(2);
16770 Cond = X86Op.getValue(1);
16772 CC = DAG.getConstant(X86Cond, MVT::i8);
16776 if (Cond.hasOneUse() && isAndOrOfSetCCs(Cond, CondOpc)) {
16777 SDValue Cmp = Cond.getOperand(0).getOperand(1);
16778 if (CondOpc == ISD::OR) {
16779 // Also, recognize the pattern generated by an FCMP_UNE. We can emit
16780 // two branches instead of an explicit OR instruction with a separate test.
16782 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16783 isX86LogicalCmp(Cmp)) {
16784 CC = Cond.getOperand(0).getOperand(0);
16785 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16786 Chain, Dest, CC, Cmp);
16787 CC = Cond.getOperand(1).getOperand(0);
16791 } else { // ISD::AND
16792 // Also, recognize the pattern generated by an FCMP_OEQ. We can emit
16793 // two branches instead of an explicit AND instruction with a
16794 // separate test. However, we only do this if this block doesn't
16795 // have a fall-through edge, because this requires an explicit
16796 // jmp when the condition is false.
16797 if (Cmp == Cond.getOperand(1).getOperand(1) &&
16798 isX86LogicalCmp(Cmp) &&
16799 Op.getNode()->hasOneUse()) {
16800 X86::CondCode CCode =
16801 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16802 CCode = X86::GetOppositeBranchCondition(CCode);
16803 CC = DAG.getConstant(CCode, MVT::i8);
16804 SDNode *User = *Op.getNode()->use_begin();
16805 // Look for an unconditional branch following this conditional branch.
16806 // We need this because we need to reverse the successors in order
16807 // to implement FCMP_OEQ.
16808 if (User->getOpcode() == ISD::BR) {
16809 SDValue FalseBB = User->getOperand(1);
16811 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16812 assert(NewBR == User);
16816 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16817 Chain, Dest, CC, Cmp);
16818 X86::CondCode CCode =
16819 (X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
16820 CCode = X86::GetOppositeBranchCondition(CCode);
16821 CC = DAG.getConstant(CCode, MVT::i8);
16827 } else if (Cond.hasOneUse() && isXor1OfSetCC(Cond)) {
16828 // Recognize (xorb (setcc), 1) patterns. The xor inverts the condition.
16829 // It should be transformed during DAG combining, except when the condition
16830 // is set by an arithmetic-with-overflow node.
16831 X86::CondCode CCode =
16832 (X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
16833 CCode = X86::GetOppositeBranchCondition(CCode);
16834 CC = DAG.getConstant(CCode, MVT::i8);
16835 Cond = Cond.getOperand(0).getOperand(1);
16837 } else if (Cond.getOpcode() == ISD::SETCC &&
16838 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETOEQ) {
16839 // For FCMP_OEQ, we can emit
16840 // two branches instead of an explicit AND instruction with a
16841 // separate test. However, we only do this if this block doesn't
16842 // have a fall-through edge, because this requires an explicit
16843 // jmp when the condition is false.
16844 if (Op.getNode()->hasOneUse()) {
16845 SDNode *User = *Op.getNode()->use_begin();
16846 // Look for an unconditional branch following this conditional branch.
16847 // We need this because we need to reverse the successors in order
16848 // to implement FCMP_OEQ.
16849 if (User->getOpcode() == ISD::BR) {
16850 SDValue FalseBB = User->getOperand(1);
16852 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16853 assert(NewBR == User);
16857 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16858 Cond.getOperand(0), Cond.getOperand(1));
16859 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16860 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16861 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16862 Chain, Dest, CC, Cmp);
16863 CC = DAG.getConstant(X86::COND_P, MVT::i8);
16868 } else if (Cond.getOpcode() == ISD::SETCC &&
16869 cast<CondCodeSDNode>(Cond.getOperand(2))->get() == ISD::SETUNE) {
16870 // For FCMP_UNE, we can emit
16871 // two branches instead of an explicit AND instruction with a
16872 // separate test. However, we only do this if this block doesn't
16873 // have a fall-through edge, because this requires an explicit
16874 // jmp when the condition is false.
16875 if (Op.getNode()->hasOneUse()) {
16876 SDNode *User = *Op.getNode()->use_begin();
16877 // Look for an unconditional branch following this conditional branch.
16878 // We need this because we need to reverse the successors in order
16879 // to implement FCMP_UNE.
16880 if (User->getOpcode() == ISD::BR) {
16881 SDValue FalseBB = User->getOperand(1);
16883 DAG.UpdateNodeOperands(User, User->getOperand(0), Dest);
16884 assert(NewBR == User);
16887 SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
16888 Cond.getOperand(0), Cond.getOperand(1));
16889 Cmp = ConvertCmpIfNecessary(Cmp, DAG);
16890 CC = DAG.getConstant(X86::COND_NE, MVT::i8);
16891 Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16892 Chain, Dest, CC, Cmp);
16893 CC = DAG.getConstant(X86::COND_NP, MVT::i8);
16903 // Look past the truncate if the high bits are known zero.
16904 if (isTruncWithZeroHighBitsInput(Cond, DAG))
16905 Cond = Cond.getOperand(0);
16907 // We know the result of AND is compared against zero. Try to match it to BT.
16909 if (Cond.getOpcode() == ISD::AND && Cond.hasOneUse()) {
16910 SDValue NewSetCC = LowerToBT(Cond, ISD::SETNE, dl, DAG);
16911 if (NewSetCC.getNode()) {
16912 CC = NewSetCC.getOperand(0);
16913 Cond = NewSetCC.getOperand(1);
16920 X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
16921 CC = DAG.getConstant(X86Cond, MVT::i8);
16922 Cond = EmitTest(Cond, X86Cond, dl, DAG);
16924 Cond = ConvertCmpIfNecessary(Cond, DAG);
16925 return DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
16926 Chain, Dest, CC, Cond);
16929 // Lower dynamic stack allocation to _alloca call for Cygwin/Mingw targets.
16930 // Calls to _alloca are needed to probe the stack when allocating more than 4k
16931 // bytes in one go. Touching the stack at 4K increments is necessary to ensure
16932 // that the guard pages used by the OS virtual memory manager are allocated in
16933 // correct sequence.
16935 X86TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
16936 SelectionDAG &DAG) const {
16937 MachineFunction &MF = DAG.getMachineFunction();
16938 bool SplitStack = MF.shouldSplitStack();
16939 bool Lower = (Subtarget->isOSWindows() && !Subtarget->isTargetMachO()) ||
16944 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
16945 SDNode* Node = Op.getNode();
16947 unsigned SPReg = TLI.getStackPointerRegisterToSaveRestore();
16948 assert(SPReg && "Target cannot require DYNAMIC_STACKALLOC expansion and"
16949 " not tell us which reg is the stack pointer!");
16950 EVT VT = Node->getValueType(0);
16951 SDValue Tmp1 = SDValue(Node, 0);
16952 SDValue Tmp2 = SDValue(Node, 1);
16953 SDValue Tmp3 = Node->getOperand(2);
16954 SDValue Chain = Tmp1.getOperand(0);
16956 // Chain the dynamic stack allocation so that it doesn't modify the stack
16957 // pointer when other instructions are using the stack.
16958 Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0, true),
16961 SDValue Size = Tmp2.getOperand(1);
16962 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
16963 Chain = SP.getValue(1);
16964 unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
16965 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
16966 unsigned StackAlign = TFI.getStackAlignment();
16967 Tmp1 = DAG.getNode(ISD::SUB, dl, VT, SP, Size); // Value
16968 if (Align > StackAlign)
16969 Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
16970 DAG.getConstant(-(uint64_t)Align, VT));
16971 Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
16973 Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, true),
16974 DAG.getIntPtrConstant(0, true), SDValue(),
16977 SDValue Ops[2] = { Tmp1, Tmp2 };
16978 return DAG.getMergeValues(Ops, dl);
16982 SDValue Chain = Op.getOperand(0);
16983 SDValue Size = Op.getOperand(1);
16984 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
16985 EVT VT = Op.getNode()->getValueType(0);
16987 bool Is64Bit = Subtarget->is64Bit();
16988 EVT SPTy = getPointerTy();
16991 MachineRegisterInfo &MRI = MF.getRegInfo();
16994 // The 64-bit implementation of segmented stacks needs to clobber both r10
16995 // and r11. This makes it impossible to use it along with nested parameters.
16996 const Function *F = MF.getFunction();
16998 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
17000 if (I->hasNestAttr())
17001 report_fatal_error("Cannot use segmented stacks with functions that "
17002 "have nested arguments.");
17005 const TargetRegisterClass *AddrRegClass =
17006 getRegClassFor(getPointerTy());
17007 unsigned Vreg = MRI.createVirtualRegister(AddrRegClass);
17008 Chain = DAG.getCopyToReg(Chain, dl, Vreg, Size);
17009 SDValue Value = DAG.getNode(X86ISD::SEG_ALLOCA, dl, SPTy, Chain,
17010 DAG.getRegister(Vreg, SPTy));
17011 SDValue Ops1[2] = { Value, Chain };
17012 return DAG.getMergeValues(Ops1, dl);
17015 const unsigned Reg = (Subtarget->isTarget64BitLP64() ? X86::RAX : X86::EAX);
17017 Chain = DAG.getCopyToReg(Chain, dl, Reg, Size, Flag);
17018 Flag = Chain.getValue(1);
17019 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
17021 Chain = DAG.getNode(X86ISD::WIN_ALLOCA, dl, NodeTys, Chain, Flag);
17023 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
17024 unsigned SPReg = RegInfo->getStackRegister();
17025 SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, SPTy);
17026 Chain = SP.getValue(1);
17029 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
17030 DAG.getConstant(-(uint64_t)Align, VT));
17031 Chain = DAG.getCopyToReg(Chain, dl, SPReg, SP);
17034 SDValue Ops1[2] = { SP, Chain };
17035 return DAG.getMergeValues(Ops1, dl);
17039 SDValue X86TargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
17040 MachineFunction &MF = DAG.getMachineFunction();
17041 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
17043 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
17046 if (!Subtarget->is64Bit() || Subtarget->isTargetWin64()) {
17047 // vastart just stores the address of the VarArgsFrameIndex slot into the
17048 // memory location argument.
17049 SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
17051 return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
17052 MachinePointerInfo(SV), false, false, 0);
17056 // gp_offset (0 - 6 * 8)
17057 // fp_offset (48 - 48 + 8 * 16)
17058 // overflow_arg_area (point to parameters coming in memory).
17060 SmallVector<SDValue, 8> MemOps;
17061 SDValue FIN = Op.getOperand(1);
17063 SDValue Store = DAG.getStore(Op.getOperand(0), DL,
17064 DAG.getConstant(FuncInfo->getVarArgsGPOffset(),
17066 FIN, MachinePointerInfo(SV), false, false, 0);
17067 MemOps.push_back(Store);
17070 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17071 FIN, DAG.getIntPtrConstant(4));
17072 Store = DAG.getStore(Op.getOperand(0), DL,
17073 DAG.getConstant(FuncInfo->getVarArgsFPOffset(),
17075 FIN, MachinePointerInfo(SV, 4), false, false, 0);
17076 MemOps.push_back(Store);
17078 // Store ptr to overflow_arg_area
17079 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17080 FIN, DAG.getIntPtrConstant(4));
17081 SDValue OVFIN = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
17083 Store = DAG.getStore(Op.getOperand(0), DL, OVFIN, FIN,
17084 MachinePointerInfo(SV, 8),
17086 MemOps.push_back(Store);
17088 // Store ptr to reg_save_area.
17089 FIN = DAG.getNode(ISD::ADD, DL, getPointerTy(),
17090 FIN, DAG.getIntPtrConstant(8));
17091 SDValue RSFIN = DAG.getFrameIndex(FuncInfo->getRegSaveFrameIndex(),
17093 Store = DAG.getStore(Op.getOperand(0), DL, RSFIN, FIN,
17094 MachinePointerInfo(SV, 16), false, false, 0);
17095 MemOps.push_back(Store);
17096 return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
17099 SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
17100 assert(Subtarget->is64Bit() &&
17101 "LowerVAARG only handles 64-bit va_arg!");
17102 assert((Subtarget->isTargetLinux() ||
17103 Subtarget->isTargetDarwin()) &&
17104 "Unhandled target in LowerVAARG");
17105 assert(Op.getNode()->getNumOperands() == 4);
17106 SDValue Chain = Op.getOperand(0);
17107 SDValue SrcPtr = Op.getOperand(1);
17108 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
17109 unsigned Align = Op.getConstantOperandVal(3);
17112 EVT ArgVT = Op.getNode()->getValueType(0);
17113 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
17114 uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy);
17117 // Decide which area this value should be read from.
17118 // TODO: Implement the AMD64 ABI in its entirety. This simple
17119 // selection mechanism works only for the basic types.
17120 if (ArgVT == MVT::f80) {
17121 llvm_unreachable("va_arg for f80 not yet implemented");
17122 } else if (ArgVT.isFloatingPoint() && ArgSize <= 16 /*bytes*/) {
17123 ArgMode = 2; // Argument passed in XMM register. Use fp_offset.
17124 } else if (ArgVT.isInteger() && ArgSize <= 32 /*bytes*/) {
17125 ArgMode = 1; // Argument passed in GPR64 register(s). Use gp_offset.
17127 llvm_unreachable("Unhandled argument type in LowerVAARG");
17130 if (ArgMode == 2) {
17131 // Sanity Check: Make sure using fp_offset makes sense.
17132 assert(!DAG.getTarget().Options.UseSoftFloat &&
17133 !(DAG.getMachineFunction().getFunction()->hasFnAttribute(
17134 Attribute::NoImplicitFloat)) &&
17135 Subtarget->hasSSE1());
17138 // Insert VAARG_64 node into the DAG
17139 // VAARG_64 returns two values: Variable Argument Address, Chain
17140 SmallVector<SDValue, 11> InstOps;
17141 InstOps.push_back(Chain);
17142 InstOps.push_back(SrcPtr);
17143 InstOps.push_back(DAG.getConstant(ArgSize, MVT::i32));
17144 InstOps.push_back(DAG.getConstant(ArgMode, MVT::i8));
17145 InstOps.push_back(DAG.getConstant(Align, MVT::i32));
17146 SDVTList VTs = DAG.getVTList(getPointerTy(), MVT::Other);
17147 SDValue VAARG = DAG.getMemIntrinsicNode(X86ISD::VAARG_64, dl,
17148 VTs, InstOps, MVT::i64,
17149 MachinePointerInfo(SV),
17151 /*Volatile=*/false,
17153 /*WriteMem=*/true);
17154 Chain = VAARG.getValue(1);
17156 // Load the next argument and return it
17157 return DAG.getLoad(ArgVT, dl,
17160 MachinePointerInfo(),
17161 false, false, false, 0);
17164 static SDValue LowerVACOPY(SDValue Op, const X86Subtarget *Subtarget,
17165 SelectionDAG &DAG) {
17166 // X86-64 va_list is a struct { i32, i32, i8*, i8* }.
17167 assert(Subtarget->is64Bit() && "This code only handles 64-bit va_copy!");
17168 SDValue Chain = Op.getOperand(0);
17169 SDValue DstPtr = Op.getOperand(1);
17170 SDValue SrcPtr = Op.getOperand(2);
17171 const Value *DstSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
17172 const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
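  // The whole va_list is 4 + 4 + 8 + 8 = 24 bytes, so the single 24-byte,
  // 8-byte-aligned memcpy below copies every field at once.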
17175 return DAG.getMemcpy(Chain, DL, DstPtr, SrcPtr,
17176 DAG.getIntPtrConstant(24), 8, /*isVolatile*/false,
17178 MachinePointerInfo(DstSV), MachinePointerInfo(SrcSV));
17181 // getTargetVShiftByConstNode - Handle vector element shifts where the shift
17182 // amount is a constant. Takes immediate version of shift as input.
17183 static SDValue getTargetVShiftByConstNode(unsigned Opc, SDLoc dl, MVT VT,
17184 SDValue SrcOp, uint64_t ShiftAmt,
17185 SelectionDAG &DAG) {
17186 MVT ElementType = VT.getVectorElementType();
17188 // Fold this packed shift into its first operand if ShiftAmt is 0.
17192 // Check for ShiftAmt >= element width
17193 if (ShiftAmt >= ElementType.getSizeInBits()) {
17194 if (Opc == X86ISD::VSRAI)
17195 ShiftAmt = ElementType.getSizeInBits() - 1;
17197 return DAG.getConstant(0, VT);
17200 assert((Opc == X86ISD::VSHLI || Opc == X86ISD::VSRLI || Opc == X86ISD::VSRAI)
17201 && "Unknown target vector shift-by-constant node");
17203 // Fold this packed vector shift into a build vector if SrcOp is a
17204   // vector of Constants or UNDEFs, and the SrcOp value type is the same as VT.
17205 if (VT == SrcOp.getSimpleValueType() &&
17206 ISD::isBuildVectorOfConstantSDNodes(SrcOp.getNode())) {
17207 SmallVector<SDValue, 8> Elts;
17208 unsigned NumElts = SrcOp->getNumOperands();
17209 ConstantSDNode *ND;
17212 default: llvm_unreachable(nullptr);
17213 case X86ISD::VSHLI:
17214 for (unsigned i=0; i!=NumElts; ++i) {
17215 SDValue CurrentOp = SrcOp->getOperand(i);
17216 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17217 Elts.push_back(CurrentOp);
17220 ND = cast<ConstantSDNode>(CurrentOp);
17221 const APInt &C = ND->getAPIntValue();
17222 Elts.push_back(DAG.getConstant(C.shl(ShiftAmt), ElementType));
17225 case X86ISD::VSRLI:
17226 for (unsigned i=0; i!=NumElts; ++i) {
17227 SDValue CurrentOp = SrcOp->getOperand(i);
17228 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17229 Elts.push_back(CurrentOp);
17232 ND = cast<ConstantSDNode>(CurrentOp);
17233 const APInt &C = ND->getAPIntValue();
17234 Elts.push_back(DAG.getConstant(C.lshr(ShiftAmt), ElementType));
17237 case X86ISD::VSRAI:
17238 for (unsigned i=0; i!=NumElts; ++i) {
17239 SDValue CurrentOp = SrcOp->getOperand(i);
17240 if (CurrentOp->getOpcode() == ISD::UNDEF) {
17241 Elts.push_back(CurrentOp);
17244 ND = cast<ConstantSDNode>(CurrentOp);
17245 const APInt &C = ND->getAPIntValue();
17246 Elts.push_back(DAG.getConstant(C.ashr(ShiftAmt), ElementType));
17251 return DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
17254 return DAG.getNode(Opc, dl, VT, SrcOp, DAG.getConstant(ShiftAmt, MVT::i8));
17257 // getTargetVShiftNode - Handle vector element shifts where the shift amount
17258 // may or may not be a constant. Takes immediate version of shift as input.
17259 static SDValue getTargetVShiftNode(unsigned Opc, SDLoc dl, MVT VT,
17260 SDValue SrcOp, SDValue ShAmt,
17261 SelectionDAG &DAG) {
17262 MVT SVT = ShAmt.getSimpleValueType();
17263 assert((SVT == MVT::i32 || SVT == MVT::i64) && "Unexpected value type!");
17265 // Catch shift-by-constant.
17266 if (ConstantSDNode *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
17267 return getTargetVShiftByConstNode(Opc, dl, VT, SrcOp,
17268 CShAmt->getZExtValue(), DAG);
17270 // Change opcode to non-immediate version
17272 default: llvm_unreachable("Unknown target vector shift node");
17273 case X86ISD::VSHLI: Opc = X86ISD::VSHL; break;
17274 case X86ISD::VSRLI: Opc = X86ISD::VSRL; break;
17275 case X86ISD::VSRAI: Opc = X86ISD::VSRA; break;
17278 const X86Subtarget &Subtarget =
17279 static_cast<const X86Subtarget &>(DAG.getSubtarget());
17280 if (Subtarget.hasSSE41() && ShAmt.getOpcode() == ISD::ZERO_EXTEND &&
17281 ShAmt.getOperand(0).getSimpleValueType() == MVT::i16) {
17282 // Let the shuffle legalizer expand this shift amount node.
17283 SDValue Op0 = ShAmt.getOperand(0);
17284 Op0 = DAG.getNode(ISD::SCALAR_TO_VECTOR, SDLoc(Op0), MVT::v8i16, Op0);
17285 ShAmt = getShuffleVectorZeroOrUndef(Op0, 0, true, &Subtarget, DAG);
17287   // Need to build a vector containing the shift amount.
17288 // SSE/AVX packed shifts only use the lower 64-bit of the shift count.
17289 SmallVector<SDValue, 4> ShOps;
17290 ShOps.push_back(ShAmt);
17291 if (SVT == MVT::i32) {
17292 ShOps.push_back(DAG.getConstant(0, SVT));
17293 ShOps.push_back(DAG.getUNDEF(SVT));
17295 ShOps.push_back(DAG.getUNDEF(SVT));
17297 MVT BVT = SVT == MVT::i32 ? MVT::v4i32 : MVT::v2i64;
17298 ShAmt = DAG.getNode(ISD::BUILD_VECTOR, dl, BVT, ShOps);
17301 // The return type has to be a 128-bit type with the same element
17302 // type as the input type.
17303 MVT EltVT = VT.getVectorElementType();
17304 EVT ShVT = MVT::getVectorVT(EltVT, 128/EltVT.getSizeInBits());
17306 ShAmt = DAG.getNode(ISD::BITCAST, dl, ShVT, ShAmt);
17307 return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
17310 /// \brief Return (and \p Op, \p Mask) for compare instructions or
17311 /// (vselect \p Mask, \p Op, \p PreservedSrc) for others along with the
17312 /// necessary casting for \p Mask when lowering masking intrinsics.
17313 static SDValue getVectorMaskingNode(SDValue Op, SDValue Mask,
17314 SDValue PreservedSrc,
17315 const X86Subtarget *Subtarget,
17316 SelectionDAG &DAG) {
17317 EVT VT = Op.getValueType();
17318 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(),
17319 MVT::i1, VT.getVectorNumElements());
17320 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17321 Mask.getValueType().getSizeInBits());
17324 assert(MaskVT.isSimple() && "invalid mask type");
17326 if (isAllOnes(Mask))
17329   // In the case where MaskVT equals v2i1 or v4i1, the lower 2 or 4 elements
17330   // are extracted by EXTRACT_SUBVECTOR.
17331 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17332 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17333 DAG.getIntPtrConstant(0));
17335 switch (Op.getOpcode()) {
17337 case X86ISD::PCMPEQM:
17338 case X86ISD::PCMPGTM:
17340 case X86ISD::CMPMU:
17341 return DAG.getNode(ISD::AND, dl, VT, Op, VMask);
17343 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17344 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17345 return DAG.getNode(ISD::VSELECT, dl, VT, VMask, Op, PreservedSrc);
17348 /// \brief Creates an SDNode for a predicated scalar operation.
17349 /// \returns (X86vselect \p Mask, \p Op, \p PreservedSrc).
17350 /// The mask is coming as MVT::i8 and it should be truncated
17351 /// to MVT::i1 while lowering masking intrinsics.
17352 /// The main difference between ScalarMaskingNode and VectorMaskingNode is the use of
17353 /// "X86select" instead of "vselect". We just can't create the "vselect" node for
17354 /// a scalar instruction.
17355 static SDValue getScalarMaskingNode(SDValue Op, SDValue Mask,
17356 SDValue PreservedSrc,
17357 const X86Subtarget *Subtarget,
17358 SelectionDAG &DAG) {
17359 if (isAllOnes(Mask))
17362 EVT VT = Op.getValueType();
17364 // The mask should be of type MVT::i1
17365 SDValue IMask = DAG.getNode(ISD::TRUNCATE, dl, MVT::i1, Mask);
17367 if (PreservedSrc.getOpcode() == ISD::UNDEF)
17368 PreservedSrc = getZeroVector(VT, Subtarget, DAG, dl);
17369 return DAG.getNode(X86ISD::SELECT, dl, VT, IMask, Op, PreservedSrc);
17372 static SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17373 SelectionDAG &DAG) {
17375 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
17376 EVT VT = Op.getValueType();
17377 const IntrinsicData* IntrData = getIntrinsicWithoutChain(IntNo);
17379 switch(IntrData->Type) {
17380 case INTR_TYPE_1OP:
17381 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1));
17382 case INTR_TYPE_2OP:
17383 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17385 case INTR_TYPE_3OP:
17386 return DAG.getNode(IntrData->Opc0, dl, Op.getValueType(), Op.getOperand(1),
17387 Op.getOperand(2), Op.getOperand(3));
17388 case INTR_TYPE_1OP_MASK_RM: {
17389 SDValue Src = Op.getOperand(1);
17390 SDValue Src0 = Op.getOperand(2);
17391 SDValue Mask = Op.getOperand(3);
17392 SDValue RoundingMode = Op.getOperand(4);
17393 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src,
17395 Mask, Src0, Subtarget, DAG);
17397 case INTR_TYPE_SCALAR_MASK_RM: {
17398 SDValue Src1 = Op.getOperand(1);
17399 SDValue Src2 = Op.getOperand(2);
17400 SDValue Src0 = Op.getOperand(3);
17401 SDValue Mask = Op.getOperand(4);
17402 SDValue RoundingMode = Op.getOperand(5);
17403 return getScalarMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT, Src1, Src2,
17405 Mask, Src0, Subtarget, DAG);
17407 case INTR_TYPE_2OP_MASK: {
17408 SDValue Mask = Op.getOperand(4);
17409 SDValue PassThru = Op.getOperand(3);
17410 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17411 if (IntrWithRoundingModeOpcode != 0) {
17412 unsigned Round = cast<ConstantSDNode>(Op.getOperand(5))->getZExtValue();
17413 if (Round != X86::STATIC_ROUNDING::CUR_DIRECTION) {
17414 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17415 dl, Op.getValueType(),
17416 Op.getOperand(1), Op.getOperand(2),
17417 Op.getOperand(3), Op.getOperand(5)),
17418 Mask, PassThru, Subtarget, DAG);
17421 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0, dl, VT,
17424 Mask, PassThru, Subtarget, DAG);
17426 case FMA_OP_MASK: {
17427 SDValue Src1 = Op.getOperand(1);
17428 SDValue Src2 = Op.getOperand(2);
17429 SDValue Src3 = Op.getOperand(3);
17430 SDValue Mask = Op.getOperand(4);
17431 unsigned IntrWithRoundingModeOpcode = IntrData->Opc1;
17432 if (IntrWithRoundingModeOpcode != 0) {
17433 SDValue Rnd = Op.getOperand(5);
17434 if (cast<ConstantSDNode>(Rnd)->getZExtValue() !=
17435 X86::STATIC_ROUNDING::CUR_DIRECTION)
17436 return getVectorMaskingNode(DAG.getNode(IntrWithRoundingModeOpcode,
17437 dl, Op.getValueType(),
17438 Src1, Src2, Src3, Rnd),
17439 Mask, Src1, Subtarget, DAG);
17441 return getVectorMaskingNode(DAG.getNode(IntrData->Opc0,
17442 dl, Op.getValueType(),
17444 Mask, Src1, Subtarget, DAG);
17447 case CMP_MASK_CC: {
17448 // Comparison intrinsics with masks.
17449 // Example of transformation:
17450 // (i8 (int_x86_avx512_mask_pcmpeq_q_128
17451 // (v2i64 %a), (v2i64 %b), (i8 %mask))) ->
17453 // (v8i1 (insert_subvector undef,
17454 // (v2i1 (and (PCMPEQM %a, %b),
17455 // (extract_subvector
17456 // (v8i1 (bitcast %mask)), 0))), 0))))
17457 EVT VT = Op.getOperand(1).getValueType();
17458 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17459 VT.getVectorNumElements());
17460 SDValue Mask = Op.getOperand((IntrData->Type == CMP_MASK_CC) ? 4 : 3);
17461 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17462 Mask.getValueType().getSizeInBits());
17464 if (IntrData->Type == CMP_MASK_CC) {
17465 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17466 Op.getOperand(2), Op.getOperand(3));
17468 assert(IntrData->Type == CMP_MASK && "Unexpected intrinsic type!");
17469 Cmp = DAG.getNode(IntrData->Opc0, dl, MaskVT, Op.getOperand(1),
17472 SDValue CmpMask = getVectorMaskingNode(Cmp, Mask,
17473 DAG.getTargetConstant(0, MaskVT),
17475 SDValue Res = DAG.getNode(ISD::INSERT_SUBVECTOR, dl, BitcastVT,
17476 DAG.getUNDEF(BitcastVT), CmpMask,
17477 DAG.getIntPtrConstant(0));
17478 return DAG.getNode(ISD::BITCAST, dl, Op.getValueType(), Res);
17480 case COMI: { // Comparison intrinsics
17481 ISD::CondCode CC = (ISD::CondCode)IntrData->Opc1;
17482 SDValue LHS = Op.getOperand(1);
17483 SDValue RHS = Op.getOperand(2);
17484 unsigned X86CC = TranslateX86CC(CC, true, LHS, RHS, DAG);
17485 assert(X86CC != X86::COND_INVALID && "Unexpected illegal condition!");
17486 SDValue Cond = DAG.getNode(IntrData->Opc0, dl, MVT::i32, LHS, RHS);
17487 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17488 DAG.getConstant(X86CC, MVT::i8), Cond);
17489 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17492 return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
17493 Op.getOperand(1), Op.getOperand(2), DAG);
17495 return getVectorMaskingNode(getTargetVShiftNode(IntrData->Opc0, dl,
17496 Op.getSimpleValueType(),
17498 Op.getOperand(2), DAG),
17499 Op.getOperand(4), Op.getOperand(3), Subtarget,
17501 case COMPRESS_EXPAND_IN_REG: {
17502 SDValue Mask = Op.getOperand(3);
17503 SDValue DataToCompress = Op.getOperand(1);
17504 SDValue PassThru = Op.getOperand(2);
17505 if (isAllOnes(Mask)) // return data as is
17506 return Op.getOperand(1);
17507 EVT VT = Op.getValueType();
17508 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17509 VT.getVectorNumElements());
17510 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17511 Mask.getValueType().getSizeInBits());
17513 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17514 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17515 DAG.getIntPtrConstant(0));
17517 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToCompress,
17521 SDValue Mask = Op.getOperand(3);
17522 EVT VT = Op.getValueType();
17523 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17524 VT.getVectorNumElements());
17525 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
17526 Mask.getValueType().getSizeInBits());
17528 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
17529 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
17530 DAG.getIntPtrConstant(0));
17531 return DAG.getNode(IntrData->Opc0, dl, VT, VMask, Op.getOperand(1),
17540 default: return SDValue(); // Don't custom lower most intrinsics.
17542 case Intrinsic::x86_avx512_mask_valign_q_512:
17543 case Intrinsic::x86_avx512_mask_valign_d_512:
17544 // Vector source operands are swapped.
17545 return getVectorMaskingNode(DAG.getNode(X86ISD::VALIGN, dl,
17546 Op.getValueType(), Op.getOperand(2),
17549 Op.getOperand(5), Op.getOperand(4),
17552   // ptest and testp intrinsics. The intrinsics these come from are designed to
17553   // return an integer value, not just an instruction, so lower it to the ptest
17554   // or testp pattern and a setcc for the result.
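  // As a rough reminder of the flag semantics mapped onto below:
  // (v)ptest/(v)testp* set ZF when (LHS & RHS) is all zeros and CF when
  // (~LHS & RHS) is all zeros, so the *z intrinsics test COND_E, the *c
  // intrinsics test COND_B, and the *nzc intrinsics test COND_A (both clear).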
17555 case Intrinsic::x86_sse41_ptestz:
17556 case Intrinsic::x86_sse41_ptestc:
17557 case Intrinsic::x86_sse41_ptestnzc:
17558 case Intrinsic::x86_avx_ptestz_256:
17559 case Intrinsic::x86_avx_ptestc_256:
17560 case Intrinsic::x86_avx_ptestnzc_256:
17561 case Intrinsic::x86_avx_vtestz_ps:
17562 case Intrinsic::x86_avx_vtestc_ps:
17563 case Intrinsic::x86_avx_vtestnzc_ps:
17564 case Intrinsic::x86_avx_vtestz_pd:
17565 case Intrinsic::x86_avx_vtestc_pd:
17566 case Intrinsic::x86_avx_vtestnzc_pd:
17567 case Intrinsic::x86_avx_vtestz_ps_256:
17568 case Intrinsic::x86_avx_vtestc_ps_256:
17569 case Intrinsic::x86_avx_vtestnzc_ps_256:
17570 case Intrinsic::x86_avx_vtestz_pd_256:
17571 case Intrinsic::x86_avx_vtestc_pd_256:
17572 case Intrinsic::x86_avx_vtestnzc_pd_256: {
17573 bool IsTestPacked = false;
17576 default: llvm_unreachable("Bad fallthrough in Intrinsic lowering.");
17577 case Intrinsic::x86_avx_vtestz_ps:
17578 case Intrinsic::x86_avx_vtestz_pd:
17579 case Intrinsic::x86_avx_vtestz_ps_256:
17580 case Intrinsic::x86_avx_vtestz_pd_256:
17581 IsTestPacked = true; // Fallthrough
17582 case Intrinsic::x86_sse41_ptestz:
17583 case Intrinsic::x86_avx_ptestz_256:
17585 X86CC = X86::COND_E;
17587 case Intrinsic::x86_avx_vtestc_ps:
17588 case Intrinsic::x86_avx_vtestc_pd:
17589 case Intrinsic::x86_avx_vtestc_ps_256:
17590 case Intrinsic::x86_avx_vtestc_pd_256:
17591 IsTestPacked = true; // Fallthrough
17592 case Intrinsic::x86_sse41_ptestc:
17593 case Intrinsic::x86_avx_ptestc_256:
17595 X86CC = X86::COND_B;
17597 case Intrinsic::x86_avx_vtestnzc_ps:
17598 case Intrinsic::x86_avx_vtestnzc_pd:
17599 case Intrinsic::x86_avx_vtestnzc_ps_256:
17600 case Intrinsic::x86_avx_vtestnzc_pd_256:
17601 IsTestPacked = true; // Fallthrough
17602 case Intrinsic::x86_sse41_ptestnzc:
17603 case Intrinsic::x86_avx_ptestnzc_256:
17605 X86CC = X86::COND_A;
17609 SDValue LHS = Op.getOperand(1);
17610 SDValue RHS = Op.getOperand(2);
17611 unsigned TestOpc = IsTestPacked ? X86ISD::TESTP : X86ISD::PTEST;
17612 SDValue Test = DAG.getNode(TestOpc, dl, MVT::i32, LHS, RHS);
17613 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17614 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8, CC, Test);
17615 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17617 case Intrinsic::x86_avx512_kortestz_w:
17618 case Intrinsic::x86_avx512_kortestc_w: {
17619 unsigned X86CC = (IntNo == Intrinsic::x86_avx512_kortestz_w)? X86::COND_E: X86::COND_B;
17620 SDValue LHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(1));
17621 SDValue RHS = DAG.getNode(ISD::BITCAST, dl, MVT::v16i1, Op.getOperand(2));
17622 SDValue CC = DAG.getConstant(X86CC, MVT::i8);
17623 SDValue Test = DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
17624 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i1, CC, Test);
17625 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17628 case Intrinsic::x86_sse42_pcmpistria128:
17629 case Intrinsic::x86_sse42_pcmpestria128:
17630 case Intrinsic::x86_sse42_pcmpistric128:
17631 case Intrinsic::x86_sse42_pcmpestric128:
17632 case Intrinsic::x86_sse42_pcmpistrio128:
17633 case Intrinsic::x86_sse42_pcmpestrio128:
17634 case Intrinsic::x86_sse42_pcmpistris128:
17635 case Intrinsic::x86_sse42_pcmpestris128:
17636 case Intrinsic::x86_sse42_pcmpistriz128:
17637 case Intrinsic::x86_sse42_pcmpestriz128: {
17641 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
17642 case Intrinsic::x86_sse42_pcmpistria128:
17643 Opcode = X86ISD::PCMPISTRI;
17644 X86CC = X86::COND_A;
17646 case Intrinsic::x86_sse42_pcmpestria128:
17647 Opcode = X86ISD::PCMPESTRI;
17648 X86CC = X86::COND_A;
17650 case Intrinsic::x86_sse42_pcmpistric128:
17651 Opcode = X86ISD::PCMPISTRI;
17652 X86CC = X86::COND_B;
17654 case Intrinsic::x86_sse42_pcmpestric128:
17655 Opcode = X86ISD::PCMPESTRI;
17656 X86CC = X86::COND_B;
17658 case Intrinsic::x86_sse42_pcmpistrio128:
17659 Opcode = X86ISD::PCMPISTRI;
17660 X86CC = X86::COND_O;
17662 case Intrinsic::x86_sse42_pcmpestrio128:
17663 Opcode = X86ISD::PCMPESTRI;
17664 X86CC = X86::COND_O;
17666 case Intrinsic::x86_sse42_pcmpistris128:
17667 Opcode = X86ISD::PCMPISTRI;
17668 X86CC = X86::COND_S;
17670 case Intrinsic::x86_sse42_pcmpestris128:
17671 Opcode = X86ISD::PCMPESTRI;
17672 X86CC = X86::COND_S;
17674 case Intrinsic::x86_sse42_pcmpistriz128:
17675 Opcode = X86ISD::PCMPISTRI;
17676 X86CC = X86::COND_E;
17678 case Intrinsic::x86_sse42_pcmpestriz128:
17679 Opcode = X86ISD::PCMPESTRI;
17680 X86CC = X86::COND_E;
17683 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17684 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17685 SDValue PCMP = DAG.getNode(Opcode, dl, VTs, NewOps);
17686 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17687 DAG.getConstant(X86CC, MVT::i8),
17688 SDValue(PCMP.getNode(), 1));
17689 return DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, SetCC);
17692 case Intrinsic::x86_sse42_pcmpistri128:
17693 case Intrinsic::x86_sse42_pcmpestri128: {
17695 if (IntNo == Intrinsic::x86_sse42_pcmpistri128)
17696 Opcode = X86ISD::PCMPISTRI;
17698 Opcode = X86ISD::PCMPESTRI;
17700 SmallVector<SDValue, 5> NewOps(Op->op_begin()+1, Op->op_end());
17701 SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
17702 return DAG.getNode(Opcode, dl, VTs, NewOps);
17707 static SDValue getGatherNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17708 SDValue Src, SDValue Mask, SDValue Base,
17709 SDValue Index, SDValue ScaleOp, SDValue Chain,
17710 const X86Subtarget * Subtarget) {
17712 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17713 assert(C && "Invalid scale type");
17714 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17715 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17716 Index.getSimpleValueType().getVectorNumElements());
17718 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17720 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17722 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17723 SDVTList VTs = DAG.getVTList(Op.getValueType(), MaskVT, MVT::Other);
17724 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17725 SDValue Segment = DAG.getRegister(0, MVT::i32);
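  // Base, Scale, Index, Disp and Segment together form the usual five-operand
  // X86 memory reference expected by the gather machine instruction; the
  // displacement and segment are simply zero here.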
17726 if (Src.getOpcode() == ISD::UNDEF)
17727 Src = getZeroVector(Op.getValueType(), Subtarget, DAG, dl);
17728 SDValue Ops[] = {Src, MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17729 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17730 SDValue RetOps[] = { SDValue(Res, 0), SDValue(Res, 2) };
17731 return DAG.getMergeValues(RetOps, dl);
17734 static SDValue getScatterNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17735 SDValue Src, SDValue Mask, SDValue Base,
17736 SDValue Index, SDValue ScaleOp, SDValue Chain) {
17738 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17739 assert(C && "Invalid scale type");
17740 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17741 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17742 SDValue Segment = DAG.getRegister(0, MVT::i32);
17743 EVT MaskVT = MVT::getVectorVT(MVT::i1,
17744 Index.getSimpleValueType().getVectorNumElements());
17746 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17748 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17750 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17751 SDVTList VTs = DAG.getVTList(MaskVT, MVT::Other);
17752 SDValue Ops[] = {Base, Scale, Index, Disp, Segment, MaskInReg, Src, Chain};
17753 SDNode *Res = DAG.getMachineNode(Opc, dl, VTs, Ops);
17754 return SDValue(Res, 1);
17757 static SDValue getPrefetchNode(unsigned Opc, SDValue Op, SelectionDAG &DAG,
17758 SDValue Mask, SDValue Base, SDValue Index,
17759 SDValue ScaleOp, SDValue Chain) {
17761 ConstantSDNode *C = dyn_cast<ConstantSDNode>(ScaleOp);
17762 assert(C && "Invalid scale type");
17763 SDValue Scale = DAG.getTargetConstant(C->getZExtValue(), MVT::i8);
17764 SDValue Disp = DAG.getTargetConstant(0, MVT::i32);
17765 SDValue Segment = DAG.getRegister(0, MVT::i32);
17767 MVT::getVectorVT(MVT::i1, Index.getSimpleValueType().getVectorNumElements());
17769 ConstantSDNode *MaskC = dyn_cast<ConstantSDNode>(Mask);
17771 MaskInReg = DAG.getTargetConstant(MaskC->getSExtValue(), MaskVT);
17773 MaskInReg = DAG.getNode(ISD::BITCAST, dl, MaskVT, Mask);
17774 //SDVTList VTs = DAG.getVTList(MVT::Other);
17775 SDValue Ops[] = {MaskInReg, Base, Scale, Index, Disp, Segment, Chain};
17776 SDNode *Res = DAG.getMachineNode(Opc, dl, MVT::Other, Ops);
17777 return SDValue(Res, 0);
17780 // getReadPerformanceCounter - Handles the lowering of builtin intrinsics that
17781 // read performance monitor counters (x86_rdpmc).
17782 static void getReadPerformanceCounter(SDNode *N, SDLoc DL,
17783 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17784 SmallVectorImpl<SDValue> &Results) {
17785 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17786 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
17789   // The ECX register is used to select the index of the performance counter to read.
17791 SDValue Chain = DAG.getCopyToReg(N->getOperand(0), DL, X86::ECX,
17793 SDValue rd = DAG.getNode(X86ISD::RDPMC_DAG, DL, Tys, Chain);
17795 // Reads the content of a 64-bit performance counter and returns it in the
17796 // registers EDX:EAX.
17797 if (Subtarget->is64Bit()) {
17798 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17799 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17802 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17803 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17806 Chain = HI.getValue(1);
17808 if (Subtarget->is64Bit()) {
17809 // The EAX register is loaded with the low-order 32 bits. The EDX register
17810 // is loaded with the supported high-order bits of the counter.
17811 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17812 DAG.getConstant(32, MVT::i8));
17813 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17814 Results.push_back(Chain);
17818 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17819 SDValue Ops[] = { LO, HI };
17820 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17821 Results.push_back(Pair);
17822 Results.push_back(Chain);
17825 // getReadTimeStampCounter - Handles the lowering of builtin intrinsics that
17826 // read the time stamp counter (x86_rdtsc and x86_rdtscp). This function is
17827 // also used to custom lower READCYCLECOUNTER nodes.
17828 static void getReadTimeStampCounter(SDNode *N, SDLoc DL, unsigned Opcode,
17829 SelectionDAG &DAG, const X86Subtarget *Subtarget,
17830 SmallVectorImpl<SDValue> &Results) {
17831 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
17832 SDValue rd = DAG.getNode(Opcode, DL, Tys, N->getOperand(0));
17835 // The processor's time-stamp counter (a 64-bit MSR) is stored into the
17836 // EDX:EAX registers. EDX is loaded with the high-order 32 bits of the MSR
17837 // and the EAX register is loaded with the low-order 32 bits.
17838 if (Subtarget->is64Bit()) {
17839 LO = DAG.getCopyFromReg(rd, DL, X86::RAX, MVT::i64, rd.getValue(1));
17840 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::RDX, MVT::i64,
17843 LO = DAG.getCopyFromReg(rd, DL, X86::EAX, MVT::i32, rd.getValue(1));
17844 HI = DAG.getCopyFromReg(LO.getValue(1), DL, X86::EDX, MVT::i32,
17847 SDValue Chain = HI.getValue(1);
17849 if (Opcode == X86ISD::RDTSCP_DAG) {
17850 assert(N->getNumOperands() == 3 && "Unexpected number of operands!");
17852   // Instruction RDTSCP loads the IA32_TSC_AUX MSR (address C000_0103H) into
17853 // the ECX register. Add 'ecx' explicitly to the chain.
17854 SDValue ecx = DAG.getCopyFromReg(Chain, DL, X86::ECX, MVT::i32,
17856     // Explicitly store the content of ECX at the location passed as input
17857 // to the 'rdtscp' intrinsic.
17858 Chain = DAG.getStore(ecx.getValue(1), DL, ecx, N->getOperand(2),
17859 MachinePointerInfo(), false, false, 0);
17862 if (Subtarget->is64Bit()) {
17863 // The EDX register is loaded with the high-order 32 bits of the MSR, and
17864 // the EAX register is loaded with the low-order 32 bits.
17865 SDValue Tmp = DAG.getNode(ISD::SHL, DL, MVT::i64, HI,
17866 DAG.getConstant(32, MVT::i8));
17867 Results.push_back(DAG.getNode(ISD::OR, DL, MVT::i64, LO, Tmp));
17868 Results.push_back(Chain);
17872 // Use a buildpair to merge the two 32-bit values into a 64-bit one.
17873 SDValue Ops[] = { LO, HI };
17874 SDValue Pair = DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, Ops);
17875 Results.push_back(Pair);
17876 Results.push_back(Chain);
17879 static SDValue LowerREADCYCLECOUNTER(SDValue Op, const X86Subtarget *Subtarget,
17880 SelectionDAG &DAG) {
17881 SmallVector<SDValue, 2> Results;
17883 getReadTimeStampCounter(Op.getNode(), DL, X86ISD::RDTSC_DAG, DAG, Subtarget,
17885 return DAG.getMergeValues(Results, DL);
17889 static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget *Subtarget,
17890 SelectionDAG &DAG) {
17891 unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
17893 const IntrinsicData* IntrData = getIntrinsicWithChain(IntNo);
17898 switch(IntrData->Type) {
17900 llvm_unreachable("Unknown Intrinsic Type");
17904 // Emit the node with the right value type.
17905 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Glue, MVT::Other);
17906 SDValue Result = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
17908 // If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
17909     // Otherwise return the value from Rand, which is always 0, cast to i32.
17910 SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
17911 DAG.getConstant(1, Op->getValueType(1)),
17912 DAG.getConstant(X86::COND_B, MVT::i32),
17913 SDValue(Result.getNode(), 1) };
17914 SDValue isValid = DAG.getNode(X86ISD::CMOV, dl,
17915 DAG.getVTList(Op->getValueType(1), MVT::Glue),
17918 // Return { result, isValid, chain }.
17919 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(), Result, isValid,
17920 SDValue(Result.getNode(), 2));
17923 //gather(v1, mask, index, base, scale);
17924 SDValue Chain = Op.getOperand(0);
17925 SDValue Src = Op.getOperand(2);
17926 SDValue Base = Op.getOperand(3);
17927 SDValue Index = Op.getOperand(4);
17928 SDValue Mask = Op.getOperand(5);
17929 SDValue Scale = Op.getOperand(6);
17930 return getGatherNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain,
17934 //scatter(base, mask, index, v1, scale);
17935 SDValue Chain = Op.getOperand(0);
17936 SDValue Base = Op.getOperand(2);
17937 SDValue Mask = Op.getOperand(3);
17938 SDValue Index = Op.getOperand(4);
17939 SDValue Src = Op.getOperand(5);
17940 SDValue Scale = Op.getOperand(6);
17941 return getScatterNode(IntrData->Opc0, Op, DAG, Src, Mask, Base, Index, Scale, Chain);
17944 SDValue Hint = Op.getOperand(6);
17946 if (dyn_cast<ConstantSDNode> (Hint) == nullptr ||
17947 (HintVal = dyn_cast<ConstantSDNode> (Hint)->getZExtValue()) > 1)
17948 llvm_unreachable("Wrong prefetch hint in intrinsic: should be 0 or 1");
17949 unsigned Opcode = (HintVal ? IntrData->Opc1 : IntrData->Opc0);
17950 SDValue Chain = Op.getOperand(0);
17951 SDValue Mask = Op.getOperand(2);
17952 SDValue Index = Op.getOperand(3);
17953 SDValue Base = Op.getOperand(4);
17954 SDValue Scale = Op.getOperand(5);
17955 return getPrefetchNode(Opcode, Op, DAG, Mask, Base, Index, Scale, Chain);
17957 // Read Time Stamp Counter (RDTSC) and Processor ID (RDTSCP).
17959 SmallVector<SDValue, 2> Results;
17960 getReadTimeStampCounter(Op.getNode(), dl, IntrData->Opc0, DAG, Subtarget, Results);
17961 return DAG.getMergeValues(Results, dl);
17963 // Read Performance Monitoring Counters.
17965 SmallVector<SDValue, 2> Results;
17966 getReadPerformanceCounter(Op.getNode(), dl, DAG, Subtarget, Results);
17967 return DAG.getMergeValues(Results, dl);
17969 // XTEST intrinsics.
17971 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
17972 SDValue InTrans = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(0));
17973 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17974 DAG.getConstant(X86::COND_NE, MVT::i8),
17976 SDValue Ret = DAG.getNode(ISD::ZERO_EXTEND, dl, Op->getValueType(0), SetCC);
17977 return DAG.getNode(ISD::MERGE_VALUES, dl, Op->getVTList(),
17978 Ret, SDValue(InTrans.getNode(), 1));
17982 SmallVector<SDValue, 2> Results;
17983 SDVTList CFVTs = DAG.getVTList(Op->getValueType(0), MVT::Other);
17984 SDVTList VTs = DAG.getVTList(Op.getOperand(3)->getValueType(0), MVT::Other);
17985 SDValue GenCF = DAG.getNode(X86ISD::ADD, dl, CFVTs, Op.getOperand(2),
17986 DAG.getConstant(-1, MVT::i8));
17987 SDValue Res = DAG.getNode(IntrData->Opc0, dl, VTs, Op.getOperand(3),
17988 Op.getOperand(4), GenCF.getValue(1));
17989 SDValue Store = DAG.getStore(Op.getOperand(0), dl, Res.getValue(0),
17990 Op.getOperand(5), MachinePointerInfo(),
17992 SDValue SetCC = DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
17993 DAG.getConstant(X86::COND_B, MVT::i8),
17995 Results.push_back(SetCC);
17996 Results.push_back(Store);
17997 return DAG.getMergeValues(Results, dl);
17999 case COMPRESS_TO_MEM: {
18001 SDValue Mask = Op.getOperand(4);
18002 SDValue DataToCompress = Op.getOperand(3);
18003 SDValue Addr = Op.getOperand(2);
18004 SDValue Chain = Op.getOperand(0);
18006 if (isAllOnes(Mask)) // return just a store
18007 return DAG.getStore(Chain, dl, DataToCompress, Addr,
18008 MachinePointerInfo(), false, false, 0);
18010 EVT VT = DataToCompress.getValueType();
18011 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18012 VT.getVectorNumElements());
18013 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18014 Mask.getValueType().getSizeInBits());
18015 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
18016 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
18017 DAG.getIntPtrConstant(0));
18019 SDValue Compressed = DAG.getNode(IntrData->Opc0, dl, VT, VMask,
18020 DataToCompress, DAG.getUNDEF(VT));
18021 return DAG.getStore(Chain, dl, Compressed, Addr,
18022 MachinePointerInfo(), false, false, 0);
18024 case EXPAND_FROM_MEM: {
18026 SDValue Mask = Op.getOperand(4);
18027 SDValue PathThru = Op.getOperand(3);
18028 SDValue Addr = Op.getOperand(2);
18029 SDValue Chain = Op.getOperand(0);
18030 EVT VT = Op.getValueType();
18032 if (isAllOnes(Mask)) // return just a load
18033 return DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(), false, false,
18035 EVT MaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18036 VT.getVectorNumElements());
18037 EVT BitcastVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
18038 Mask.getValueType().getSizeInBits());
18039 SDValue VMask = DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, MaskVT,
18040 DAG.getNode(ISD::BITCAST, dl, BitcastVT, Mask),
18041 DAG.getIntPtrConstant(0));
18043 SDValue DataToExpand = DAG.getLoad(VT, dl, Chain, Addr, MachinePointerInfo(),
18044 false, false, false, 0);
18046 SmallVector<SDValue, 2> Results;
18047 Results.push_back(DAG.getNode(IntrData->Opc0, dl, VT, VMask, DataToExpand,
18049 Results.push_back(Chain);
18050 return DAG.getMergeValues(Results, dl);
18055 SDValue X86TargetLowering::LowerRETURNADDR(SDValue Op,
18056 SelectionDAG &DAG) const {
18057 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
18058 MFI->setReturnAddressIsTaken(true);
18060 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
18063 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
18065 EVT PtrVT = getPointerTy();
18068 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
18069 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18070 SDValue Offset = DAG.getConstant(RegInfo->getSlotSize(), PtrVT);
18071 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
18072 DAG.getNode(ISD::ADD, dl, PtrVT,
18073 FrameAddr, Offset),
18074 MachinePointerInfo(), false, false, false, 0);
18077 // Just load the return address.
18078 SDValue RetAddrFI = getReturnAddressFrameIndex(DAG);
18079 return DAG.getLoad(PtrVT, dl, DAG.getEntryNode(),
18080 RetAddrFI, MachinePointerInfo(), false, false, false, 0);
18083 SDValue X86TargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
18084 MachineFunction &MF = DAG.getMachineFunction();
18085 MachineFrameInfo *MFI = MF.getFrameInfo();
18086 X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
18087 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18088 EVT VT = Op.getValueType();
18090 MFI->setFrameAddressIsTaken(true);
18092 if (MF.getTarget().getMCAsmInfo()->usesWindowsCFI()) {
18093 // Depth > 0 makes no sense on targets which use Windows unwind codes. It
18094 // is not possible to crawl up the stack without looking at the unwind codes
18096 int FrameAddrIndex = FuncInfo->getFAIndex();
18097 if (!FrameAddrIndex) {
18098 // Set up a frame object for the return address.
18099 unsigned SlotSize = RegInfo->getSlotSize();
18100 FrameAddrIndex = MF.getFrameInfo()->CreateFixedObject(
18101 SlotSize, /*Offset=*/INT64_MIN, /*IsImmutable=*/false);
18102 FuncInfo->setFAIndex(FrameAddrIndex);
18104 return DAG.getFrameIndex(FrameAddrIndex, VT);
18107 unsigned FrameReg =
18108 RegInfo->getPtrSizedFrameRegister(DAG.getMachineFunction());
18109 SDLoc dl(Op); // FIXME probably not meaningful
18110 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
18111 assert(((FrameReg == X86::RBP && VT == MVT::i64) ||
18112 (FrameReg == X86::EBP && VT == MVT::i32)) &&
18113 "Invalid Frame Register!");
18114 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, VT);
18116 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
18117 MachinePointerInfo(),
18118 false, false, false, 0);
18122 // FIXME? Maybe this could be a TableGen attribute on some registers and
18123 // this table could be generated automatically from RegInfo.
18124 unsigned X86TargetLowering::getRegisterByName(const char* RegName,
18126 unsigned Reg = StringSwitch<unsigned>(RegName)
18127 .Case("esp", X86::ESP)
18128 .Case("rsp", X86::RSP)
18132 report_fatal_error("Invalid register name global variable");
18135 SDValue X86TargetLowering::LowerFRAME_TO_ARGS_OFFSET(SDValue Op,
18136 SelectionDAG &DAG) const {
18137 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18138 return DAG.getIntPtrConstant(2 * RegInfo->getSlotSize());
18141 SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
18142 SDValue Chain = Op.getOperand(0);
18143 SDValue Offset = Op.getOperand(1);
18144 SDValue Handler = Op.getOperand(2);
18147 EVT PtrVT = getPointerTy();
18148 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
18149 unsigned FrameReg = RegInfo->getFrameRegister(DAG.getMachineFunction());
18150 assert(((FrameReg == X86::RBP && PtrVT == MVT::i64) ||
18151 (FrameReg == X86::EBP && PtrVT == MVT::i32)) &&
18152 "Invalid Frame Register!");
18153 SDValue Frame = DAG.getCopyFromReg(DAG.getEntryNode(), dl, FrameReg, PtrVT);
18154 unsigned StoreAddrReg = (PtrVT == MVT::i64) ? X86::RCX : X86::ECX;
18156 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, Frame,
18157 DAG.getIntPtrConstant(RegInfo->getSlotSize()));
18158 StoreAddr = DAG.getNode(ISD::ADD, dl, PtrVT, StoreAddr, Offset);
18159 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
18161 Chain = DAG.getCopyToReg(Chain, dl, StoreAddrReg, StoreAddr);
18163 return DAG.getNode(X86ISD::EH_RETURN, dl, MVT::Other, Chain,
18164 DAG.getRegister(StoreAddrReg, PtrVT));
18167 SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
18168 SelectionDAG &DAG) const {
18170 return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
18171 DAG.getVTList(MVT::i32, MVT::Other),
18172 Op.getOperand(0), Op.getOperand(1));
18175 SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
18176 SelectionDAG &DAG) const {
18178 return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
18179 Op.getOperand(0), Op.getOperand(1));
18182 static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
18183 return Op.getOperand(0);
18186 SDValue X86TargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
18187 SelectionDAG &DAG) const {
18188 SDValue Root = Op.getOperand(0);
18189 SDValue Trmp = Op.getOperand(1); // trampoline
18190 SDValue FPtr = Op.getOperand(2); // nested function
18191 SDValue Nest = Op.getOperand(3); // 'nest' parameter value
18194 const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();
18195 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
18197 if (Subtarget->is64Bit()) {
18198 SDValue OutChains[6];
18200 // Large code-model.
18201 const unsigned char JMP64r = 0xFF; // 64-bit jmp through register opcode.
18202 const unsigned char MOV64ri = 0xB8; // X86::MOV64ri opcode.
18204 const unsigned char N86R10 = TRI->getEncodingValue(X86::R10) & 0x7;
18205 const unsigned char N86R11 = TRI->getEncodingValue(X86::R11) & 0x7;
18207 const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
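    // Assuming x86's little-endian byte order for the i16 opcode stores, the
    // six stores below assemble roughly this 23-byte thunk in the trampoline
    // buffer (byte offsets on the left):
    //
    //    0: 49 BB <FPtr:8 bytes>   movabsq $<nested function>, %r11
    //   10: 49 BA <Nest:8 bytes>   movabsq $<nest argument>, %r10
    //   20: 49 FF E3               jmpq    *%r11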
18209 // Load the pointer to the nested function into R11.
18210 unsigned OpCode = ((MOV64ri | N86R11) << 8) | REX_WB; // movabsq r11
18211 SDValue Addr = Trmp;
18212 OutChains[0] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18213 Addr, MachinePointerInfo(TrmpAddr),
18216 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18217 DAG.getConstant(2, MVT::i64));
18218 OutChains[1] = DAG.getStore(Root, dl, FPtr, Addr,
18219 MachinePointerInfo(TrmpAddr, 2),
18222 // Load the 'nest' parameter value into R10.
18223 // R10 is specified in X86CallingConv.td
18224 OpCode = ((MOV64ri | N86R10) << 8) | REX_WB; // movabsq r10
18225 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18226 DAG.getConstant(10, MVT::i64));
18227 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18228 Addr, MachinePointerInfo(TrmpAddr, 10),
18231 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18232 DAG.getConstant(12, MVT::i64));
18233 OutChains[3] = DAG.getStore(Root, dl, Nest, Addr,
18234 MachinePointerInfo(TrmpAddr, 12),
18237 // Jump to the nested function.
18238 OpCode = (JMP64r << 8) | REX_WB; // jmpq *...
18239 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18240 DAG.getConstant(20, MVT::i64));
18241 OutChains[4] = DAG.getStore(Root, dl, DAG.getConstant(OpCode, MVT::i16),
18242 Addr, MachinePointerInfo(TrmpAddr, 20),
18245 unsigned char ModRM = N86R11 | (4 << 3) | (3 << 6); // ...r11
18246 Addr = DAG.getNode(ISD::ADD, dl, MVT::i64, Trmp,
18247 DAG.getConstant(22, MVT::i64));
18248 OutChains[5] = DAG.getStore(Root, dl, DAG.getConstant(ModRM, MVT::i8), Addr,
18249 MachinePointerInfo(TrmpAddr, 22),
18252 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18254 const Function *Func =
18255 cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
18256 CallingConv::ID CC = Func->getCallingConv();
18261 llvm_unreachable("Unsupported calling convention");
18262 case CallingConv::C:
18263 case CallingConv::X86_StdCall: {
18264 // Pass 'nest' parameter in ECX.
18265 // Must be kept in sync with X86CallingConv.td
18266 NestReg = X86::ECX;
18268 // Check that ECX wasn't needed by an 'inreg' parameter.
18269 FunctionType *FTy = Func->getFunctionType();
18270 const AttributeSet &Attrs = Func->getAttributes();
18272 if (!Attrs.isEmpty() && !Func->isVarArg()) {
18273 unsigned InRegCount = 0;
18276 for (FunctionType::param_iterator I = FTy->param_begin(),
18277 E = FTy->param_end(); I != E; ++I, ++Idx)
18278 if (Attrs.hasAttribute(Idx, Attribute::InReg))
18279 // FIXME: should only count parameters that are lowered to integers.
18280 InRegCount += (TD->getTypeSizeInBits(*I) + 31) / 32;
18282 if (InRegCount > 2) {
18283 report_fatal_error("Nest register in use - reduce number of inreg"
18289 case CallingConv::X86_FastCall:
18290 case CallingConv::X86_ThisCall:
18291 case CallingConv::Fast:
18292 // Pass 'nest' parameter in EAX.
18293 // Must be kept in sync with X86CallingConv.td
18294 NestReg = X86::EAX;
18298 SDValue OutChains[4];
18299 SDValue Addr, Disp;
18301 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18302 DAG.getConstant(10, MVT::i32));
18303 Disp = DAG.getNode(ISD::SUB, dl, MVT::i32, FPtr, Addr);
18305 // This is storing the opcode for MOV32ri.
18306 const unsigned char MOV32ri = 0xB8; // X86::MOV32ri's opcode byte.
18307 const unsigned char N86Reg = TRI->getEncodingValue(NestReg) & 0x7;
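    // Assuming little-endian stores, the four stores below assemble roughly
    // this 10-byte thunk (the nest register is encoded into the MOV opcode):
    //
    //   0: B8+reg <Nest:4 bytes>   movl $<nest argument>, %ecx/%eax
    //   5: E9     <Disp:4 bytes>   jmp  <nested function> (rel32 from offset 10)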
18308 OutChains[0] = DAG.getStore(Root, dl,
18309 DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
18310 Trmp, MachinePointerInfo(TrmpAddr),
18313 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18314 DAG.getConstant(1, MVT::i32));
18315 OutChains[1] = DAG.getStore(Root, dl, Nest, Addr,
18316 MachinePointerInfo(TrmpAddr, 1),
18319 const unsigned char JMP = 0xE9; // jmp <32bit dst> opcode.
18320 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18321 DAG.getConstant(5, MVT::i32));
18322 OutChains[2] = DAG.getStore(Root, dl, DAG.getConstant(JMP, MVT::i8), Addr,
18323 MachinePointerInfo(TrmpAddr, 5),
18326 Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
18327 DAG.getConstant(6, MVT::i32));
18328 OutChains[3] = DAG.getStore(Root, dl, Disp, Addr,
18329 MachinePointerInfo(TrmpAddr, 6),
18332 return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains);
18336 SDValue X86TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
18337 SelectionDAG &DAG) const {
18339    The rounding mode is in bits 11:10 of FPSR, and has the following
     settings:
18341      00 Round to nearest
       01 Round to -inf
       10 Round to +inf
       11 Round to 0
18346    FLT_ROUNDS, on the other hand, expects the following:
      -1 Undefined
       0 Round to 0
       1 Round to nearest
       2 Round to +inf
       3 Round to -inf
18353    To perform the conversion, we do:
18354      (((((FPSR & 0x800) >> 11) | ((FPSR & 0x400) >> 9)) + 1) & 3)
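     As a worked check (bit 11 is the high bit of the RC field, bit 10 the low one):
       RC = 00 (nearest): ((0 | 0) + 1) & 3 = 1
       RC = 01 (-inf):    ((0 | 2) + 1) & 3 = 3
       RC = 10 (+inf):    ((1 | 0) + 1) & 3 = 2
       RC = 11 (zero):    ((1 | 2) + 1) & 3 = 0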
18357 MachineFunction &MF = DAG.getMachineFunction();
18358 const TargetFrameLowering &TFI = *Subtarget->getFrameLowering();
18359 unsigned StackAlignment = TFI.getStackAlignment();
18360 MVT VT = Op.getSimpleValueType();
18363 // Save FP Control Word to stack slot
18364 int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment, false);
18365 SDValue StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
18367 MachineMemOperand *MMO =
18368 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(SSFI),
18369 MachineMemOperand::MOStore, 2, 2);
18371 SDValue Ops[] = { DAG.getEntryNode(), StackSlot };
18372 SDValue Chain = DAG.getMemIntrinsicNode(X86ISD::FNSTCW16m, DL,
18373 DAG.getVTList(MVT::Other),
18374 Ops, MVT::i16, MMO);
18376 // Load FP Control Word from stack slot
18377 SDValue CWD = DAG.getLoad(MVT::i16, DL, Chain, StackSlot,
18378 MachinePointerInfo(), false, false, false, 0);
18380 // Transform as necessary
18382 DAG.getNode(ISD::SRL, DL, MVT::i16,
18383 DAG.getNode(ISD::AND, DL, MVT::i16,
18384 CWD, DAG.getConstant(0x800, MVT::i16)),
18385 DAG.getConstant(11, MVT::i8));
18387 DAG.getNode(ISD::SRL, DL, MVT::i16,
18388 DAG.getNode(ISD::AND, DL, MVT::i16,
18389 CWD, DAG.getConstant(0x400, MVT::i16)),
18390 DAG.getConstant(9, MVT::i8));
18393 DAG.getNode(ISD::AND, DL, MVT::i16,
18394 DAG.getNode(ISD::ADD, DL, MVT::i16,
18395 DAG.getNode(ISD::OR, DL, MVT::i16, CWD1, CWD2),
18396 DAG.getConstant(1, MVT::i16)),
18397 DAG.getConstant(3, MVT::i16));
18399 return DAG.getNode((VT.getSizeInBits() < 16 ?
18400 ISD::TRUNCATE : ISD::ZERO_EXTEND), DL, VT, RetVal);
18403 static SDValue LowerCTLZ(SDValue Op, SelectionDAG &DAG) {
18404 MVT VT = Op.getSimpleValueType();
18406 unsigned NumBits = VT.getSizeInBits();
18409 Op = Op.getOperand(0);
18410 if (VT == MVT::i8) {
18411 // Zero extend to i32 since there is not an i8 bsr.
18413 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18416 // Issue a bsr (scan bits in reverse) which also sets EFLAGS.
18417 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18418 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18420 // If src is zero (i.e. bsr sets ZF), returns NumBits.
18423 DAG.getConstant(NumBits+NumBits-1, OpVT),
18424 DAG.getConstant(X86::COND_E, MVT::i8),
18427 Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
18429 // Finally xor with NumBits-1.
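  // BSR returns the bit index of the most significant set bit, and for a
  // power-of-two bit width (NumBits - 1) - index == (NumBits - 1) ^ index,
  // so this XOR converts the index into the leading-zero count.  The CMOV
  // above picks 2*NumBits-1 for a zero input so that the same XOR yields
  // NumBits, matching the CTLZ semantics.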
18430 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
18433 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18437 static SDValue LowerCTLZ_ZERO_UNDEF(SDValue Op, SelectionDAG &DAG) {
18438 MVT VT = Op.getSimpleValueType();
18440 unsigned NumBits = VT.getSizeInBits();
18443 Op = Op.getOperand(0);
18444 if (VT == MVT::i8) {
18445 // Zero extend to i32 since there is not an i8 bsr.
18447 Op = DAG.getNode(ISD::ZERO_EXTEND, dl, OpVT, Op);
18450 // Issue a bsr (scan bits in reverse).
18451 SDVTList VTs = DAG.getVTList(OpVT, MVT::i32);
18452 Op = DAG.getNode(X86ISD::BSR, dl, VTs, Op);
18454 // And xor with NumBits-1.
18455 Op = DAG.getNode(ISD::XOR, dl, OpVT, Op, DAG.getConstant(NumBits-1, OpVT));
18458 Op = DAG.getNode(ISD::TRUNCATE, dl, MVT::i8, Op);
18462 static SDValue LowerCTTZ(SDValue Op, SelectionDAG &DAG) {
18463 MVT VT = Op.getSimpleValueType();
18464 unsigned NumBits = VT.getSizeInBits();
18466 Op = Op.getOperand(0);
18468 // Issue a bsf (scan bits forward) which also sets EFLAGS.
18469 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
18470 Op = DAG.getNode(X86ISD::BSF, dl, VTs, Op);
18472 // If src is zero (i.e. bsf sets ZF), returns NumBits.
18475 DAG.getConstant(NumBits, VT),
18476 DAG.getConstant(X86::COND_E, MVT::i8),
18479 return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
18482 // Lower256IntArith - Break a 256-bit integer operation into two new 128-bit
18483 // ones, and then concatenate the result back.
18484 static SDValue Lower256IntArith(SDValue Op, SelectionDAG &DAG) {
18485 MVT VT = Op.getSimpleValueType();
18487 assert(VT.is256BitVector() && VT.isInteger() &&
18488 "Unsupported value type for operation");
18490 unsigned NumElems = VT.getVectorNumElements();
18493 // Extract the LHS vectors
18494 SDValue LHS = Op.getOperand(0);
18495 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
18496 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
18498 // Extract the RHS vectors
18499 SDValue RHS = Op.getOperand(1);
18500 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, dl);
18501 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, dl);
18503 MVT EltVT = VT.getVectorElementType();
18504 MVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
18506 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT,
18507 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, RHS1),
18508 DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, RHS2));
18511 static SDValue LowerADD(SDValue Op, SelectionDAG &DAG) {
18512 assert(Op.getSimpleValueType().is256BitVector() &&
18513 Op.getSimpleValueType().isInteger() &&
18514 "Only handle AVX 256-bit vector integer operation");
18515 return Lower256IntArith(Op, DAG);
18518 static SDValue LowerSUB(SDValue Op, SelectionDAG &DAG) {
18519 assert(Op.getSimpleValueType().is256BitVector() &&
18520 Op.getSimpleValueType().isInteger() &&
18521 "Only handle AVX 256-bit vector integer operation");
18522 return Lower256IntArith(Op, DAG);
18525 static SDValue LowerMUL(SDValue Op, const X86Subtarget *Subtarget,
18526 SelectionDAG &DAG) {
18528 MVT VT = Op.getSimpleValueType();
18530 // Decompose 256-bit ops into smaller 128-bit ops.
18531 if (VT.is256BitVector() && !Subtarget->hasInt256())
18532 return Lower256IntArith(Op, DAG);
18534 SDValue A = Op.getOperand(0);
18535 SDValue B = Op.getOperand(1);
18537 // Lower v4i32 mul as 2x shuffle, 2x pmuludq, 2x shuffle.
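  // A sketch of the data flow for A = <a0,a1,a2,a3>, B = <b0,b1,b2,b3>:
  //   pmuludq(A, B)             -> <a0*b0, a2*b2>                (v2i64)
  //   shuffle {1,-1,3,-1}       -> Aodds = <a1,u,a3,u>, Bodds = <b1,u,b3,u>
  //   pmuludq(Aodds, Bodds)     -> <a1*b1, a3*b3>                (v2i64)
  //   bitcast both back to v4i32 and shuffle {0,4,2,6} to keep the low
  //   32 bits of each product   -> <a0*b0, a1*b1, a2*b2, a3*b3>  (mod 2^32)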
18538 if (VT == MVT::v4i32) {
18539 assert(Subtarget->hasSSE2() && !Subtarget->hasSSE41() &&
18540 "Should not custom lower when pmuldq is available!");
18542 // Extract the odd parts.
18543 static const int UnpackMask[] = { 1, -1, 3, -1 };
18544 SDValue Aodds = DAG.getVectorShuffle(VT, dl, A, A, UnpackMask);
18545 SDValue Bodds = DAG.getVectorShuffle(VT, dl, B, B, UnpackMask);
18547 // Multiply the even parts.
18548 SDValue Evens = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, A, B);
18549 // Now multiply odd parts.
18550 SDValue Odds = DAG.getNode(X86ISD::PMULUDQ, dl, MVT::v2i64, Aodds, Bodds);
18552 Evens = DAG.getNode(ISD::BITCAST, dl, VT, Evens);
18553 Odds = DAG.getNode(ISD::BITCAST, dl, VT, Odds);
18555 // Merge the two vectors back together with a shuffle. This expands into 2
18557 static const int ShufMask[] = { 0, 4, 2, 6 };
18558 return DAG.getVectorShuffle(VT, dl, Evens, Odds, ShufMask);
18561 assert((VT == MVT::v2i64 || VT == MVT::v4i64 || VT == MVT::v8i64) &&
18562 "Only know how to lower V2I64/V4I64/V8I64 multiply");
18564 // Ahi = psrlqi(a, 32);
18565 // Bhi = psrlqi(b, 32);
18567 // AloBlo = pmuludq(a, b);
18568 // AloBhi = pmuludq(a, Bhi);
18569 // AhiBlo = pmuludq(Ahi, b);
18571 // AloBhi = psllqi(AloBhi, 32);
18572 // AhiBlo = psllqi(AhiBlo, 32);
18573 // return AloBlo + AloBhi + AhiBlo;
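  // This is just 64x64->64 multiplication done in 32-bit halves: with
  // a = Ahi*2^32 + Alo and b = Bhi*2^32 + Blo,
  //   a*b = Alo*Blo + (Alo*Bhi + Ahi*Blo)*2^32 + Ahi*Bhi*2^64,
  // and the Ahi*Bhi term vanishes modulo 2^64, so three pmuludq results
  // (with the two cross terms shifted left by 32) are enough.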
18575 SDValue Ahi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, A, 32, DAG);
18576 SDValue Bhi = getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, B, 32, DAG);
18578 // Bit cast to 32-bit vectors for MULUDQ
18579 EVT MulVT = (VT == MVT::v2i64) ? MVT::v4i32 :
18580 (VT == MVT::v4i64) ? MVT::v8i32 : MVT::v16i32;
18581 A = DAG.getNode(ISD::BITCAST, dl, MulVT, A);
18582 B = DAG.getNode(ISD::BITCAST, dl, MulVT, B);
18583 Ahi = DAG.getNode(ISD::BITCAST, dl, MulVT, Ahi);
18584 Bhi = DAG.getNode(ISD::BITCAST, dl, MulVT, Bhi);
18586 SDValue AloBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, B);
18587 SDValue AloBhi = DAG.getNode(X86ISD::PMULUDQ, dl, VT, A, Bhi);
18588 SDValue AhiBlo = DAG.getNode(X86ISD::PMULUDQ, dl, VT, Ahi, B);
18590 AloBhi = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AloBhi, 32, DAG);
18591 AhiBlo = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, AhiBlo, 32, DAG);
18593 SDValue Res = DAG.getNode(ISD::ADD, dl, VT, AloBlo, AloBhi);
18594 return DAG.getNode(ISD::ADD, dl, VT, Res, AhiBlo);
18597 SDValue X86TargetLowering::LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const {
18598 assert(Subtarget->isTargetWin64() && "Unexpected target");
18599 EVT VT = Op.getValueType();
18600 assert(VT.isInteger() && VT.getSizeInBits() == 128 &&
18601 "Unexpected return type for lowering");
18605 switch (Op->getOpcode()) {
18606 default: llvm_unreachable("Unexpected request for libcall!");
18607 case ISD::SDIV: isSigned = true; LC = RTLIB::SDIV_I128; break;
18608 case ISD::UDIV: isSigned = false; LC = RTLIB::UDIV_I128; break;
18609 case ISD::SREM: isSigned = true; LC = RTLIB::SREM_I128; break;
18610 case ISD::UREM: isSigned = false; LC = RTLIB::UREM_I128; break;
18611 case ISD::SDIVREM: isSigned = true; LC = RTLIB::SDIVREM_I128; break;
18612 case ISD::UDIVREM: isSigned = false; LC = RTLIB::UDIVREM_I128; break;
18616 SDValue InChain = DAG.getEntryNode();
18618 TargetLowering::ArgListTy Args;
18619 TargetLowering::ArgListEntry Entry;
18620 for (unsigned i = 0, e = Op->getNumOperands(); i != e; ++i) {
18621 EVT ArgVT = Op->getOperand(i).getValueType();
18622 assert(ArgVT.isInteger() && ArgVT.getSizeInBits() == 128 &&
18623 "Unexpected argument type for lowering");
18624 SDValue StackPtr = DAG.CreateStackTemporary(ArgVT, 16);
18625 Entry.Node = StackPtr;
18626 InChain = DAG.getStore(InChain, dl, Op->getOperand(i), StackPtr, MachinePointerInfo(),
18628 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
18629 Entry.Ty = PointerType::get(ArgTy,0);
18630 Entry.isSExt = false;
18631 Entry.isZExt = false;
18632 Args.push_back(Entry);
18635 SDValue Callee = DAG.getExternalSymbol(getLibcallName(LC),
18638 TargetLowering::CallLoweringInfo CLI(DAG);
18639 CLI.setDebugLoc(dl).setChain(InChain)
18640 .setCallee(getLibcallCallingConv(LC),
18641 static_cast<EVT>(MVT::v2i64).getTypeForEVT(*DAG.getContext()),
18642 Callee, std::move(Args), 0)
18643 .setInRegister().setSExtResult(isSigned).setZExtResult(!isSigned);
18645 std::pair<SDValue, SDValue> CallInfo = LowerCallTo(CLI);
18646 return DAG.getNode(ISD::BITCAST, dl, VT, CallInfo.first);
18649 static SDValue LowerMUL_LOHI(SDValue Op, const X86Subtarget *Subtarget,
18650 SelectionDAG &DAG) {
18651 SDValue Op0 = Op.getOperand(0), Op1 = Op.getOperand(1);
18652 EVT VT = Op0.getValueType();
18655 assert((VT == MVT::v4i32 && Subtarget->hasSSE2()) ||
18656 (VT == MVT::v8i32 && Subtarget->hasInt256()));
18658 // PMULxD operations multiply each even value (starting at 0) of LHS with
18659   // the related value of RHS and produce a widened result.
18660 // E.g., PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18661 // => <2 x i64> <ae|cg>
18663   // In other words, to have all the results, we need to perform two PMULxD:
18664 // 1. one with the even values.
18665 // 2. one with the odd values.
18666   // To achieve #2, we need to place the odd values at an even position.
18668 // Place the odd value at an even position (basically, shift all values 1
18669 // step to the left):
18670 const int Mask[] = {1, -1, 3, -1, 5, -1, 7, -1};
18671 // <a|b|c|d> => <b|undef|d|undef>
18672 SDValue Odd0 = DAG.getVectorShuffle(VT, dl, Op0, Op0, Mask);
18673 // <e|f|g|h> => <f|undef|h|undef>
18674 SDValue Odd1 = DAG.getVectorShuffle(VT, dl, Op1, Op1, Mask);
18676 // Emit two multiplies, one for the lower 2 ints and one for the higher 2
18677 // ints.
18678 MVT MulVT = VT == MVT::v4i32 ? MVT::v2i64 : MVT::v4i64;
18679 bool IsSigned = Op->getOpcode() == ISD::SMUL_LOHI;
18680 unsigned Opcode =
18681 (!IsSigned || !Subtarget->hasSSE41()) ? X86ISD::PMULUDQ : X86ISD::PMULDQ;
18682 // PMULUDQ <4 x i32> <a|b|c|d>, <4 x i32> <e|f|g|h>
18683 // => <2 x i64> <ae|cg>
18684 SDValue Mul1 = DAG.getNode(ISD::BITCAST, dl, VT,
18685 DAG.getNode(Opcode, dl, MulVT, Op0, Op1));
18686 // PMULUDQ <4 x i32> <b|undef|d|undef>, <4 x i32> <f|undef|h|undef>
18687 // => <2 x i64> <bf|dh>
18688 SDValue Mul2 = DAG.getNode(ISD::BITCAST, dl, VT,
18689 DAG.getNode(Opcode, dl, MulVT, Odd0, Odd1));
18691 // Shuffle it back into the right order.
18692 SDValue Highs, Lows;
18693 if (VT == MVT::v8i32) {
18694 const int HighMask[] = {1, 9, 3, 11, 5, 13, 7, 15};
18695 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18696 const int LowMask[] = {0, 8, 2, 10, 4, 12, 6, 14};
18697 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18698 } else {
18699 const int HighMask[] = {1, 5, 3, 7};
18700 Highs = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, HighMask);
18701 const int LowMask[] = {0, 4, 2, 6};
18702 Lows = DAG.getVectorShuffle(VT, dl, Mul1, Mul2, LowMask);
18703 }
18705 // If we have a signed multiply but no PMULDQ, fix up the high parts of an
18706 // unsigned multiply.
18707 if (IsSigned && !Subtarget->hasSSE41()) {
18708 SDValue ShAmt =
18709 DAG.getConstant(31, DAG.getTargetLoweringInfo().getShiftAmountTy(VT));
18710 SDValue T1 = DAG.getNode(ISD::AND, dl, VT,
18711 DAG.getNode(ISD::SRA, dl, VT, Op0, ShAmt), Op1);
18712 SDValue T2 = DAG.getNode(ISD::AND, dl, VT,
18713 DAG.getNode(ISD::SRA, dl, VT, Op1, ShAmt), Op0);
18715 SDValue Fixup = DAG.getNode(ISD::ADD, dl, VT, T1, T2);
18716 Highs = DAG.getNode(ISD::SUB, dl, VT, Highs, Fixup);
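// Why this fixup is sufficient (sketch): reading a lane's bits as signed (a)
// versus unsigned (a_u), a = a_u - 2^32 when a is negative. Expanding a*b and
// reducing modulo 2^64 shows the signed high half differs from the unsigned
// high half by exactly
//   (a < 0 ? b : 0) + (b < 0 ? a : 0)
// and (x >>s 31) & y computes (x < 0 ? y : 0), which is what T1/T2 hold above.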
18717 }
18719 // The first result of MUL_LOHI is actually the low value, followed by the
18720 // high one.
18721 SDValue Ops[] = {Lows, Highs};
18722 return DAG.getMergeValues(Ops, dl);
18725 static SDValue LowerScalarImmediateShift(SDValue Op, SelectionDAG &DAG,
18726 const X86Subtarget *Subtarget) {
18727 MVT VT = Op.getSimpleValueType();
18728 SDLoc dl(Op);
18729 SDValue R = Op.getOperand(0);
18730 SDValue Amt = Op.getOperand(1);
18732 // Optimize shl/srl/sra with constant shift amount.
18733 if (auto *BVAmt = dyn_cast<BuildVectorSDNode>(Amt)) {
18734 if (auto *ShiftConst = BVAmt->getConstantSplatNode()) {
18735 uint64_t ShiftAmt = ShiftConst->getZExtValue();
18737 if (VT == MVT::v2i64 || VT == MVT::v4i32 || VT == MVT::v8i16 ||
18738 (Subtarget->hasInt256() &&
18739 (VT == MVT::v4i64 || VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18740 (Subtarget->hasAVX512() &&
18741 (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18742 if (Op.getOpcode() == ISD::SHL)
18743 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
18744 DAG);
18745 if (Op.getOpcode() == ISD::SRL)
18746 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
18747 DAG);
18748 if (Op.getOpcode() == ISD::SRA && VT != MVT::v2i64 && VT != MVT::v4i64)
18749 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
18750 DAG);
18751 }
18753 if (VT == MVT::v16i8) {
18754 if (Op.getOpcode() == ISD::SHL) {
18755 // Make a large shift.
18756 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18757 MVT::v8i16, R, ShiftAmt,
18758 DAG);
18759 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18760 // Zero out the rightmost bits.
18761 SmallVector<SDValue, 16> V(16,
18762 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18763 MVT::i8));
18764 return DAG.getNode(ISD::AND, dl, VT, SHL,
18765 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18767 if (Op.getOpcode() == ISD::SRL) {
18768 // Make a large shift.
18769 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18770 MVT::v8i16, R, ShiftAmt,
18771 DAG);
18772 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18773 // Zero out the leftmost bits.
18774 SmallVector<SDValue, 16> V(16,
18775 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18776 MVT::i8));
18777 return DAG.getNode(ISD::AND, dl, VT, SRL,
18778 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18780 if (Op.getOpcode() == ISD::SRA) {
18781 if (ShiftAmt == 7) {
18782 // R s>> 7 === R s< 0
18783 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18784 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18785 }
18787 // R s>> a === ((R u>> a) ^ m) - m
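// A worked byte example of the identity with a = 3 and m = 128 >> 3 = 0x10:
//   R       = 0xF0 (-16)
//   R u>> 3 = 0x1E
//   ^ 0x10  = 0x0E
//   - 0x10  = 0xFE (-2), which equals -16 s>> 3.
// The XOR/SUB pair re-creates the sign bits that the logical shift dropped.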
18788 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18789 SmallVector<SDValue, 16> V(16, DAG.getConstant(128 >> ShiftAmt,
18790 MVT::i8));
18791 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18792 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18793 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18796 llvm_unreachable("Unknown shift opcode.");
18799 if (Subtarget->hasInt256() && VT == MVT::v32i8) {
18800 if (Op.getOpcode() == ISD::SHL) {
18801 // Make a large shift.
18802 SDValue SHL = getTargetVShiftByConstNode(X86ISD::VSHLI, dl,
18803 MVT::v16i16, R, ShiftAmt,
18804 DAG);
18805 SHL = DAG.getNode(ISD::BITCAST, dl, VT, SHL);
18806 // Zero out the rightmost bits.
18807 SmallVector<SDValue, 32> V(32,
18808 DAG.getConstant(uint8_t(-1U << ShiftAmt),
18809 MVT::i8));
18810 return DAG.getNode(ISD::AND, dl, VT, SHL,
18811 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18813 if (Op.getOpcode() == ISD::SRL) {
18814 // Make a large shift.
18815 SDValue SRL = getTargetVShiftByConstNode(X86ISD::VSRLI, dl,
18816 MVT::v16i16, R, ShiftAmt,
18817 DAG);
18818 SRL = DAG.getNode(ISD::BITCAST, dl, VT, SRL);
18819 // Zero out the leftmost bits.
18820 SmallVector<SDValue, 32> V(32,
18821 DAG.getConstant(uint8_t(-1U) >> ShiftAmt,
18822 MVT::i8));
18823 return DAG.getNode(ISD::AND, dl, VT, SRL,
18824 DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V));
18826 if (Op.getOpcode() == ISD::SRA) {
18827 if (ShiftAmt == 7) {
18828 // R s>> 7 === R s< 0
18829 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
18830 return DAG.getNode(X86ISD::PCMPGT, dl, VT, Zeros, R);
18833 // R s>> a === ((R u>> a) ^ m) - m
18834 SDValue Res = DAG.getNode(ISD::SRL, dl, VT, R, Amt);
18835 SmallVector<SDValue, 32> V(32, DAG.getConstant(128 >> ShiftAmt,
18836 MVT::i8));
18837 SDValue Mask = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, V);
18838 Res = DAG.getNode(ISD::XOR, dl, VT, Res, Mask);
18839 Res = DAG.getNode(ISD::SUB, dl, VT, Res, Mask);
18842 llvm_unreachable("Unknown shift opcode.");
18847 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
18848 if (!Subtarget->is64Bit() &&
18849 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64)) &&
18850 Amt.getOpcode() == ISD::BITCAST &&
18851 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
18852 Amt = Amt.getOperand(0);
18853 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
18854 VT.getVectorNumElements();
18855 unsigned RatioInLog2 = Log2_32_Ceil(Ratio);
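// Illustrative example (hypothetical values): shifting v2i64 by a splat of 3
// on a 32-bit target arrives here as a bitcast of the v4i32 build_vector
// <3, 0, 3, 0>. Ratio is then 4/2 = 2 and RatioInLog2 is 1, so operand i
// contributes C << (i * 32); the loop below reassembles ShiftAmt = 3 and the
// second loop checks that every i64 lane encodes that same amount.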
18856 uint64_t ShiftAmt = 0;
18857 for (unsigned i = 0; i != Ratio; ++i) {
18858 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Amt.getOperand(i));
18859 if (!C)
18860 return SDValue();
18861 // 6 == Log2(64)
18862 ShiftAmt |= C->getZExtValue() << (i * (1 << (6 - RatioInLog2)));
18863 }
18864 // Check remaining shift amounts.
18865 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
18866 uint64_t ShAmt = 0;
18867 for (unsigned j = 0; j != Ratio; ++j) {
18868 ConstantSDNode *C =
18869 dyn_cast<ConstantSDNode>(Amt.getOperand(i + j));
18870 if (!C)
18871 return SDValue();
18872 // 6 == Log2(64)
18873 ShAmt |= C->getZExtValue() << (j * (1 << (6 - RatioInLog2)));
18874 }
18875 if (ShAmt != ShiftAmt)
18876 return SDValue();
18877 }
18878 switch (Op.getOpcode()) {
18879 default:
18880 llvm_unreachable("Unknown shift opcode!");
18881 case ISD::SHL:
18882 return getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, R, ShiftAmt,
18883 DAG);
18884 case ISD::SRL:
18885 return getTargetVShiftByConstNode(X86ISD::VSRLI, dl, VT, R, ShiftAmt,
18886 DAG);
18887 case ISD::SRA:
18888 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, R, ShiftAmt,
18889 DAG);
18890 }
18891 }
18893 return SDValue();
18894 }
18896 static SDValue LowerScalarVariableShift(SDValue Op, SelectionDAG &DAG,
18897 const X86Subtarget* Subtarget) {
18898 MVT VT = Op.getSimpleValueType();
18899 SDLoc dl(Op);
18900 SDValue R = Op.getOperand(0);
18901 SDValue Amt = Op.getOperand(1);
18903 if ((VT == MVT::v2i64 && Op.getOpcode() != ISD::SRA) ||
18904 VT == MVT::v4i32 || VT == MVT::v8i16 ||
18905 (Subtarget->hasInt256() &&
18906 ((VT == MVT::v4i64 && Op.getOpcode() != ISD::SRA) ||
18907 VT == MVT::v8i32 || VT == MVT::v16i16)) ||
18908 (Subtarget->hasAVX512() && (VT == MVT::v8i64 || VT == MVT::v16i32))) {
18909 SDValue BaseShAmt;
18910 EVT EltVT = VT.getVectorElementType();
18912 if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(Amt)) {
18913 // Check if this build_vector node is doing a splat.
18914 // If so, then set BaseShAmt equal to the splat value.
18915 BaseShAmt = BV->getSplatValue();
18916 if (BaseShAmt && BaseShAmt.getOpcode() == ISD::UNDEF)
18917 BaseShAmt = SDValue();
18918 } else {
18919 if (Amt.getOpcode() == ISD::EXTRACT_SUBVECTOR)
18920 Amt = Amt.getOperand(0);
18922 ShuffleVectorSDNode *SVN = dyn_cast<ShuffleVectorSDNode>(Amt);
18923 if (SVN && SVN->isSplat()) {
18924 unsigned SplatIdx = (unsigned)SVN->getSplatIndex();
18925 SDValue InVec = Amt.getOperand(0);
18926 if (InVec.getOpcode() == ISD::BUILD_VECTOR) {
18927 assert((SplatIdx < InVec.getValueType().getVectorNumElements()) &&
18928 "Unexpected shuffle index found!");
18929 BaseShAmt = InVec.getOperand(SplatIdx);
18930 } else if (InVec.getOpcode() == ISD::INSERT_VECTOR_ELT) {
18931 if (ConstantSDNode *C =
18932 dyn_cast<ConstantSDNode>(InVec.getOperand(2))) {
18933 if (C->getZExtValue() == SplatIdx)
18934 BaseShAmt = InVec.getOperand(1);
18935 }
18936 }
18938 if (!BaseShAmt)
18939 // Avoid introducing an extract element from a shuffle.
18940 BaseShAmt = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT, InVec,
18941 DAG.getIntPtrConstant(SplatIdx));
18945 if (BaseShAmt.getNode()) {
18946 assert(EltVT.bitsLE(MVT::i64) && "Unexpected element type!");
18947 if (EltVT != MVT::i64 && EltVT.bitsGT(MVT::i32))
18948 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i64, BaseShAmt);
18949 else if (EltVT.bitsLT(MVT::i32))
18950 BaseShAmt = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, BaseShAmt);
18952 switch (Op.getOpcode()) {
18954 llvm_unreachable("Unknown shift opcode!");
18956 switch (VT.SimpleTy) {
18957 default: return SDValue();
18966 return getTargetVShiftNode(X86ISD::VSHLI, dl, VT, R, BaseShAmt, DAG);
18969 switch (VT.SimpleTy) {
18970 default: return SDValue();
18977 return getTargetVShiftNode(X86ISD::VSRAI, dl, VT, R, BaseShAmt, DAG);
18980 switch (VT.SimpleTy) {
18981 default: return SDValue();
18990 return getTargetVShiftNode(X86ISD::VSRLI, dl, VT, R, BaseShAmt, DAG);
18996 // Special case in 32-bit mode, where i64 is expanded into high and low parts.
18997 if (!Subtarget->is64Bit() &&
18998 (VT == MVT::v2i64 || (Subtarget->hasInt256() && VT == MVT::v4i64) ||
18999 (Subtarget->hasAVX512() && VT == MVT::v8i64)) &&
19000 Amt.getOpcode() == ISD::BITCAST &&
19001 Amt.getOperand(0).getOpcode() == ISD::BUILD_VECTOR) {
19002 Amt = Amt.getOperand(0);
19003 unsigned Ratio = Amt.getSimpleValueType().getVectorNumElements() /
19004 VT.getVectorNumElements();
19005 std::vector<SDValue> Vals(Ratio);
19006 for (unsigned i = 0; i != Ratio; ++i)
19007 Vals[i] = Amt.getOperand(i);
19008 for (unsigned i = Ratio; i != Amt.getNumOperands(); i += Ratio) {
19009 for (unsigned j = 0; j != Ratio; ++j)
19010 if (Vals[j] != Amt.getOperand(i + j))
19011 return SDValue();
19012 }
19013 switch (Op.getOpcode()) {
19014 default:
19015 llvm_unreachable("Unknown shift opcode!");
19016 case ISD::SHL:
19017 return DAG.getNode(X86ISD::VSHL, dl, VT, R, Op.getOperand(1));
19018 case ISD::SRL:
19019 return DAG.getNode(X86ISD::VSRL, dl, VT, R, Op.getOperand(1));
19020 case ISD::SRA:
19021 return DAG.getNode(X86ISD::VSRA, dl, VT, R, Op.getOperand(1));
19022 }
19023 }
19025 return SDValue();
19026 }
19028 static SDValue LowerShift(SDValue Op, const X86Subtarget* Subtarget,
19029 SelectionDAG &DAG) {
19030 MVT VT = Op.getSimpleValueType();
19031 SDLoc dl(Op);
19032 SDValue R = Op.getOperand(0);
19033 SDValue Amt = Op.getOperand(1);
19036 assert(VT.isVector() && "Custom lowering only for vector shifts!");
19037 assert(Subtarget->hasSSE2() && "Only custom lower when we have SSE2!");
19039 V = LowerScalarImmediateShift(Op, DAG, Subtarget);
19040 if (V.getNode())
19041 return V;
19043 V = LowerScalarVariableShift(Op, DAG, Subtarget);
19044 if (V.getNode())
19045 return V;
19047 if (Subtarget->hasAVX512() && (VT == MVT::v16i32 || VT == MVT::v8i64))
19048 return Op;
19049 // AVX2 has VPSLLV/VPSRAV/VPSRLV.
19050 if (Subtarget->hasInt256()) {
19051 if (Op.getOpcode() == ISD::SRL &&
19052 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
19053 VT == MVT::v4i64 || VT == MVT::v8i32))
19054 return Op;
19055 if (Op.getOpcode() == ISD::SHL &&
19056 (VT == MVT::v2i64 || VT == MVT::v4i32 ||
19057 VT == MVT::v4i64 || VT == MVT::v8i32))
19058 return Op;
19059 if (Op.getOpcode() == ISD::SRA && (VT == MVT::v4i32 || VT == MVT::v8i32))
19060 return Op;
19061 }
19063 // If possible, lower this packed shift into a vector multiply instead of
19064 // expanding it into a sequence of scalar shifts.
19065 // Do this only if the vector shift count is a constant build_vector.
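// For example (illustrative values): (v4i32 x) << <1, 2, 3, 4> becomes
// x * <2, 4, 8, 16>, i.e. each lane is multiplied by 1 << amt. Lanes whose
// shift amount is undef or >= the element width are emitted as undef below.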
19066 if (Op.getOpcode() == ISD::SHL &&
19067 (VT == MVT::v8i16 || VT == MVT::v4i32 ||
19068 (Subtarget->hasInt256() && VT == MVT::v16i16)) &&
19069 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
19070 SmallVector<SDValue, 8> Elts;
19071 EVT SVT = VT.getScalarType();
19072 unsigned SVTBits = SVT.getSizeInBits();
19073 const APInt &One = APInt(SVTBits, 1);
19074 unsigned NumElems = VT.getVectorNumElements();
19076 for (unsigned i=0; i !=NumElems; ++i) {
19077 SDValue Op = Amt->getOperand(i);
19078 if (Op->getOpcode() == ISD::UNDEF) {
19079 Elts.push_back(Op);
19080 continue;
19081 }
19083 ConstantSDNode *ND = cast<ConstantSDNode>(Op);
19084 const APInt &C = APInt(SVTBits, ND->getAPIntValue().getZExtValue());
19085 uint64_t ShAmt = C.getZExtValue();
19086 if (ShAmt >= SVTBits) {
19087 Elts.push_back(DAG.getUNDEF(SVT));
19088 continue;
19089 }
19090 Elts.push_back(DAG.getConstant(One.shl(ShAmt), SVT));
19091 }
19092 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Elts);
19093 return DAG.getNode(ISD::MUL, dl, VT, R, BV);
19096 // Lower SHL with variable shift amount.
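// This uses the IEEE-754 single-precision layout: (Amt << 23) places the
// per-lane shift amount in the exponent field, and adding 0x3f800000 (the
// bit pattern of 1.0f) biases it, so the value read back as a float is
// exactly 2^Amt for in-range amounts. E.g. for Amt = 5:
//   (5 << 23) + 0x3f800000 = 0x42000000 = 32.0f,
// and the FP_TO_SINT/MUL pair below turns that into R * 32 == R << 5.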
19097 if (VT == MVT::v4i32 && Op->getOpcode() == ISD::SHL) {
19098 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(23, VT));
19100 Op = DAG.getNode(ISD::ADD, dl, VT, Op, DAG.getConstant(0x3f800000U, VT));
19101 Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4f32, Op);
19102 Op = DAG.getNode(ISD::FP_TO_SINT, dl, VT, Op);
19103 return DAG.getNode(ISD::MUL, dl, VT, Op, R);
19106 // If possible, lower this shift as a sequence of two shifts by
19107 // constant plus a MOVSS/MOVSD instead of scalarizing it.
19109 // (v4i32 (srl A, (build_vector < X, Y, Y, Y>)))
19111 // Could be rewritten as:
19112 // (v4i32 (MOVSS (srl A, <Y,Y,Y,Y>), (srl A, <X,X,X,X>)))
19114 // The advantage is that the two shifts from the example would be
19115 // lowered as X86ISD::VSRLI nodes. This would be cheaper than scalarizing
19116 // the vector shift into four scalar shifts plus four pairs of vector
19118 if ((VT == MVT::v8i16 || VT == MVT::v4i32) &&
19119 ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
19120 unsigned TargetOpcode = X86ISD::MOVSS;
19121 bool CanBeSimplified;
19122 // The splat value for the first packed shift (the 'X' from the example).
19123 SDValue Amt1 = Amt->getOperand(0);
19124 // The splat value for the second packed shift (the 'Y' from the example).
19125 SDValue Amt2 = (VT == MVT::v4i32) ? Amt->getOperand(1) :
19126 Amt->getOperand(2);
19128 // See if it is possible to replace this node with a sequence of
19129 // two shifts followed by a MOVSS/MOVSD
19130 if (VT == MVT::v4i32) {
19131 // Check if it is legal to use a MOVSS.
19132 CanBeSimplified = Amt2 == Amt->getOperand(2) &&
19133 Amt2 == Amt->getOperand(3);
19134 if (!CanBeSimplified) {
19135 // Otherwise, check if we can still simplify this node using a MOVSD.
19136 CanBeSimplified = Amt1 == Amt->getOperand(1) &&
19137 Amt->getOperand(2) == Amt->getOperand(3);
19138 TargetOpcode = X86ISD::MOVSD;
19139 Amt2 = Amt->getOperand(2);
19140 }
19141 } else {
19142 // Do similar checks for the case where the machine value type
19143 // is MVT::v8i16.
19144 CanBeSimplified = Amt1 == Amt->getOperand(1);
19145 for (unsigned i=3; i != 8 && CanBeSimplified; ++i)
19146 CanBeSimplified = Amt2 == Amt->getOperand(i);
19148 if (!CanBeSimplified) {
19149 TargetOpcode = X86ISD::MOVSD;
19150 CanBeSimplified = true;
19151 Amt2 = Amt->getOperand(4);
19152 for (unsigned i=0; i != 4 && CanBeSimplified; ++i)
19153 CanBeSimplified = Amt1 == Amt->getOperand(i);
19154 for (unsigned j=4; j != 8 && CanBeSimplified; ++j)
19155 CanBeSimplified = Amt2 == Amt->getOperand(j);
19159 if (CanBeSimplified && isa<ConstantSDNode>(Amt1) &&
19160 isa<ConstantSDNode>(Amt2)) {
19161 // Replace this node with two shifts followed by a MOVSS/MOVSD.
19162 EVT CastVT = MVT::v4i32;
19163 SDValue Splat1 =
19164 DAG.getConstant(cast<ConstantSDNode>(Amt1)->getAPIntValue(), VT);
19165 SDValue Shift1 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat1);
19166 SDValue Splat2 =
19167 DAG.getConstant(cast<ConstantSDNode>(Amt2)->getAPIntValue(), VT);
19168 SDValue Shift2 = DAG.getNode(Op->getOpcode(), dl, VT, R, Splat2);
19169 if (TargetOpcode == X86ISD::MOVSD)
19170 CastVT = MVT::v2i64;
19171 SDValue BitCast1 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift1);
19172 SDValue BitCast2 = DAG.getNode(ISD::BITCAST, dl, CastVT, Shift2);
19173 SDValue Result = getTargetShuffleNode(TargetOpcode, dl, CastVT, BitCast2,
19174 BitCast1, DAG);
19175 return DAG.getNode(ISD::BITCAST, dl, VT, Result);
19179 if (VT == MVT::v16i8 && Op->getOpcode() == ISD::SHL) {
19180 assert(Subtarget->hasSSE2() && "Need SSE2 for pslli/pcmpeq.");
19183 Op = DAG.getNode(ISD::SHL, dl, VT, Amt, DAG.getConstant(5, VT));
19184 Op = DAG.getNode(ISD::BITCAST, dl, VT, Op);
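// Sketch of the algorithm: meaningful byte shift amounts (0-7) fit in 3 bits,
// and "Amt << 5" moves bit 2 of each lane's amount into that byte's sign bit
// so the PCMPEQ/VSELECT pair below can test it (a pblendvb-style select).
// Each round conditionally applies a fixed shift (by 4, then 2, then 1) and
// then doubles Op so the next amount bit reaches the sign-bit position.
// E.g. amt = 6 (0b110): shift by 4, then by 2, skip the final +R; total 6.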
19186 // Turn 'a' into a mask suitable for VSELECT
19187 SDValue VSelM = DAG.getConstant(0x80, VT);
19188 SDValue OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19189 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19191 SDValue CM1 = DAG.getConstant(0x0f, VT);
19192 SDValue CM2 = DAG.getConstant(0x3f, VT);
19194 // r = VSELECT(r, psllw(r & (char16)15, 4), a);
19195 SDValue M = DAG.getNode(ISD::AND, dl, VT, R, CM1);
19196 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 4, DAG);
19197 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19198 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19201 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19202 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19203 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19205 // r = VSELECT(r, psllw(r & (char16)63, 2), a);
19206 M = DAG.getNode(ISD::AND, dl, VT, R, CM2);
19207 M = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, MVT::v8i16, M, 2, DAG);
19208 M = DAG.getNode(ISD::BITCAST, dl, VT, M);
19209 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel, M, R);
19212 Op = DAG.getNode(ISD::ADD, dl, VT, Op, Op);
19213 OpVSel = DAG.getNode(ISD::AND, dl, VT, VSelM, Op);
19214 OpVSel = DAG.getNode(X86ISD::PCMPEQ, dl, VT, OpVSel, VSelM);
19216 // return VSELECT(r, r+r, a);
19217 R = DAG.getNode(ISD::VSELECT, dl, VT, OpVSel,
19218 DAG.getNode(ISD::ADD, dl, VT, R, R), R);
19219 return R;
19220 }
19222 // It's worth extending once and using the v8i32 shifts for 16-bit types, but
19223 // the extra overheads to get from v16i8 to v8i32 make the existing SSE
19224 // solution better.
19225 if (Subtarget->hasInt256() && VT == MVT::v8i16) {
19226 MVT NewVT = VT == MVT::v8i16 ? MVT::v8i32 : MVT::v16i16;
19227 unsigned ExtOpc =
19228 Op.getOpcode() == ISD::SRA ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
19229 R = DAG.getNode(ExtOpc, dl, NewVT, R);
19230 Amt = DAG.getNode(ISD::ANY_EXTEND, dl, NewVT, Amt);
19231 return DAG.getNode(ISD::TRUNCATE, dl, VT,
19232 DAG.getNode(Op.getOpcode(), dl, NewVT, R, Amt));
19235 // Decompose 256-bit shifts into smaller 128-bit shifts.
19236 if (VT.is256BitVector()) {
19237 unsigned NumElems = VT.getVectorNumElements();
19238 MVT EltVT = VT.getVectorElementType();
19239 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19241 // Extract the two vectors
19242 SDValue V1 = Extract128BitVector(R, 0, DAG, dl);
19243 SDValue V2 = Extract128BitVector(R, NumElems/2, DAG, dl);
19245 // Recreate the shift amount vectors
19246 SDValue Amt1, Amt2;
19247 if (Amt.getOpcode() == ISD::BUILD_VECTOR) {
19248 // Constant shift amount
19249 SmallVector<SDValue, 4> Amt1Csts;
19250 SmallVector<SDValue, 4> Amt2Csts;
19251 for (unsigned i = 0; i != NumElems/2; ++i)
19252 Amt1Csts.push_back(Amt->getOperand(i));
19253 for (unsigned i = NumElems/2; i != NumElems; ++i)
19254 Amt2Csts.push_back(Amt->getOperand(i));
19256 Amt1 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt1Csts);
19257 Amt2 = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Amt2Csts);
19258 } else {
19259 // Variable shift amount
19260 Amt1 = Extract128BitVector(Amt, 0, DAG, dl);
19261 Amt2 = Extract128BitVector(Amt, NumElems/2, DAG, dl);
19262 }
19264 // Issue new vector shifts for the smaller types
19265 V1 = DAG.getNode(Op.getOpcode(), dl, NewVT, V1, Amt1);
19266 V2 = DAG.getNode(Op.getOpcode(), dl, NewVT, V2, Amt2);
19268 // Concatenate the result back
19269 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, V1, V2);
19270 }
19272 return SDValue();
19273 }
19275 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
19276 // Lower the "add/sub/mul with overflow" instruction into a regular ins plus
19277 // a "setcc" instruction that checks the overflow flag. The "brcond" lowering
19278 // looks for this combo and may remove the "setcc" instruction if the "setcc"
19279 // has only one use.
19280 SDNode *N = Op.getNode();
19281 SDValue LHS = N->getOperand(0);
19282 SDValue RHS = N->getOperand(1);
19283 unsigned BaseOp = 0;
19284 unsigned Cond = 0;
19285 SDLoc DL(Op);
19286 switch (Op.getOpcode()) {
19287 default: llvm_unreachable("Unknown ovf instruction!");
19288 case ISD::SADDO:
19289 // An add of one will be selected as an INC. Note that INC doesn't
19290 // set CF, so we can't do this for UADDO.
19291 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19292 if (C->isOne()) {
19293 BaseOp = X86ISD::INC;
19294 Cond = X86::COND_O;
19295 break;
19296 }
19297 BaseOp = X86ISD::ADD;
19298 Cond = X86::COND_O;
19299 break;
19300 case ISD::UADDO:
19301 BaseOp = X86ISD::ADD;
19302 Cond = X86::COND_B;
19303 break;
19304 case ISD::SSUBO:
19305 // A subtract of one will be selected as a DEC. Note that DEC doesn't
19306 // set CF, so we can't do this for USUBO.
19307 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS))
19308 if (C->isOne()) {
19309 BaseOp = X86ISD::DEC;
19310 Cond = X86::COND_O;
19311 break;
19312 }
19313 BaseOp = X86ISD::SUB;
19314 Cond = X86::COND_O;
19315 break;
19316 case ISD::USUBO:
19317 BaseOp = X86ISD::SUB;
19318 Cond = X86::COND_B;
19319 break;
19320 case ISD::SMULO:
19321 BaseOp = N->getValueType(0) == MVT::i8 ? X86ISD::SMUL8 : X86ISD::SMUL;
19322 Cond = X86::COND_O;
19323 break;
19324 case ISD::UMULO: { // i64, i8 = umulo lhs, rhs --> i64, i64, i32 umul lhs,rhs
19325 if (N->getValueType(0) == MVT::i8) {
19326 BaseOp = X86ISD::UMUL8;
19327 Cond = X86::COND_O;
19328 break;
19329 }
19330 SDVTList VTs = DAG.getVTList(N->getValueType(0), N->getValueType(0),
19331 MVT::i32);
19332 SDValue Sum = DAG.getNode(X86ISD::UMUL, DL, VTs, LHS, RHS);
19334 SDValue SetCC =
19335 DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
19336 DAG.getConstant(X86::COND_O, MVT::i32),
19337 SDValue(Sum.getNode(), 2));
19339 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19340 }
19341 }
19343 // Also sets EFLAGS.
19344 SDVTList VTs = DAG.getVTList(N->getValueType(0), MVT::i32);
19345 SDValue Sum = DAG.getNode(BaseOp, DL, VTs, LHS, RHS);
19347 SDValue SetCC =
19348 DAG.getNode(X86ISD::SETCC, DL, N->getValueType(1),
19349 DAG.getConstant(Cond, MVT::i32),
19350 SDValue(Sum.getNode(), 1));
19352 return DAG.getNode(ISD::MERGE_VALUES, DL, N->getVTList(), Sum, SetCC);
19353 }
19355 // Sign extension of the low part of vector elements. This may be used either
19356 // when sign extend instructions are not available or if the vector element
19357 // sizes already match the sign-extended size. If the vector elements are in
19358 // their pre-extended size and sign extend instructions are available, that will
19359 // be handled by LowerSIGN_EXTEND.
19360 SDValue X86TargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
19361 SelectionDAG &DAG) const {
19362 SDLoc dl(Op);
19363 EVT ExtraVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
19364 MVT VT = Op.getSimpleValueType();
19366 if (!Subtarget->hasSSE2() || !VT.isVector())
19367 return SDValue();
19369 unsigned BitsDiff = VT.getScalarType().getSizeInBits() -
19370 ExtraVT.getScalarType().getSizeInBits();
19372 switch (VT.SimpleTy) {
19373 default: return SDValue();
19374 case MVT::v8i32:
19375 case MVT::v16i16:
19376 if (!Subtarget->hasFp256())
19377 return SDValue();
19378 if (!Subtarget->hasInt256()) {
19379 // needs to be split
19380 unsigned NumElems = VT.getVectorNumElements();
19382 // Extract the LHS vectors
19383 SDValue LHS = Op.getOperand(0);
19384 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, dl);
19385 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, dl);
19387 MVT EltVT = VT.getVectorElementType();
19388 EVT NewVT = MVT::getVectorVT(EltVT, NumElems/2);
19390 EVT ExtraEltVT = ExtraVT.getVectorElementType();
19391 unsigned ExtraNumElems = ExtraVT.getVectorNumElements();
19392 ExtraVT = EVT::getVectorVT(*DAG.getContext(), ExtraEltVT,
19393 ExtraNumElems/2);
19394 SDValue Extra = DAG.getValueType(ExtraVT);
19396 LHS1 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS1, Extra);
19397 LHS2 = DAG.getNode(Op.getOpcode(), dl, NewVT, LHS2, Extra);
19399 return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, LHS1, LHS2);
19400 }
19401 // fall through
19402 case MVT::v4i32:
19403 case MVT::v8i16: {
19404 SDValue Op0 = Op.getOperand(0);
19406 // This is a sign extension of some low part of vector elements without
19407 // changing the size of the vector elements themselves:
19408 // Shift-Left + Shift-Right-Algebraic.
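// Example: sign-extending i8 lanes held in v4i32 gives BitsDiff = 24, so
// (x << 24) s>> 24 copies bit 7 of the low byte into bits 31..8, which is
// precisely SIGN_EXTEND_INREG for that lane width.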
19409 SDValue Shl = getTargetVShiftByConstNode(X86ISD::VSHLI, dl, VT, Op0,
19410 BitsDiff, DAG);
19411 return getTargetVShiftByConstNode(X86ISD::VSRAI, dl, VT, Shl, BitsDiff,
19412 DAG);
19413 }
19414 }
19415 }
19417 /// Returns true if the operand type is exactly twice the native width, and
19418 /// the corresponding cmpxchg8b or cmpxchg16b instruction is available.
19419 /// Used to know whether to use cmpxchg8/16b when expanding atomic operations
19420 /// (otherwise we leave them alone to become __sync_fetch_and_... calls).
19421 bool X86TargetLowering::needsCmpXchgNb(const Type *MemType) const {
19422 unsigned OpWidth = MemType->getPrimitiveSizeInBits();
19424 if (OpWidth == 64)
19425 return !Subtarget->is64Bit(); // FIXME this should be Subtarget.hasCmpxchg8b
19426 else if (OpWidth == 128)
19427 return Subtarget->hasCmpxchg16b();
19428 else
19429 return false;
19430 }
19432 bool X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
19433 return needsCmpXchgNb(SI->getValueOperand()->getType());
19436 // Note: this turns large loads into lock cmpxchg8b/16b.
19437 // FIXME: On 32 bits x86, fild/movq might be faster than lock cmpxchg8b.
19438 bool X86TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const {
19439 auto PTy = cast<PointerType>(LI->getPointerOperand()->getType());
19440 return needsCmpXchgNb(PTy->getElementType());
19443 bool X86TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
19444 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19445 const Type *MemType = AI->getType();
19447 // If the operand is too big, we must see if cmpxchg8/16b is available
19448 // and default to library calls otherwise.
19449 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19450 return needsCmpXchgNb(MemType);
19452 AtomicRMWInst::BinOp Op = AI->getOperation();
19455 llvm_unreachable("Unknown atomic operation");
19456 case AtomicRMWInst::Xchg:
19457 case AtomicRMWInst::Add:
19458 case AtomicRMWInst::Sub:
19459 // It's better to use xadd, xsub or xchg for these in all cases.
19460 return false;
19461 case AtomicRMWInst::Or:
19462 case AtomicRMWInst::And:
19463 case AtomicRMWInst::Xor:
19464 // If the atomicrmw's result isn't actually used, we can just add a "lock"
19465 // prefix to a normal instruction for these operations.
19466 return !AI->use_empty();
19467 case AtomicRMWInst::Nand:
19468 case AtomicRMWInst::Max:
19469 case AtomicRMWInst::Min:
19470 case AtomicRMWInst::UMax:
19471 case AtomicRMWInst::UMin:
19472 // These always require a non-trivial set of data operations on x86. We must
19473 // use a cmpxchg loop.
19474 return true;
19475 }
19476 }
19478 static bool hasMFENCE(const X86Subtarget& Subtarget) {
19479 // Use mfence if we have SSE2 or we're on x86-64 (even if we asked for
19480 // no-sse2). There isn't any reason to disable it if the target processor
19481 // supports it.
19482 return Subtarget.hasSSE2() || Subtarget.is64Bit();
19483 }
19486 X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
19487 unsigned NativeWidth = Subtarget->is64Bit() ? 64 : 32;
19488 const Type *MemType = AI->getType();
19489 // Accesses larger than the native width are turned into cmpxchg/libcalls, so
19490 // there is no benefit in turning such RMWs into loads, and it is actually
19491 // harmful as it introduces a mfence.
19492 if (MemType->getPrimitiveSizeInBits() > NativeWidth)
19493 return nullptr;
19495 auto Builder = IRBuilder<>(AI);
19496 Module *M = Builder.GetInsertBlock()->getParent()->getParent();
19497 auto SynchScope = AI->getSynchScope();
19498 // We must restrict the ordering to avoid generating loads with Release or
19499 // ReleaseAcquire orderings.
19500 auto Order = AtomicCmpXchgInst::getStrongestFailureOrdering(AI->getOrdering());
19501 auto Ptr = AI->getPointerOperand();
19503 // Before the load we need a fence. Here is an example lifted from
19504 // http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf showing why a fence
19505 // is required:
19506 // Thread 0:
19507 //   x.store(1, relaxed);
19508 //   r1 = y.fetch_add(0, release);
19509 // Thread 1:
19510 //   y.fetch_add(42, acquire);
19511 // r2 = x.load(relaxed);
19512 // r1 = r2 = 0 is impossible, but becomes possible if the idempotent rmw is
19513 // lowered to just a load without a fence. A mfence flushes the store buffer,
19514 // making the optimization clearly correct.
19515 // FIXME: it is required if isAtLeastRelease(Order) but it is not clear
19516 // otherwise, we might be able to be more aggressive on relaxed idempotent
19517 // rmw. In practice, they do not look useful, so we don't try to be
19518 // especially clever.
19519 if (SynchScope == SingleThread) {
19520 // FIXME: we could just insert an X86ISD::MEMBARRIER here, except we are at
19521 // the IR level, so we must wrap it in an intrinsic.
19523 } else if (hasMFENCE(*Subtarget)) {
19524 Function *MFence = llvm::Intrinsic::getDeclaration(M,
19525 Intrinsic::x86_sse2_mfence);
19526 Builder.CreateCall(MFence);
19528 // FIXME: it might make sense to use a locked operation here but on a
19529 // different cache-line to prevent cache-line bouncing. In practice it
19530 // is probably a small win, and x86 processors without mfence are rare
19531 // enough that we do not bother.
19532 return nullptr;
19533 }
19535 // Finally we can emit the atomic load.
19536 LoadInst *Loaded = Builder.CreateAlignedLoad(Ptr,
19537 AI->getType()->getPrimitiveSizeInBits());
19538 Loaded->setAtomic(Order, SynchScope);
19539 AI->replaceAllUsesWith(Loaded);
19540 AI->eraseFromParent();
19541 return Loaded;
19542 }
19544 static SDValue LowerATOMIC_FENCE(SDValue Op, const X86Subtarget *Subtarget,
19545 SelectionDAG &DAG) {
19546 SDLoc dl(Op);
19547 AtomicOrdering FenceOrdering = static_cast<AtomicOrdering>(
19548 cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue());
19549 SynchronizationScope FenceScope = static_cast<SynchronizationScope>(
19550 cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
19552 // The only fence that needs an instruction is a sequentially-consistent
19553 // cross-thread fence.
19554 if (FenceOrdering == SequentiallyConsistent && FenceScope == CrossThread) {
19555 if (hasMFENCE(*Subtarget))
19556 return DAG.getNode(X86ISD::MFENCE, dl, MVT::Other, Op.getOperand(0));
19558 SDValue Chain = Op.getOperand(0);
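// Without MFENCE (pre-SSE2, 32-bit only), fall back to a locked no-op RMW on
// the stack: "lock or dword ptr [esp], 0". A LOCK-prefixed memory operation
// drains the store buffer, so it serves as a full barrier here.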
19559 SDValue Zero = DAG.getConstant(0, MVT::i32);
19560 SDValue Ops[] = {
19561 DAG.getRegister(X86::ESP, MVT::i32), // Base
19562 DAG.getTargetConstant(1, MVT::i8), // Scale
19563 DAG.getRegister(0, MVT::i32), // Index
19564 DAG.getTargetConstant(0, MVT::i32), // Disp
19565 DAG.getRegister(0, MVT::i32), // Segment.
19566 Zero,
19567 Chain
19568 };
19569 SDNode *Res = DAG.getMachineNode(X86::OR32mrLocked, dl, MVT::Other, Ops);
19570 return SDValue(Res, 0);
19573 // MEMBARRIER is a compiler barrier; it codegens to a no-op.
19574 return DAG.getNode(X86ISD::MEMBARRIER, dl, MVT::Other, Op.getOperand(0));
19577 static SDValue LowerCMP_SWAP(SDValue Op, const X86Subtarget *Subtarget,
19578 SelectionDAG &DAG) {
19579 MVT T = Op.getSimpleValueType();
19580 SDLoc DL(Op);
19581 unsigned Reg = 0;
19582 unsigned size = 0;
19583 switch(T.SimpleTy) {
19584 default: llvm_unreachable("Invalid value type!");
19585 case MVT::i8: Reg = X86::AL; size = 1; break;
19586 case MVT::i16: Reg = X86::AX; size = 2; break;
19587 case MVT::i32: Reg = X86::EAX; size = 4; break;
19589 assert(Subtarget->is64Bit() && "Node not type legal!");
19590 Reg = X86::RAX; size = 8;
19591 break;
19592 }
19593 SDValue cpIn = DAG.getCopyToReg(Op.getOperand(0), DL, Reg,
19594 Op.getOperand(2), SDValue());
19595 SDValue Ops[] = { cpIn.getValue(0),
19596 Op.getOperand(1),
19597 Op.getOperand(3),
19598 DAG.getTargetConstant(size, MVT::i8),
19599 cpIn.getValue(1) };
19600 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
19601 MachineMemOperand *MMO = cast<AtomicSDNode>(Op)->getMemOperand();
19602 SDValue Result = DAG.getMemIntrinsicNode(X86ISD::LCMPXCHG_DAG, DL, Tys,
19603 Ops, T, MMO);
19605 SDValue cpOut =
19606 DAG.getCopyFromReg(Result.getValue(0), DL, Reg, T, Result.getValue(1));
19607 SDValue EFLAGS = DAG.getCopyFromReg(cpOut.getValue(1), DL, X86::EFLAGS,
19608 MVT::i32, cpOut.getValue(2));
19609 SDValue Success = DAG.getNode(X86ISD::SETCC, DL, Op->getValueType(1),
19610 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
19612 DAG.ReplaceAllUsesOfValueWith(Op.getValue(0), cpOut);
19613 DAG.ReplaceAllUsesOfValueWith(Op.getValue(1), Success);
19614 DAG.ReplaceAllUsesOfValueWith(Op.getValue(2), EFLAGS.getValue(1));
19615 return SDValue();
19616 }
19618 static SDValue LowerBITCAST(SDValue Op, const X86Subtarget *Subtarget,
19619 SelectionDAG &DAG) {
19620 MVT SrcVT = Op.getOperand(0).getSimpleValueType();
19621 MVT DstVT = Op.getSimpleValueType();
19623 if (SrcVT == MVT::v2i32 || SrcVT == MVT::v4i16 || SrcVT == MVT::v8i8) {
19624 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
19625 if (DstVT != MVT::f64)
19626 // This conversion needs to be expanded.
19627 return SDValue();
19629 SDValue InVec = Op->getOperand(0);
19630 SDLoc dl(Op);
19631 unsigned NumElts = SrcVT.getVectorNumElements();
19632 EVT SVT = SrcVT.getVectorElementType();
19634 // Widen the vector in input in the case of MVT::v2i32.
19635 // Example: from MVT::v2i32 to MVT::v4i32.
19636 SmallVector<SDValue, 16> Elts;
19637 for (unsigned i = 0, e = NumElts; i != e; ++i)
19638 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT, InVec,
19639 DAG.getIntPtrConstant(i)));
19641 // Explicitly mark the extra elements as Undef.
19642 SDValue Undef = DAG.getUNDEF(SVT);
19643 for (unsigned i = NumElts, e = NumElts * 2; i != e; ++i)
19644 Elts.push_back(Undef);
19646 EVT NewVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
19647 SDValue BV = DAG.getNode(ISD::BUILD_VECTOR, dl, NewVT, Elts);
19648 SDValue ToV2F64 = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, BV);
19649 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::f64, ToV2F64,
19650 DAG.getIntPtrConstant(0));
19653 assert(Subtarget->is64Bit() && !Subtarget->hasSSE2() &&
19654 Subtarget->hasMMX() && "Unexpected custom BITCAST");
19655 assert((DstVT == MVT::i64 ||
19656 (DstVT.isVector() && DstVT.getSizeInBits()==64)) &&
19657 "Unexpected custom BITCAST");
19658 // i64 <=> MMX conversions are Legal.
19659 if (SrcVT==MVT::i64 && DstVT.isVector())
19660 return Op;
19661 if (DstVT==MVT::i64 && SrcVT.isVector())
19662 return Op;
19663 // MMX <=> MMX conversions are Legal.
19664 if (SrcVT.isVector() && DstVT.isVector())
19665 return Op;
19666 // All other conversions need to be expanded.
19667 return SDValue();
19668 }
19670 static SDValue LowerCTPOP(SDValue Op, const X86Subtarget *Subtarget,
19671 SelectionDAG &DAG) {
19672 SDNode *Node = Op.getNode();
19673 SDLoc dl(Node);
19675 Op = Op.getOperand(0);
19676 EVT VT = Op.getValueType();
19677 assert((VT.is128BitVector() || VT.is256BitVector()) &&
19678 "CTPOP lowering only implemented for 128/256-bit wide vector types");
19680 unsigned NumElts = VT.getVectorNumElements();
19681 EVT EltVT = VT.getVectorElementType();
19682 unsigned Len = EltVT.getSizeInBits();
19684 // This is the vectorized version of the "best" algorithm from
19685 // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
19686 // with a minor tweak to use a series of adds + shifts instead of vector
19687 // multiplications. Implemented for the v2i64, v4i64, v4i32, v8i32 types:
19689 // v2i64, v4i64, v4i32 => Only profitable w/ popcnt disabled
19690 // v8i32 => Always profitable
19692 // FIXME: There are a couple of possible improvements:
19694 // 1) Support for i8 and i16 vectors (needs measurements if popcnt enabled).
19695 // 2) Use strategies from http://wm.ite.pl/articles/sse-popcount.html
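// Worked single-byte example of the three masking steps for v = 0xDA
// (0b11011010, five bits set):
//   v - ((v >> 1) & 0x55)            -> 0x95  (2-bit field sums 2,1,1,1)
//   (v & 0x33) + ((v >> 2) & 0x33)   -> 0x32  (4-bit field sums 3,2)
//   (v + (v >> 4)) & 0x0F            -> 0x05
// The same arithmetic happens below on 32/64-bit lanes with splatted masks.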
19697 assert(EltVT.isInteger() && (Len == 32 || Len == 64) && Len % 8 == 0 &&
19698 "CTPOP not implemented for this vector element type.");
19700 // X86 canonicalize ANDs to vXi64, generate the appropriate bitcasts to avoid
19701 // extra legalization.
19702 bool NeedsBitcast = EltVT == MVT::i32;
19703 MVT BitcastVT = VT.is256BitVector() ? MVT::v4i64 : MVT::v2i64;
19705 SDValue Cst55 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), EltVT);
19706 SDValue Cst33 = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), EltVT);
19707 SDValue Cst0F = DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), EltVT);
19709 // v = v - ((v >> 1) & 0x55555555...)
19710 SmallVector<SDValue, 8> Ones(NumElts, DAG.getConstant(1, EltVT));
19711 SDValue OnesV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Ones);
19712 SDValue Srl = DAG.getNode(ISD::SRL, dl, VT, Op, OnesV);
19714 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19716 SmallVector<SDValue, 8> Mask55(NumElts, Cst55);
19717 SDValue M55 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask55);
19719 M55 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M55);
19721 SDValue And = DAG.getNode(ISD::AND, dl, Srl.getValueType(), Srl, M55);
19722 if (VT != And.getValueType())
19723 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19724 SDValue Sub = DAG.getNode(ISD::SUB, dl, VT, Op, And);
19726 // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
19727 SmallVector<SDValue, 8> Mask33(NumElts, Cst33);
19728 SDValue M33 = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask33);
19729 SmallVector<SDValue, 8> Twos(NumElts, DAG.getConstant(2, EltVT));
19730 SDValue TwosV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Twos);
19732 Srl = DAG.getNode(ISD::SRL, dl, VT, Sub, TwosV);
19733 if (NeedsBitcast) {
19734 Srl = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Srl);
19735 M33 = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M33);
19736 Sub = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Sub);
19739 SDValue AndRHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Srl, M33);
19740 SDValue AndLHS = DAG.getNode(ISD::AND, dl, M33.getValueType(), Sub, M33);
19741 if (VT != AndRHS.getValueType()) {
19742 AndRHS = DAG.getNode(ISD::BITCAST, dl, VT, AndRHS);
19743 AndLHS = DAG.getNode(ISD::BITCAST, dl, VT, AndLHS);
19745 SDValue Add = DAG.getNode(ISD::ADD, dl, VT, AndLHS, AndRHS);
19747 // v = (v + (v >> 4)) & 0x0F0F0F0F...
19748 SmallVector<SDValue, 8> Fours(NumElts, DAG.getConstant(4, EltVT));
19749 SDValue FoursV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Fours);
19750 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, FoursV);
19751 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19753 SmallVector<SDValue, 8> Mask0F(NumElts, Cst0F);
19754 SDValue M0F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Mask0F);
19755 if (NeedsBitcast) {
19756 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19757 M0F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M0F);
19759 And = DAG.getNode(ISD::AND, dl, M0F.getValueType(), Add, M0F);
19760 if (VT != And.getValueType())
19761 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19763 // The algorithm mentioned above uses:
19764 // v = (v * 0x01010101...) >> (Len - 8)
19766 // Change it to use vector adds + vector shifts which yield faster results on
19767 // Haswell than using vector integer multiplication.
19769 // For i32 elements:
19770 // v = v + (v >> 8)
19771 // v = v + (v >> 16)
19773 // For i64 elements:
19774 // v = v + (v >> 8)
19775 // v = v + (v >> 16)
19776 // v = v + (v >> 32)
19778 Add = And;
19779 SmallVector<SDValue, 8> Csts;
19780 for (unsigned i = 8; i <= Len/2; i *= 2) {
19781 Csts.assign(NumElts, DAG.getConstant(i, EltVT));
19782 SDValue CstsV = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Csts);
19783 Srl = DAG.getNode(ISD::SRL, dl, VT, Add, CstsV);
19784 Add = DAG.getNode(ISD::ADD, dl, VT, Add, Srl);
19788 // The result is on the least significant 6-bits on i32 and 7-bits on i64.
19789 SDValue Cst3F = DAG.getConstant(APInt(Len, Len == 32 ? 0x3F : 0x7F), EltVT);
19790 SmallVector<SDValue, 8> Cst3FV(NumElts, Cst3F);
19791 SDValue M3F = DAG.getNode(ISD::BUILD_VECTOR, dl, VT, Cst3FV);
19792 if (NeedsBitcast) {
19793 Add = DAG.getNode(ISD::BITCAST, dl, BitcastVT, Add);
19794 M3F = DAG.getNode(ISD::BITCAST, dl, BitcastVT, M3F);
19796 And = DAG.getNode(ISD::AND, dl, M3F.getValueType(), Add, M3F);
19797 if (VT != And.getValueType())
19798 And = DAG.getNode(ISD::BITCAST, dl, VT, And);
19800 return And;
19801 }
19803 static SDValue LowerLOAD_SUB(SDValue Op, SelectionDAG &DAG) {
19804 SDNode *Node = Op.getNode();
19806 EVT T = Node->getValueType(0);
19807 SDValue negOp = DAG.getNode(ISD::SUB, dl, T,
19808 DAG.getConstant(0, T), Node->getOperand(2));
19809 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl,
19810 cast<AtomicSDNode>(Node)->getMemoryVT(),
19811 Node->getOperand(0),
19812 Node->getOperand(1), negOp,
19813 cast<AtomicSDNode>(Node)->getMemOperand(),
19814 cast<AtomicSDNode>(Node)->getOrdering(),
19815 cast<AtomicSDNode>(Node)->getSynchScope());
19818 static SDValue LowerATOMIC_STORE(SDValue Op, SelectionDAG &DAG) {
19819 SDNode *Node = Op.getNode();
19821 EVT VT = cast<AtomicSDNode>(Node)->getMemoryVT();
19823 // Convert seq_cst store -> xchg
19824 // Convert wide store -> swap (-> cmpxchg8b/cmpxchg16b)
19825 // FIXME: On 32-bit, store -> fist or movq would be more efficient
19826 // (The only way to get a 16-byte store is cmpxchg16b)
19827 // FIXME: 16-byte ATOMIC_SWAP isn't actually hooked up at the moment.
19828 if (cast<AtomicSDNode>(Node)->getOrdering() == SequentiallyConsistent ||
19829 !DAG.getTargetLoweringInfo().isTypeLegal(VT)) {
19830 SDValue Swap = DAG.getAtomic(ISD::ATOMIC_SWAP, dl,
19831 cast<AtomicSDNode>(Node)->getMemoryVT(),
19832 Node->getOperand(0),
19833 Node->getOperand(1), Node->getOperand(2),
19834 cast<AtomicSDNode>(Node)->getMemOperand(),
19835 cast<AtomicSDNode>(Node)->getOrdering(),
19836 cast<AtomicSDNode>(Node)->getSynchScope());
19837 return Swap.getValue(1);
19838 }
19839 // Other atomic stores have a simple pattern.
19840 return Op;
19841 }
19843 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) {
19844 EVT VT = Op.getNode()->getSimpleValueType(0);
19846 // Let legalize expand this if it isn't a legal type yet.
19847 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
19848 return SDValue();
19850 SDVTList VTs = DAG.getVTList(VT, MVT::i32);
19852 unsigned Opc;
19853 bool ExtraOp = false;
19854 switch (Op.getOpcode()) {
19855 default: llvm_unreachable("Invalid code");
19856 case ISD::ADDC: Opc = X86ISD::ADD; break;
19857 case ISD::ADDE: Opc = X86ISD::ADC; ExtraOp = true; break;
19858 case ISD::SUBC: Opc = X86ISD::SUB; break;
19859 case ISD::SUBE: Opc = X86ISD::SBB; ExtraOp = true; break;
19860 }
19862 if (!ExtraOp)
19863 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19864 Op.getOperand(1));
19865 return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0),
19866 Op.getOperand(1), Op.getOperand(2));
19867 }
19869 static SDValue LowerFSINCOS(SDValue Op, const X86Subtarget *Subtarget,
19870 SelectionDAG &DAG) {
19871 assert(Subtarget->isTargetDarwin() && Subtarget->is64Bit());
19873 // For MacOSX, we want to call an alternative entry point: __sincos_stret,
19874 // which returns the values as { float, float } (in XMM0) or
19875 // { double, double } (which is returned in XMM0, XMM1).
19877 SDValue Arg = Op.getOperand(0);
19878 EVT ArgVT = Arg.getValueType();
19879 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
19881 TargetLowering::ArgListTy Args;
19882 TargetLowering::ArgListEntry Entry;
19884 Entry.Node = Arg;
19885 Entry.Ty = ArgTy;
19886 Entry.isSExt = false;
19887 Entry.isZExt = false;
19888 Args.push_back(Entry);
19890 bool isF64 = ArgVT == MVT::f64;
19891 // Only optimize x86_64 for now. i386 is a bit messy. For f32,
19892 // the small struct {f32, f32} is returned in (eax, edx). For f64,
19893 // the results are returned via SRet in memory.
19894 const char *LibcallName = isF64 ? "__sincos_stret" : "__sincosf_stret";
19895 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
19896 SDValue Callee = DAG.getExternalSymbol(LibcallName, TLI.getPointerTy());
19898 Type *RetTy = isF64
19899 ? (Type*)StructType::get(ArgTy, ArgTy, nullptr)
19900 : (Type*)VectorType::get(ArgTy, 4);
19902 TargetLowering::CallLoweringInfo CLI(DAG);
19903 CLI.setDebugLoc(dl).setChain(DAG.getEntryNode())
19904 .setCallee(CallingConv::C, RetTy, Callee, std::move(Args), 0);
19906 std::pair<SDValue, SDValue> CallResult = TLI.LowerCallTo(CLI);
19908 if (isF64)
19909 // Returned in xmm0 and xmm1.
19910 return CallResult.first;
19912 // Returned in bits 0:31 and 32:64 xmm0.
19913 SDValue SinVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19914 CallResult.first, DAG.getIntPtrConstant(0));
19915 SDValue CosVal = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ArgVT,
19916 CallResult.first, DAG.getIntPtrConstant(1));
19917 SDVTList Tys = DAG.getVTList(ArgVT, ArgVT);
19918 return DAG.getNode(ISD::MERGE_VALUES, dl, Tys, SinVal, CosVal);
19921 /// LowerOperation - Provide custom lowering hooks for some operations.
19923 SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
19924 switch (Op.getOpcode()) {
19925 default: llvm_unreachable("Should not custom lower this!");
19926 case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op,DAG);
19927 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, Subtarget, DAG);
19928 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS:
19929 return LowerCMP_SWAP(Op, Subtarget, DAG);
19930 case ISD::CTPOP: return LowerCTPOP(Op, Subtarget, DAG);
19931 case ISD::ATOMIC_LOAD_SUB: return LowerLOAD_SUB(Op,DAG);
19932 case ISD::ATOMIC_STORE: return LowerATOMIC_STORE(Op,DAG);
19933 case ISD::BUILD_VECTOR: return LowerBUILD_VECTOR(Op, DAG);
19934 case ISD::CONCAT_VECTORS: return LowerCONCAT_VECTORS(Op, DAG);
19935 case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
19936 case ISD::VSELECT: return LowerVSELECT(Op, DAG);
19937 case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
19938 case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
19939 case ISD::EXTRACT_SUBVECTOR: return LowerEXTRACT_SUBVECTOR(Op,Subtarget,DAG);
19940 case ISD::INSERT_SUBVECTOR: return LowerINSERT_SUBVECTOR(Op, Subtarget,DAG);
19941 case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
19942 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
19943 case ISD::GlobalAddress: return LowerGlobalAddress(Op, DAG);
19944 case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
19945 case ISD::ExternalSymbol: return LowerExternalSymbol(Op, DAG);
19946 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
19947 case ISD::SHL_PARTS:
19948 case ISD::SRA_PARTS:
19949 case ISD::SRL_PARTS: return LowerShiftParts(Op, DAG);
19950 case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
19951 case ISD::UINT_TO_FP: return LowerUINT_TO_FP(Op, DAG);
19952 case ISD::TRUNCATE: return LowerTRUNCATE(Op, DAG);
19953 case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, Subtarget, DAG);
19954 case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, Subtarget, DAG);
19955 case ISD::ANY_EXTEND: return LowerANY_EXTEND(Op, Subtarget, DAG);
19956 case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
19957 case ISD::FP_TO_UINT: return LowerFP_TO_UINT(Op, DAG);
19958 case ISD::FP_EXTEND: return LowerFP_EXTEND(Op, DAG);
19959 case ISD::LOAD: return LowerExtendedLoad(Op, Subtarget, DAG);
19960 case ISD::FABS:
19961 case ISD::FNEG: return LowerFABSorFNEG(Op, DAG);
19962 case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
19963 case ISD::FGETSIGN: return LowerFGETSIGN(Op, DAG);
19964 case ISD::SETCC: return LowerSETCC(Op, DAG);
19965 case ISD::SELECT: return LowerSELECT(Op, DAG);
19966 case ISD::BRCOND: return LowerBRCOND(Op, DAG);
19967 case ISD::JumpTable: return LowerJumpTable(Op, DAG);
19968 case ISD::VASTART: return LowerVASTART(Op, DAG);
19969 case ISD::VAARG: return LowerVAARG(Op, DAG);
19970 case ISD::VACOPY: return LowerVACOPY(Op, Subtarget, DAG);
19971 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, Subtarget, DAG);
19972 case ISD::INTRINSIC_VOID:
19973 case ISD::INTRINSIC_W_CHAIN: return LowerINTRINSIC_W_CHAIN(Op, Subtarget, DAG);
19974 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
19975 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
19976 case ISD::FRAME_TO_ARGS_OFFSET:
19977 return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
19978 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
19979 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
19980 case ISD::EH_SJLJ_SETJMP: return lowerEH_SJLJ_SETJMP(Op, DAG);
19981 case ISD::EH_SJLJ_LONGJMP: return lowerEH_SJLJ_LONGJMP(Op, DAG);
19982 case ISD::INIT_TRAMPOLINE: return LowerINIT_TRAMPOLINE(Op, DAG);
19983 case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
19984 case ISD::FLT_ROUNDS_: return LowerFLT_ROUNDS_(Op, DAG);
19985 case ISD::CTLZ: return LowerCTLZ(Op, DAG);
19986 case ISD::CTLZ_ZERO_UNDEF: return LowerCTLZ_ZERO_UNDEF(Op, DAG);
19987 case ISD::CTTZ: return LowerCTTZ(Op, DAG);
19988 case ISD::MUL: return LowerMUL(Op, Subtarget, DAG);
19989 case ISD::UMUL_LOHI:
19990 case ISD::SMUL_LOHI: return LowerMUL_LOHI(Op, Subtarget, DAG);
19991 case ISD::SRA:
19992 case ISD::SRL:
19993 case ISD::SHL: return LowerShift(Op, Subtarget, DAG);
19994 case ISD::SADDO:
19995 case ISD::UADDO:
19996 case ISD::SSUBO:
19997 case ISD::USUBO:
19998 case ISD::SMULO:
19999 case ISD::UMULO: return LowerXALUO(Op, DAG);
20000 case ISD::READCYCLECOUNTER: return LowerREADCYCLECOUNTER(Op, Subtarget,DAG);
20001 case ISD::BITCAST: return LowerBITCAST(Op, Subtarget, DAG);
20002 case ISD::ADDC:
20003 case ISD::ADDE:
20004 case ISD::SUBC:
20005 case ISD::SUBE: return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
20006 case ISD::ADD: return LowerADD(Op, DAG);
20007 case ISD::SUB: return LowerSUB(Op, DAG);
20008 case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
20012 /// ReplaceNodeResults - Replace a node with an illegal result type
20013 /// with a new node built out of custom code.
20014 void X86TargetLowering::ReplaceNodeResults(SDNode *N,
20015 SmallVectorImpl<SDValue>&Results,
20016 SelectionDAG &DAG) const {
20017 SDLoc dl(N);
20018 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
20019 switch (N->getOpcode()) {
20021 llvm_unreachable("Do not know how to custom type legalize this operation!");
20022 // We might have generated v2f32 FMIN/FMAX operations. Widen them to v4f32.
20023 case X86ISD::FMINC:
20025 case X86ISD::FMAXC:
20026 case X86ISD::FMAX: {
20027 EVT VT = N->getValueType(0);
20028 if (VT != MVT::v2f32)
20029 llvm_unreachable("Unexpected type (!= v2f32) on FMIN/FMAX.");
20030 SDValue UNDEF = DAG.getUNDEF(VT);
20031 SDValue LHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
20032 N->getOperand(0), UNDEF);
20033 SDValue RHS = DAG.getNode(ISD::CONCAT_VECTORS, dl, MVT::v4f32,
20034 N->getOperand(1), UNDEF);
20035 Results.push_back(DAG.getNode(N->getOpcode(), dl, MVT::v4f32, LHS, RHS));
20038 case ISD::SIGN_EXTEND_INREG:
20039 case ISD::ADDC:
20040 case ISD::ADDE:
20041 case ISD::SUBC:
20042 case ISD::SUBE:
20043 // We don't want to expand or promote these.
20044 return;
20045 case ISD::SDIV:
20046 case ISD::UDIV:
20047 case ISD::SREM:
20048 case ISD::UREM:
20049 case ISD::SDIVREM:
20050 case ISD::UDIVREM: {
20051 SDValue V = LowerWin64_i128OP(SDValue(N,0), DAG);
20052 Results.push_back(V);
20055 case ISD::FP_TO_SINT:
20056 case ISD::FP_TO_UINT: {
20057 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT;
20059 if (!IsSigned && !isIntegerTypeFTOL(SDValue(N, 0).getValueType()))
20062 std::pair<SDValue,SDValue> Vals =
20063 FP_TO_INTHelper(SDValue(N, 0), DAG, IsSigned, /*IsReplace=*/ true);
20064 SDValue FIST = Vals.first, StackSlot = Vals.second;
20065 if (FIST.getNode()) {
20066 EVT VT = N->getValueType(0);
20067 // Return a load from the stack slot.
20068 if (StackSlot.getNode())
20069 Results.push_back(DAG.getLoad(VT, dl, FIST, StackSlot,
20070 MachinePointerInfo(),
20071 false, false, false, 0));
20073 Results.push_back(FIST);
20077 case ISD::UINT_TO_FP: {
20078 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
20079 if (N->getOperand(0).getValueType() != MVT::v2i32 ||
20080 N->getValueType(0) != MVT::v2f32)
20081 return;
20082 SDValue ZExtIn = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::v2i64,
20083 N->getOperand(0));
20084 SDValue Bias = DAG.getConstantFP(BitsToDouble(0x4330000000000000ULL),
20085 MVT::f64);
20086 SDValue VBias = DAG.getNode(ISD::BUILD_VECTOR, dl, MVT::v2f64, Bias, Bias);
20087 SDValue Or = DAG.getNode(ISD::OR, dl, MVT::v2i64, ZExtIn,
20088 DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, VBias));
20089 Or = DAG.getNode(ISD::BITCAST, dl, MVT::v2f64, Or);
20090 SDValue Sub = DAG.getNode(ISD::FSUB, dl, MVT::v2f64, Or, VBias);
20091 Results.push_back(DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, Sub));
20094 case ISD::FP_ROUND: {
20095 if (!TLI.isTypeLegal(N->getOperand(0).getValueType()))
20097 SDValue V = DAG.getNode(X86ISD::VFPROUND, dl, MVT::v4f32, N->getOperand(0));
20098 Results.push_back(V);
20101 case ISD::INTRINSIC_W_CHAIN: {
20102 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue();
20104 default : llvm_unreachable("Do not know how to custom type "
20105 "legalize this intrinsic operation!");
20106 case Intrinsic::x86_rdtsc:
20107 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
20109 case Intrinsic::x86_rdtscp:
20110 return getReadTimeStampCounter(N, dl, X86ISD::RDTSCP_DAG, DAG, Subtarget,
20112 case Intrinsic::x86_rdpmc:
20113 return getReadPerformanceCounter(N, dl, DAG, Subtarget, Results);
20116 case ISD::READCYCLECOUNTER: {
20117 return getReadTimeStampCounter(N, dl, X86ISD::RDTSC_DAG, DAG, Subtarget,
20120 case ISD::ATOMIC_CMP_SWAP_WITH_SUCCESS: {
20121 EVT T = N->getValueType(0);
20122 assert((T == MVT::i64 || T == MVT::i128) && "can only expand cmpxchg pair");
20123 bool Regs64bit = T == MVT::i128;
20124 EVT HalfT = Regs64bit ? MVT::i64 : MVT::i32;
20125 SDValue cpInL, cpInH;
20126 cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
20127 DAG.getConstant(0, HalfT));
20128 cpInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(2),
20129 DAG.getConstant(1, HalfT));
20130 cpInL = DAG.getCopyToReg(N->getOperand(0), dl,
20131 Regs64bit ? X86::RAX : X86::EAX,
20133 cpInH = DAG.getCopyToReg(cpInL.getValue(0), dl,
20134 Regs64bit ? X86::RDX : X86::EDX,
20135 cpInH, cpInL.getValue(1));
20136 SDValue swapInL, swapInH;
20137 swapInL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
20138 DAG.getConstant(0, HalfT));
20139 swapInH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, HalfT, N->getOperand(3),
20140 DAG.getConstant(1, HalfT));
20141 swapInL = DAG.getCopyToReg(cpInH.getValue(0), dl,
20142 Regs64bit ? X86::RBX : X86::EBX,
20143 swapInL, cpInH.getValue(1));
20144 swapInH = DAG.getCopyToReg(swapInL.getValue(0), dl,
20145 Regs64bit ? X86::RCX : X86::ECX,
20146 swapInH, swapInL.getValue(1));
20147 SDValue Ops[] = { swapInH.getValue(0),
20149 swapInH.getValue(1) };
20150 SDVTList Tys = DAG.getVTList(MVT::Other, MVT::Glue);
20151 MachineMemOperand *MMO = cast<AtomicSDNode>(N)->getMemOperand();
20152 unsigned Opcode = Regs64bit ? X86ISD::LCMPXCHG16_DAG :
20153 X86ISD::LCMPXCHG8_DAG;
20154 SDValue Result = DAG.getMemIntrinsicNode(Opcode, dl, Tys, Ops, T, MMO);
20155 SDValue cpOutL = DAG.getCopyFromReg(Result.getValue(0), dl,
20156 Regs64bit ? X86::RAX : X86::EAX,
20157 HalfT, Result.getValue(1));
20158 SDValue cpOutH = DAG.getCopyFromReg(cpOutL.getValue(1), dl,
20159 Regs64bit ? X86::RDX : X86::EDX,
20160 HalfT, cpOutL.getValue(2));
20161 SDValue OpsF[] = { cpOutL.getValue(0), cpOutH.getValue(0)};
20163 SDValue EFLAGS = DAG.getCopyFromReg(cpOutH.getValue(1), dl, X86::EFLAGS,
20164 MVT::i32, cpOutH.getValue(2));
20165 SDValue Success =
20166 DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
20167 DAG.getConstant(X86::COND_E, MVT::i8), EFLAGS);
20168 Success = DAG.getZExtOrTrunc(Success, dl, N->getValueType(1));
20170 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, dl, T, OpsF));
20171 Results.push_back(Success);
20172 Results.push_back(EFLAGS.getValue(1));
20175 case ISD::ATOMIC_SWAP:
20176 case ISD::ATOMIC_LOAD_ADD:
20177 case ISD::ATOMIC_LOAD_SUB:
20178 case ISD::ATOMIC_LOAD_AND:
20179 case ISD::ATOMIC_LOAD_OR:
20180 case ISD::ATOMIC_LOAD_XOR:
20181 case ISD::ATOMIC_LOAD_NAND:
20182 case ISD::ATOMIC_LOAD_MIN:
20183 case ISD::ATOMIC_LOAD_MAX:
20184 case ISD::ATOMIC_LOAD_UMIN:
20185 case ISD::ATOMIC_LOAD_UMAX:
20186 case ISD::ATOMIC_LOAD: {
20187 // Delegate to generic TypeLegalization. Situations we can really handle
20188 // should have already been dealt with by AtomicExpandPass.cpp.
20191 case ISD::BITCAST: {
20192 assert(Subtarget->hasSSE2() && "Requires at least SSE2!");
20193 EVT DstVT = N->getValueType(0);
20194 EVT SrcVT = N->getOperand(0)->getValueType(0);
20196 if (SrcVT != MVT::f64 ||
20197 (DstVT != MVT::v2i32 && DstVT != MVT::v4i16 && DstVT != MVT::v8i8))
20200 unsigned NumElts = DstVT.getVectorNumElements();
20201 EVT SVT = DstVT.getVectorElementType();
20202 EVT WiderVT = EVT::getVectorVT(*DAG.getContext(), SVT, NumElts * 2);
20203 SDValue Expanded = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl,
20204 MVT::v2f64, N->getOperand(0));
20205 SDValue ToVecInt = DAG.getNode(ISD::BITCAST, dl, WiderVT, Expanded);
20207 if (ExperimentalVectorWideningLegalization) {
20208 // If we are legalizing vectors by widening, we already have the desired
20209 // legal vector type, just return it.
20210 Results.push_back(ToVecInt);
20214 SmallVector<SDValue, 8> Elts;
20215 for (unsigned i = 0, e = NumElts; i != e; ++i)
20216 Elts.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, SVT,
20217 ToVecInt, DAG.getIntPtrConstant(i)));
20219 Results.push_back(DAG.getNode(ISD::BUILD_VECTOR, dl, DstVT, Elts));
20224 const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
20225 switch (Opcode) {
20226 default: return nullptr;
20227 case X86ISD::BSF: return "X86ISD::BSF";
20228 case X86ISD::BSR: return "X86ISD::BSR";
20229 case X86ISD::SHLD: return "X86ISD::SHLD";
20230 case X86ISD::SHRD: return "X86ISD::SHRD";
20231 case X86ISD::FAND: return "X86ISD::FAND";
20232 case X86ISD::FANDN: return "X86ISD::FANDN";
20233 case X86ISD::FOR: return "X86ISD::FOR";
20234 case X86ISD::FXOR: return "X86ISD::FXOR";
20235 case X86ISD::FSRL: return "X86ISD::FSRL";
20236 case X86ISD::FILD: return "X86ISD::FILD";
20237 case X86ISD::FILD_FLAG: return "X86ISD::FILD_FLAG";
20238 case X86ISD::FP_TO_INT16_IN_MEM: return "X86ISD::FP_TO_INT16_IN_MEM";
20239 case X86ISD::FP_TO_INT32_IN_MEM: return "X86ISD::FP_TO_INT32_IN_MEM";
20240 case X86ISD::FP_TO_INT64_IN_MEM: return "X86ISD::FP_TO_INT64_IN_MEM";
20241 case X86ISD::FLD: return "X86ISD::FLD";
20242 case X86ISD::FST: return "X86ISD::FST";
20243 case X86ISD::CALL: return "X86ISD::CALL";
20244 case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
20245 case X86ISD::RDTSCP_DAG: return "X86ISD::RDTSCP_DAG";
20246 case X86ISD::RDPMC_DAG: return "X86ISD::RDPMC_DAG";
20247 case X86ISD::BT: return "X86ISD::BT";
20248 case X86ISD::CMP: return "X86ISD::CMP";
20249 case X86ISD::COMI: return "X86ISD::COMI";
20250 case X86ISD::UCOMI: return "X86ISD::UCOMI";
20251 case X86ISD::CMPM: return "X86ISD::CMPM";
20252 case X86ISD::CMPMU: return "X86ISD::CMPMU";
20253 case X86ISD::SETCC: return "X86ISD::SETCC";
20254 case X86ISD::SETCC_CARRY: return "X86ISD::SETCC_CARRY";
20255 case X86ISD::FSETCC: return "X86ISD::FSETCC";
20256 case X86ISD::CMOV: return "X86ISD::CMOV";
20257 case X86ISD::BRCOND: return "X86ISD::BRCOND";
20258 case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
20259 case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
20260 case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
20261 case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
20262 case X86ISD::Wrapper: return "X86ISD::Wrapper";
20263 case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
20264 case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
20265 case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
20266 case X86ISD::INSERTPS: return "X86ISD::INSERTPS";
20267 case X86ISD::PINSRB: return "X86ISD::PINSRB";
20268 case X86ISD::PINSRW: return "X86ISD::PINSRW";
20269 case X86ISD::PSHUFB: return "X86ISD::PSHUFB";
20270 case X86ISD::ANDNP: return "X86ISD::ANDNP";
20271 case X86ISD::PSIGN: return "X86ISD::PSIGN";
20272 case X86ISD::BLENDI: return "X86ISD::BLENDI";
20273 case X86ISD::SHRUNKBLEND: return "X86ISD::SHRUNKBLEND";
20274 case X86ISD::SUBUS: return "X86ISD::SUBUS";
20275 case X86ISD::HADD: return "X86ISD::HADD";
20276 case X86ISD::HSUB: return "X86ISD::HSUB";
20277 case X86ISD::FHADD: return "X86ISD::FHADD";
20278 case X86ISD::FHSUB: return "X86ISD::FHSUB";
20279 case X86ISD::UMAX: return "X86ISD::UMAX";
20280 case X86ISD::UMIN: return "X86ISD::UMIN";
20281 case X86ISD::SMAX: return "X86ISD::SMAX";
20282 case X86ISD::SMIN: return "X86ISD::SMIN";
20283 case X86ISD::FMAX: return "X86ISD::FMAX";
20284 case X86ISD::FMIN: return "X86ISD::FMIN";
20285 case X86ISD::FMAXC: return "X86ISD::FMAXC";
20286 case X86ISD::FMINC: return "X86ISD::FMINC";
20287 case X86ISD::FRSQRT: return "X86ISD::FRSQRT";
20288 case X86ISD::FRCP: return "X86ISD::FRCP";
20289 case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
20290 case X86ISD::TLSBASEADDR: return "X86ISD::TLSBASEADDR";
20291 case X86ISD::TLSCALL: return "X86ISD::TLSCALL";
20292 case X86ISD::EH_SJLJ_SETJMP: return "X86ISD::EH_SJLJ_SETJMP";
20293 case X86ISD::EH_SJLJ_LONGJMP: return "X86ISD::EH_SJLJ_LONGJMP";
20294 case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
20295 case X86ISD::TC_RETURN: return "X86ISD::TC_RETURN";
20296 case X86ISD::FNSTCW16m: return "X86ISD::FNSTCW16m";
20297 case X86ISD::FNSTSW16r: return "X86ISD::FNSTSW16r";
20298 case X86ISD::LCMPXCHG_DAG: return "X86ISD::LCMPXCHG_DAG";
20299 case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
20300 case X86ISD::LCMPXCHG16_DAG: return "X86ISD::LCMPXCHG16_DAG";
20301 case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
20302 case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
20303 case X86ISD::VZEXT: return "X86ISD::VZEXT";
20304 case X86ISD::VSEXT: return "X86ISD::VSEXT";
20305 case X86ISD::VTRUNC: return "X86ISD::VTRUNC";
20306 case X86ISD::VTRUNCM: return "X86ISD::VTRUNCM";
20307 case X86ISD::VINSERT: return "X86ISD::VINSERT";
20308 case X86ISD::VFPEXT: return "X86ISD::VFPEXT";
20309 case X86ISD::VFPROUND: return "X86ISD::VFPROUND";
20310 case X86ISD::VSHLDQ: return "X86ISD::VSHLDQ";
20311 case X86ISD::VSRLDQ: return "X86ISD::VSRLDQ";
20312 case X86ISD::VSHL: return "X86ISD::VSHL";
20313 case X86ISD::VSRL: return "X86ISD::VSRL";
20314 case X86ISD::VSRA: return "X86ISD::VSRA";
20315 case X86ISD::VSHLI: return "X86ISD::VSHLI";
20316 case X86ISD::VSRLI: return "X86ISD::VSRLI";
20317 case X86ISD::VSRAI: return "X86ISD::VSRAI";
20318 case X86ISD::CMPP: return "X86ISD::CMPP";
20319 case X86ISD::PCMPEQ: return "X86ISD::PCMPEQ";
20320 case X86ISD::PCMPGT: return "X86ISD::PCMPGT";
20321 case X86ISD::PCMPEQM: return "X86ISD::PCMPEQM";
20322 case X86ISD::PCMPGTM: return "X86ISD::PCMPGTM";
20323 case X86ISD::ADD: return "X86ISD::ADD";
20324 case X86ISD::SUB: return "X86ISD::SUB";
20325 case X86ISD::ADC: return "X86ISD::ADC";
20326 case X86ISD::SBB: return "X86ISD::SBB";
20327 case X86ISD::SMUL: return "X86ISD::SMUL";
20328 case X86ISD::UMUL: return "X86ISD::UMUL";
20329 case X86ISD::SMUL8: return "X86ISD::SMUL8";
20330 case X86ISD::UMUL8: return "X86ISD::UMUL8";
20331 case X86ISD::SDIVREM8_SEXT_HREG: return "X86ISD::SDIVREM8_SEXT_HREG";
20332 case X86ISD::UDIVREM8_ZEXT_HREG: return "X86ISD::UDIVREM8_ZEXT_HREG";
20333 case X86ISD::INC: return "X86ISD::INC";
20334 case X86ISD::DEC: return "X86ISD::DEC";
20335 case X86ISD::OR: return "X86ISD::OR";
20336 case X86ISD::XOR: return "X86ISD::XOR";
20337 case X86ISD::AND: return "X86ISD::AND";
20338 case X86ISD::BEXTR: return "X86ISD::BEXTR";
20339 case X86ISD::MUL_IMM: return "X86ISD::MUL_IMM";
20340 case X86ISD::PTEST: return "X86ISD::PTEST";
20341 case X86ISD::TESTP: return "X86ISD::TESTP";
20342 case X86ISD::TESTM: return "X86ISD::TESTM";
20343 case X86ISD::TESTNM: return "X86ISD::TESTNM";
20344 case X86ISD::KORTEST: return "X86ISD::KORTEST";
20345 case X86ISD::PACKSS: return "X86ISD::PACKSS";
20346 case X86ISD::PACKUS: return "X86ISD::PACKUS";
20347 case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
20348 case X86ISD::VALIGN: return "X86ISD::VALIGN";
20349 case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
20350 case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
20351 case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";
20352 case X86ISD::SHUFP: return "X86ISD::SHUFP";
20353 case X86ISD::MOVLHPS: return "X86ISD::MOVLHPS";
20354 case X86ISD::MOVLHPD: return "X86ISD::MOVLHPD";
20355 case X86ISD::MOVHLPS: return "X86ISD::MOVHLPS";
20356 case X86ISD::MOVLPS: return "X86ISD::MOVLPS";
20357 case X86ISD::MOVLPD: return "X86ISD::MOVLPD";
20358 case X86ISD::MOVDDUP: return "X86ISD::MOVDDUP";
20359 case X86ISD::MOVSHDUP: return "X86ISD::MOVSHDUP";
20360 case X86ISD::MOVSLDUP: return "X86ISD::MOVSLDUP";
20361 case X86ISD::MOVSD: return "X86ISD::MOVSD";
20362 case X86ISD::MOVSS: return "X86ISD::MOVSS";
20363 case X86ISD::UNPCKL: return "X86ISD::UNPCKL";
20364 case X86ISD::UNPCKH: return "X86ISD::UNPCKH";
20365 case X86ISD::VBROADCAST: return "X86ISD::VBROADCAST";
20366 case X86ISD::VBROADCASTM: return "X86ISD::VBROADCASTM";
20367 case X86ISD::VEXTRACT: return "X86ISD::VEXTRACT";
20368 case X86ISD::VPERMILPI: return "X86ISD::VPERMILPI";
20369 case X86ISD::VPERM2X128: return "X86ISD::VPERM2X128";
20370 case X86ISD::VPERMV: return "X86ISD::VPERMV";
20371 case X86ISD::VPERMV3: return "X86ISD::VPERMV3";
20372 case X86ISD::VPERMIV3: return "X86ISD::VPERMIV3";
20373 case X86ISD::VPERMI: return "X86ISD::VPERMI";
20374 case X86ISD::PMULUDQ: return "X86ISD::PMULUDQ";
20375 case X86ISD::PMULDQ: return "X86ISD::PMULDQ";
20376 case X86ISD::VASTART_SAVE_XMM_REGS: return "X86ISD::VASTART_SAVE_XMM_REGS";
20377 case X86ISD::VAARG_64: return "X86ISD::VAARG_64";
20378 case X86ISD::WIN_ALLOCA: return "X86ISD::WIN_ALLOCA";
20379 case X86ISD::MEMBARRIER: return "X86ISD::MEMBARRIER";
20380 case X86ISD::SEG_ALLOCA: return "X86ISD::SEG_ALLOCA";
20381 case X86ISD::WIN_FTOL: return "X86ISD::WIN_FTOL";
20382 case X86ISD::SAHF: return "X86ISD::SAHF";
20383 case X86ISD::RDRAND: return "X86ISD::RDRAND";
20384 case X86ISD::RDSEED: return "X86ISD::RDSEED";
20385 case X86ISD::FMADD: return "X86ISD::FMADD";
20386 case X86ISD::FMSUB: return "X86ISD::FMSUB";
20387 case X86ISD::FNMADD: return "X86ISD::FNMADD";
20388 case X86ISD::FNMSUB: return "X86ISD::FNMSUB";
20389 case X86ISD::FMADDSUB: return "X86ISD::FMADDSUB";
20390 case X86ISD::FMSUBADD: return "X86ISD::FMSUBADD";
20391 case X86ISD::PCMPESTRI: return "X86ISD::PCMPESTRI";
20392 case X86ISD::PCMPISTRI: return "X86ISD::PCMPISTRI";
20393 case X86ISD::XTEST: return "X86ISD::XTEST";
20394 case X86ISD::COMPRESS: return "X86ISD::COMPRESS";
20395 case X86ISD::EXPAND: return "X86ISD::EXPAND";
20396 case X86ISD::SELECT: return "X86ISD::SELECT";
20397 case X86ISD::ADDSUB: return "X86ISD::ADDSUB";
20398 case X86ISD::RCP28: return "X86ISD::RCP28";
20399 case X86ISD::RSQRT28: return "X86ISD::RSQRT28";
20403 // isLegalAddressingMode - Return true if the addressing mode represented
20404 // by AM is legal for this target, for a load/store of the specified type.
20405 bool X86TargetLowering::isLegalAddressingMode(const AddrMode &AM,
20407 // X86 supports extremely general addressing modes.
20408 CodeModel::Model M = getTargetMachine().getCodeModel();
20409 Reloc::Model R = getTargetMachine().getRelocationModel();
20411 // X86 allows a sign-extended 32-bit immediate field as a displacement.
20412 if (!X86::isOffsetSuitableForCodeModel(AM.BaseOffs, M, AM.BaseGV != nullptr))
20416 unsigned GVFlags =
20417 Subtarget->ClassifyGlobalReference(AM.BaseGV, getTargetMachine());
20419 // If a reference to this global requires an extra load, we can't fold it.
20420 if (isGlobalStubReference(GVFlags))
20423 // If BaseGV requires a register for the PIC base, we cannot also have a
20424 // BaseReg specified.
20425 if (AM.HasBaseReg && isGlobalRelativeToPICBase(GVFlags))
20428 // If lower 4G is not available, then we must use rip-relative addressing.
20429 if ((M != CodeModel::Small || R != Reloc::Static) &&
20430 Subtarget->is64Bit() && (AM.BaseOffs || AM.Scale > 1))
20434 switch (AM.Scale) {
20440 // These scales always work.
20445 // These scales are formed with basereg+scalereg. Only accept if there is
20450 default: // Other stuff never works.
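// The hardware addressing form is base + scale*index + disp with scales of
// 1, 2, 4 or 8. Scales of 3, 5 and 9 can still be formed by reusing the
// index register as the base (e.g. lea (%reg,%reg,2) yields 3*reg), which is
// why those values are only accepted when no base register is already in use.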
20457 bool X86TargetLowering::isVectorShiftByScalarCheap(Type *Ty) const {
20458 unsigned Bits = Ty->getScalarSizeInBits();
20460 // 8-bit shifts are always expensive, but versions with a scalar amount aren't
20461 // particularly cheaper than those without.
20465 // On AVX2 there are new vpsllv[dq] instructions (and other shifts), that make
20466 // variable shifts just as cheap as scalar ones.
20467 if (Subtarget->hasInt256() && (Bits == 32 || Bits == 64))
20470 // Otherwise, it's significantly cheaper to shift by a scalar amount than by a
20471 // fully general vector.
20475 bool X86TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
20476 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20478 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
20479 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
20480 return NumBits1 > NumBits2;
20483 bool X86TargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
20484 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
20487 if (!isTypeLegal(EVT::getEVT(Ty1)))
20490 assert(Ty1->getPrimitiveSizeInBits() <= 64 && "i128 is probably not a noop");
20492 // Assuming the caller doesn't have a zeroext or signext return parameter,
20493 // truncation all the way down to i1 is valid.
20497 bool X86TargetLowering::isLegalICmpImmediate(int64_t Imm) const {
20498 return isInt<32>(Imm);
20501 bool X86TargetLowering::isLegalAddImmediate(int64_t Imm) const {
20502 // Can also use sub to handle negated immediates.
20503 return isInt<32>(Imm);
20506 bool X86TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
20507 if (!VT1.isInteger() || !VT2.isInteger())
20509 unsigned NumBits1 = VT1.getSizeInBits();
20510 unsigned NumBits2 = VT2.getSizeInBits();
20511 return NumBits1 > NumBits2;
20514 bool X86TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const {
20515 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20516 return Ty1->isIntegerTy(32) && Ty2->isIntegerTy(64) && Subtarget->is64Bit();
20519 bool X86TargetLowering::isZExtFree(EVT VT1, EVT VT2) const {
20520 // x86-64 implicitly zero-extends 32-bit results in 64-bit registers.
20521 return VT1 == MVT::i32 && VT2 == MVT::i64 && Subtarget->is64Bit();
20524 bool X86TargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
20525 EVT VT1 = Val.getValueType();
20526 if (isZExtFree(VT1, VT2))
20529 if (Val.getOpcode() != ISD::LOAD)
20532 if (!VT1.isSimple() || !VT1.isInteger() ||
20533 !VT2.isSimple() || !VT2.isInteger())
20536 switch (VT1.getSimpleVT().SimpleTy) {
20541 // X86 has 8, 16, and 32-bit zero-extending loads.
20548 bool X86TargetLowering::isVectorLoadExtDesirable(SDValue) const { return true; }
20551 X86TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
20552 if (!(Subtarget->hasFMA() || Subtarget->hasFMA4()))
20555 VT = VT.getScalarType();
20557 if (!VT.isSimple())
20560 switch (VT.getSimpleVT().SimpleTy) {
20571 bool X86TargetLowering::isNarrowingProfitable(EVT VT1, EVT VT2) const {
20572 // i16 instructions are longer (0x66 prefix) and potentially slower.
20573 return !(VT1 == MVT::i32 && VT2 == MVT::i16);
20576 /// isShuffleMaskLegal - Targets can use this to indicate that they only
20577 /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
20578 /// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
20579 /// are assumed to be legal.
20581 X86TargetLowering::isShuffleMaskLegal(const SmallVectorImpl<int> &M,
20583 if (!VT.isSimple())
20586 MVT SVT = VT.getSimpleVT();
20588 // Very little shuffling can be done for 64-bit vectors right now.
20589 if (VT.getSizeInBits() == 64)
20592 // This is an experimental legality test that is tailored to match the
20593 // legality test of the experimental lowering more closely. They are gated
20594 // separately to ease testing of performance differences.
20595 if (ExperimentalVectorShuffleLegality)
20596 // We only care that the types being shuffled are legal. The lowering can
20597 // handle any possible shuffle mask that results.
20598 return isTypeLegal(SVT);
20600 // If this is a single-input shuffle with no 128 bit lane crossings we can
20601 // lower it into pshufb.
20602 if ((SVT.is128BitVector() && Subtarget->hasSSSE3()) ||
20603 (SVT.is256BitVector() && Subtarget->hasInt256())) {
20604 bool isLegal = true;
20605 for (unsigned I = 0, E = M.size(); I != E; ++I) {
20606 if (M[I] >= (int)SVT.getVectorNumElements() ||
20607 ShuffleCrosses128bitLane(SVT, I, M[I])) {
20616 // FIXME: blends, shifts.
20617 return (SVT.getVectorNumElements() == 2 ||
20618 ShuffleVectorSDNode::isSplatMask(&M[0], VT) ||
20619 isMOVLMask(M, SVT) ||
20620 isCommutedMOVLMask(M, SVT) ||
20621 isMOVHLPSMask(M, SVT) ||
20622 isSHUFPMask(M, SVT) ||
20623 isSHUFPMask(M, SVT, /* Commuted */ true) ||
20624 isPSHUFDMask(M, SVT) ||
20625 isPSHUFDMask(M, SVT, /* SecondOperand */ true) ||
20626 isPSHUFHWMask(M, SVT, Subtarget->hasInt256()) ||
20627 isPSHUFLWMask(M, SVT, Subtarget->hasInt256()) ||
20628 isPALIGNRMask(M, SVT, Subtarget) ||
20629 isUNPCKLMask(M, SVT, Subtarget->hasInt256()) ||
20630 isUNPCKHMask(M, SVT, Subtarget->hasInt256()) ||
20631 isUNPCKL_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20632 isUNPCKH_v_undef_Mask(M, SVT, Subtarget->hasInt256()) ||
20633 isBlendMask(M, SVT, Subtarget->hasSSE41(), Subtarget->hasInt256()) ||
20634 (Subtarget->hasSSE41() && isINSERTPSMask(M, SVT)));
20638 X86TargetLowering::isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
20640 if (!VT.isSimple())
20643 MVT SVT = VT.getSimpleVT();
20645 // This is an experimental legality test that is tailored to match the
20646 // legality test of the experimental lowering more closely. They are gated
20647 // separately to ease testing of performance differences.
20648 if (ExperimentalVectorShuffleLegality)
20649 // The new vector shuffle lowering is very good at managing zero-inputs.
20650 return isShuffleMaskLegal(Mask, VT);
20652 unsigned NumElts = SVT.getVectorNumElements();
20653 // FIXME: This collection of masks seems suspect.
20656 if (NumElts == 4 && SVT.is128BitVector()) {
20657 return (isMOVLMask(Mask, SVT) ||
20658 isCommutedMOVLMask(Mask, SVT, true) ||
20659 isSHUFPMask(Mask, SVT) ||
20660 isSHUFPMask(Mask, SVT, /* Commuted */ true) ||
20661 isBlendMask(Mask, SVT, Subtarget->hasSSE41(),
20662 Subtarget->hasInt256()));
20667 //===----------------------------------------------------------------------===//
20668 // X86 Scheduler Hooks
20669 //===----------------------------------------------------------------------===//
20671 /// Utility function to emit xbegin specifying the start of an RTM region.
20672 static MachineBasicBlock *EmitXBegin(MachineInstr *MI, MachineBasicBlock *MBB,
20673 const TargetInstrInfo *TII) {
20674 DebugLoc DL = MI->getDebugLoc();
20676 const BasicBlock *BB = MBB->getBasicBlock();
20677 MachineFunction::iterator I = MBB;
20680 // For the v = xbegin(), we generate
20691 MachineBasicBlock *thisMBB = MBB;
20692 MachineFunction *MF = MBB->getParent();
20693 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
20694 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
20695 MF->insert(I, mainMBB);
20696 MF->insert(I, sinkMBB);
20698 // Transfer the remainder of BB and its successor edges to sinkMBB.
20699 sinkMBB->splice(sinkMBB->begin(), MBB,
20700 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
20701 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
20705 // # fallthrough to mainMBB
20706 // # on abort, branch to sinkMBB
20707 BuildMI(thisMBB, DL, TII->get(X86::XBEGIN_4)).addMBB(sinkMBB);
20708 thisMBB->addSuccessor(mainMBB);
20709 thisMBB->addSuccessor(sinkMBB);
20713 BuildMI(mainMBB, DL, TII->get(X86::MOV32ri), X86::EAX).addImm(-1);
20714 mainMBB->addSuccessor(sinkMBB);
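// XBEGIN falls through here when the transaction starts successfully; the -1
// (~0) loaded into EAX matches _XBEGIN_STARTED. On an abort, execution
// resumes at the fallback label (sinkMBB) with the abort status already in
// EAX, which is why sinkMBB simply copies EAX into the result register.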
20717 // EAX is live into the sinkMBB
20718 sinkMBB->addLiveIn(X86::EAX);
20719 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
20720 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20723 MI->eraseFromParent();
20727 // FIXME: When we get size-specific XMM0 registers, i.e. XMM0_V16I8
20728 // or XMM0_V32I8 in AVX, all of this code can be replaced with that
20729 // in the .td file.
20730 static MachineBasicBlock *EmitPCMPSTRM(MachineInstr *MI, MachineBasicBlock *BB,
20731 const TargetInstrInfo *TII) {
20733 switch (MI->getOpcode()) {
20734 default: llvm_unreachable("illegal opcode!");
20735 case X86::PCMPISTRM128REG: Opc = X86::PCMPISTRM128rr; break;
20736 case X86::VPCMPISTRM128REG: Opc = X86::VPCMPISTRM128rr; break;
20737 case X86::PCMPISTRM128MEM: Opc = X86::PCMPISTRM128rm; break;
20738 case X86::VPCMPISTRM128MEM: Opc = X86::VPCMPISTRM128rm; break;
20739 case X86::PCMPESTRM128REG: Opc = X86::PCMPESTRM128rr; break;
20740 case X86::VPCMPESTRM128REG: Opc = X86::VPCMPESTRM128rr; break;
20741 case X86::PCMPESTRM128MEM: Opc = X86::PCMPESTRM128rm; break;
20742 case X86::VPCMPESTRM128MEM: Opc = X86::VPCMPESTRM128rm; break;
20745 DebugLoc dl = MI->getDebugLoc();
20746 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20748 unsigned NumArgs = MI->getNumOperands();
20749 for (unsigned i = 1; i < NumArgs; ++i) {
20750 MachineOperand &Op = MI->getOperand(i);
20751 if (!(Op.isReg() && Op.isImplicit()))
20752 MIB.addOperand(Op);
20754 if (MI->hasOneMemOperand())
20755 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
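// Operand 0 of the pseudo (the result) is intentionally skipped above: the
// real PCMP*STRM instruction implicitly defines XMM0, and the COPY emitted
// below moves XMM0 into the pseudo's destination register.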
20757 BuildMI(*BB, MI, dl,
20758 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20759 .addReg(X86::XMM0);
20761 MI->eraseFromParent();
20765 // FIXME: Custom handling because TableGen doesn't support multiple implicit
20766 // defs in an instruction pattern
20767 static MachineBasicBlock *EmitPCMPSTRI(MachineInstr *MI, MachineBasicBlock *BB,
20768 const TargetInstrInfo *TII) {
20770 switch (MI->getOpcode()) {
20771 default: llvm_unreachable("illegal opcode!");
20772 case X86::PCMPISTRIREG: Opc = X86::PCMPISTRIrr; break;
20773 case X86::VPCMPISTRIREG: Opc = X86::VPCMPISTRIrr; break;
20774 case X86::PCMPISTRIMEM: Opc = X86::PCMPISTRIrm; break;
20775 case X86::VPCMPISTRIMEM: Opc = X86::VPCMPISTRIrm; break;
20776 case X86::PCMPESTRIREG: Opc = X86::PCMPESTRIrr; break;
20777 case X86::VPCMPESTRIREG: Opc = X86::VPCMPESTRIrr; break;
20778 case X86::PCMPESTRIMEM: Opc = X86::PCMPESTRIrm; break;
20779 case X86::VPCMPESTRIMEM: Opc = X86::VPCMPESTRIrm; break;
20782 DebugLoc dl = MI->getDebugLoc();
20783 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(Opc));
20785 unsigned NumArgs = MI->getNumOperands(); // remove the results
20786 for (unsigned i = 1; i < NumArgs; ++i) {
20787 MachineOperand &Op = MI->getOperand(i);
20788 if (!(Op.isReg() && Op.isImplicit()))
20789 MIB.addOperand(Op);
20791 if (MI->hasOneMemOperand())
20792 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
20794 BuildMI(*BB, MI, dl,
20795 TII->get(TargetOpcode::COPY), MI->getOperand(0).getReg())
20796 .addReg(X86::ECX);
20798 MI->eraseFromParent();
20802 static MachineBasicBlock *EmitMonitor(MachineInstr *MI, MachineBasicBlock *BB,
20803 const X86Subtarget *Subtarget) {
20804 DebugLoc dl = MI->getDebugLoc();
20805 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20806 // Address into RAX/EAX, other two args into ECX, EDX.
20807 unsigned MemOpc = Subtarget->is64Bit() ? X86::LEA64r : X86::LEA32r;
20808 unsigned MemReg = Subtarget->is64Bit() ? X86::RAX : X86::EAX;
20809 MachineInstrBuilder MIB = BuildMI(*BB, MI, dl, TII->get(MemOpc), MemReg);
20810 for (int i = 0; i < X86::AddrNumOperands; ++i)
20811 MIB.addOperand(MI->getOperand(i));
20813 unsigned ValOps = X86::AddrNumOperands;
20814 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::ECX)
20815 .addReg(MI->getOperand(ValOps).getReg());
20816 BuildMI(*BB, MI, dl, TII->get(TargetOpcode::COPY), X86::EDX)
20817 .addReg(MI->getOperand(ValOps+1).getReg());
20819 // The instruction doesn't actually take any operands though.
20820 BuildMI(*BB, MI, dl, TII->get(X86::MONITORrrr));
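// MONITOR implicitly reads RAX/EAX (the linear address), ECX (extensions)
// and EDX (hints), which is why the values were copied into those registers
// above rather than being attached to the instruction as operands.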
20822 MI->eraseFromParent(); // The pseudo is gone now.
20826 MachineBasicBlock *
20827 X86TargetLowering::EmitVAARG64WithCustomInserter(MachineInstr *MI,
20828 MachineBasicBlock *MBB) const {
20829 // Emit va_arg instruction on X86-64.
20831 // Operands to this pseudo-instruction:
20832 // 0 ) Output : destination address (reg)
20833 // 1-5) Input : va_list address (addr, i64mem)
20834 // 6 ) ArgSize : Size (in bytes) of vararg type
20835 // 7 ) ArgMode : 0=overflow only, 1=use gp_offset, 2=use fp_offset
20836 // 8 ) Align : Alignment of type
20837 // 9 ) EFLAGS (implicit-def)
20839 assert(MI->getNumOperands() == 10 && "VAARG_64 should have 10 operands!");
20840 assert(X86::AddrNumOperands == 5 && "VAARG_64 assumes 5 address operands");
20842 unsigned DestReg = MI->getOperand(0).getReg();
20843 MachineOperand &Base = MI->getOperand(1);
20844 MachineOperand &Scale = MI->getOperand(2);
20845 MachineOperand &Index = MI->getOperand(3);
20846 MachineOperand &Disp = MI->getOperand(4);
20847 MachineOperand &Segment = MI->getOperand(5);
20848 unsigned ArgSize = MI->getOperand(6).getImm();
20849 unsigned ArgMode = MI->getOperand(7).getImm();
20850 unsigned Align = MI->getOperand(8).getImm();
20852 // Memory Reference
20853 assert(MI->hasOneMemOperand() && "Expected VAARG_64 to have one memoperand");
20854 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
20855 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
20857 // Machine Information
20858 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
20859 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
20860 const TargetRegisterClass *AddrRegClass = getRegClassFor(MVT::i64);
20861 const TargetRegisterClass *OffsetRegClass = getRegClassFor(MVT::i32);
20862 DebugLoc DL = MI->getDebugLoc();
20864 // struct va_list {
20865 //   i32   gp_offset
20866 //   i32   fp_offset
20867 //   i64   overflow_area (address)
20868 //   i64   reg_save_area (address)
20869 // }
20870 // sizeof(va_list) = 24
20871 // alignment(va_list) = 8
20873 unsigned TotalNumIntRegs = 6;
20874 unsigned TotalNumXMMRegs = 8;
20875 bool UseGPOffset = (ArgMode == 1);
20876 bool UseFPOffset = (ArgMode == 2);
20877 unsigned MaxOffset = TotalNumIntRegs * 8 +
20878 (UseFPOffset ? TotalNumXMMRegs * 16 : 0);
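// In the SysV x86-64 reg_save_area the 6 GP registers occupy the first
// 6*8 = 48 bytes and the 8 XMM registers the following 8*16 = 128 bytes,
// so gp_offset runs from 0 to 48 and fp_offset from 48 to 176.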
20880 /* Align ArgSize to a multiple of 8 */
20881 unsigned ArgSizeA8 = (ArgSize + 7) & ~7;
20882 bool NeedsAlign = (Align > 8);
20884 MachineBasicBlock *thisMBB = MBB;
20885 MachineBasicBlock *overflowMBB;
20886 MachineBasicBlock *offsetMBB;
20887 MachineBasicBlock *endMBB;
20889 unsigned OffsetDestReg = 0; // Argument address computed by offsetMBB
20890 unsigned OverflowDestReg = 0; // Argument address computed by overflowMBB
20891 unsigned OffsetReg = 0;
20893 if (!UseGPOffset && !UseFPOffset) {
20894 // If we only pull from the overflow region, we don't create a branch.
20895 // We don't need to alter control flow.
20896 OffsetDestReg = 0; // unused
20897 OverflowDestReg = DestReg;
20899 offsetMBB = nullptr;
20900 overflowMBB = thisMBB;
20903 // First emit code to check if gp_offset (or fp_offset) is below the bound.
20904 // If so, pull the argument from reg_save_area. (branch to offsetMBB)
20905 // If not, pull from overflow_area. (branch to overflowMBB)
20910 // offsetMBB overflowMBB
20915 // Registers for the PHI in endMBB
20916 OffsetDestReg = MRI.createVirtualRegister(AddrRegClass);
20917 OverflowDestReg = MRI.createVirtualRegister(AddrRegClass);
20919 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
20920 MachineFunction *MF = MBB->getParent();
20921 overflowMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20922 offsetMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20923 endMBB = MF->CreateMachineBasicBlock(LLVM_BB);
20925 MachineFunction::iterator MBBIter = MBB;
20928 // Insert the new basic blocks
20929 MF->insert(MBBIter, offsetMBB);
20930 MF->insert(MBBIter, overflowMBB);
20931 MF->insert(MBBIter, endMBB);
20933 // Transfer the remainder of MBB and its successor edges to endMBB.
20934 endMBB->splice(endMBB->begin(), thisMBB,
20935 std::next(MachineBasicBlock::iterator(MI)), thisMBB->end());
20936 endMBB->transferSuccessorsAndUpdatePHIs(thisMBB);
20938 // Make offsetMBB and overflowMBB successors of thisMBB
20939 thisMBB->addSuccessor(offsetMBB);
20940 thisMBB->addSuccessor(overflowMBB);
20942 // endMBB is a successor of both offsetMBB and overflowMBB
20943 offsetMBB->addSuccessor(endMBB);
20944 overflowMBB->addSuccessor(endMBB);
20946 // Load the offset value into a register
20947 OffsetReg = MRI.createVirtualRegister(OffsetRegClass);
20948 BuildMI(thisMBB, DL, TII->get(X86::MOV32rm), OffsetReg)
20952 .addDisp(Disp, UseFPOffset ? 4 : 0)
20953 .addOperand(Segment)
20954 .setMemRefs(MMOBegin, MMOEnd);
20956 // Check if there is enough room left to pull this argument.
20957 BuildMI(thisMBB, DL, TII->get(X86::CMP32ri))
20959 .addImm(MaxOffset + 8 - ArgSizeA8);
20961 // Branch to "overflowMBB" if offset >= max
20962 // Fall through to "offsetMBB" otherwise
20963 BuildMI(thisMBB, DL, TII->get(X86::GetCondBranchFromCond(X86::COND_AE)))
20964 .addMBB(overflowMBB);
20967 // In offsetMBB, emit code to use the reg_save_area.
20969 assert(OffsetReg != 0);
20971 // Read the reg_save_area address.
20972 unsigned RegSaveReg = MRI.createVirtualRegister(AddrRegClass);
20973 BuildMI(offsetMBB, DL, TII->get(X86::MOV64rm), RegSaveReg)
20978 .addOperand(Segment)
20979 .setMemRefs(MMOBegin, MMOEnd);
20981 // Zero-extend the offset
20982 unsigned OffsetReg64 = MRI.createVirtualRegister(AddrRegClass);
20983 BuildMI(offsetMBB, DL, TII->get(X86::SUBREG_TO_REG), OffsetReg64)
20986 .addImm(X86::sub_32bit);
20988 // Add the offset to the reg_save_area to get the final address.
20989 BuildMI(offsetMBB, DL, TII->get(X86::ADD64rr), OffsetDestReg)
20990 .addReg(OffsetReg64)
20991 .addReg(RegSaveReg);
20993 // Compute the offset for the next argument
20994 unsigned NextOffsetReg = MRI.createVirtualRegister(OffsetRegClass);
20995 BuildMI(offsetMBB, DL, TII->get(X86::ADD32ri), NextOffsetReg)
20997 .addImm(UseFPOffset ? 16 : 8);
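// Each GP register slot in the save area is 8 bytes and each XMM slot is
// 16 bytes, so the stored offset advances by the corresponding amount.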
20999 // Store it back into the va_list.
21000 BuildMI(offsetMBB, DL, TII->get(X86::MOV32mr))
21004 .addDisp(Disp, UseFPOffset ? 4 : 0)
21005 .addOperand(Segment)
21006 .addReg(NextOffsetReg)
21007 .setMemRefs(MMOBegin, MMOEnd);
21010 BuildMI(offsetMBB, DL, TII->get(X86::JMP_1))
21015 // Emit code to use overflow area
21018 // Load the overflow_area address into a register.
21019 unsigned OverflowAddrReg = MRI.createVirtualRegister(AddrRegClass);
21020 BuildMI(overflowMBB, DL, TII->get(X86::MOV64rm), OverflowAddrReg)
21025 .addOperand(Segment)
21026 .setMemRefs(MMOBegin, MMOEnd);
21028 // If we need to align it, do so. Otherwise, just copy the address
21029 // to OverflowDestReg.
21031 // Align the overflow address
21032 assert((Align & (Align-1)) == 0 && "Alignment must be a power of 2");
21033 unsigned TmpReg = MRI.createVirtualRegister(AddrRegClass);
21035 // aligned_addr = (addr + (align-1)) & ~(align-1)
21036 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), TmpReg)
21037 .addReg(OverflowAddrReg)
21040 BuildMI(overflowMBB, DL, TII->get(X86::AND64ri32), OverflowDestReg)
21042 .addImm(~(uint64_t)(Align-1));
21044 BuildMI(overflowMBB, DL, TII->get(TargetOpcode::COPY), OverflowDestReg)
21045 .addReg(OverflowAddrReg);
21048 // Compute the next overflow address after this argument.
21049 // (the overflow address should be kept 8-byte aligned)
21050 unsigned NextAddrReg = MRI.createVirtualRegister(AddrRegClass);
21051 BuildMI(overflowMBB, DL, TII->get(X86::ADD64ri32), NextAddrReg)
21052 .addReg(OverflowDestReg)
21053 .addImm(ArgSizeA8);
21055 // Store the new overflow address.
21056 BuildMI(overflowMBB, DL, TII->get(X86::MOV64mr))
21061 .addOperand(Segment)
21062 .addReg(NextAddrReg)
21063 .setMemRefs(MMOBegin, MMOEnd);
21065 // If we branched, emit the PHI to the front of endMBB.
21067 BuildMI(*endMBB, endMBB->begin(), DL,
21068 TII->get(X86::PHI), DestReg)
21069 .addReg(OffsetDestReg).addMBB(offsetMBB)
21070 .addReg(OverflowDestReg).addMBB(overflowMBB);
21073 // Erase the pseudo instruction
21074 MI->eraseFromParent();
21079 MachineBasicBlock *
21080 X86TargetLowering::EmitVAStartSaveXMMRegsWithCustomInserter(
21082 MachineBasicBlock *MBB) const {
21083 // Emit code to save XMM registers to the stack. The ABI says that the
21084 // number of registers to save is given in %al, so it's theoretically
21085 // possible to do an indirect jump trick to avoid saving all of them;
21086 // however, this code takes a simpler approach and just executes all
21087 // of the stores if %al is non-zero. It's less code, and it's probably
21088 // easier on the hardware branch predictor, and stores aren't all that
21089 // expensive anyway.
21091 // Create the new basic blocks. One block contains all the XMM stores,
21092 // and one block is the final destination regardless of whether any
21093 // stores were performed.
21094 const BasicBlock *LLVM_BB = MBB->getBasicBlock();
21095 MachineFunction *F = MBB->getParent();
21096 MachineFunction::iterator MBBIter = MBB;
21098 MachineBasicBlock *XMMSaveMBB = F->CreateMachineBasicBlock(LLVM_BB);
21099 MachineBasicBlock *EndMBB = F->CreateMachineBasicBlock(LLVM_BB);
21100 F->insert(MBBIter, XMMSaveMBB);
21101 F->insert(MBBIter, EndMBB);
21103 // Transfer the remainder of MBB and its successor edges to EndMBB.
21104 EndMBB->splice(EndMBB->begin(), MBB,
21105 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21106 EndMBB->transferSuccessorsAndUpdatePHIs(MBB);
21108 // The original block will now fall through to the XMM save block.
21109 MBB->addSuccessor(XMMSaveMBB);
21110 // The XMMSaveMBB will fall through to the end block.
21111 XMMSaveMBB->addSuccessor(EndMBB);
21113 // Now add the instructions.
21114 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21115 DebugLoc DL = MI->getDebugLoc();
21117 unsigned CountReg = MI->getOperand(0).getReg();
21118 int64_t RegSaveFrameIndex = MI->getOperand(1).getImm();
21119 int64_t VarArgsFPOffset = MI->getOperand(2).getImm();
21121 if (!Subtarget->isTargetWin64()) {
21122 // If %al is 0, branch around the XMM save block.
21123 BuildMI(MBB, DL, TII->get(X86::TEST8rr)).addReg(CountReg).addReg(CountReg);
21124 BuildMI(MBB, DL, TII->get(X86::JE_1)).addMBB(EndMBB);
21125 MBB->addSuccessor(EndMBB);
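// The Win64 vararg convention does not use %al to communicate how many
// vector registers were used, so on that target the guard branch above is
// omitted and the stores below are emitted unconditionally.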
21128 // Make sure the last operand is EFLAGS, which gets clobbered by the branch
21129 // that was just emitted, but clearly shouldn't be "saved".
21130 assert((MI->getNumOperands() <= 3 ||
21131 !MI->getOperand(MI->getNumOperands() - 1).isReg() ||
21132 MI->getOperand(MI->getNumOperands() - 1).getReg() == X86::EFLAGS)
21133 && "Expected last argument to be EFLAGS");
21134 unsigned MOVOpc = Subtarget->hasFp256() ? X86::VMOVAPSmr : X86::MOVAPSmr;
21135 // In the XMM save block, save all the XMM argument registers.
21136 for (int i = 3, e = MI->getNumOperands() - 1; i != e; ++i) {
21137 int64_t Offset = (i - 3) * 16 + VarArgsFPOffset;
21138 MachineMemOperand *MMO =
21139 F->getMachineMemOperand(
21140 MachinePointerInfo::getFixedStack(RegSaveFrameIndex, Offset),
21141 MachineMemOperand::MOStore,
21142 /*Size=*/16, /*Align=*/16);
21143 BuildMI(XMMSaveMBB, DL, TII->get(MOVOpc))
21144 .addFrameIndex(RegSaveFrameIndex)
21145 .addImm(/*Scale=*/1)
21146 .addReg(/*IndexReg=*/0)
21147 .addImm(/*Disp=*/Offset)
21148 .addReg(/*Segment=*/0)
21149 .addReg(MI->getOperand(i).getReg())
21150 .addMemOperand(MMO);
21153 MI->eraseFromParent(); // The pseudo instruction is gone now.
21158 // The EFLAGS operand of SelectItr might be missing a kill marker
21159 // because there were multiple uses of EFLAGS, and ISel didn't know
21160 // which to mark. Figure out whether SelectItr should have had a
21161 // kill marker, and set it if it should. Returns the correct kill
21162 // marker value.
21163 static bool checkAndUpdateEFLAGSKill(MachineBasicBlock::iterator SelectItr,
21164 MachineBasicBlock* BB,
21165 const TargetRegisterInfo* TRI) {
21166 // Scan forward through BB for a use/def of EFLAGS.
21167 MachineBasicBlock::iterator miI(std::next(SelectItr));
21168 for (MachineBasicBlock::iterator miE = BB->end(); miI != miE; ++miI) {
21169 const MachineInstr& mi = *miI;
21170 if (mi.readsRegister(X86::EFLAGS))
21172 if (mi.definesRegister(X86::EFLAGS))
21173 break; // Should have kill-flag - update below.
21176 // If we hit the end of the block, check whether EFLAGS is live into a
21178 if (miI == BB->end()) {
21179 for (MachineBasicBlock::succ_iterator sItr = BB->succ_begin(),
21180 sEnd = BB->succ_end();
21181 sItr != sEnd; ++sItr) {
21182 MachineBasicBlock* succ = *sItr;
21183 if (succ->isLiveIn(X86::EFLAGS))
21188 // We found a def, or hit the end of the basic block and EFLAGS wasn't live
21189 // out. SelectMI should have a kill flag on EFLAGS.
21190 SelectItr->addRegisterKilled(X86::EFLAGS, TRI);
21194 MachineBasicBlock *
21195 X86TargetLowering::EmitLoweredSelect(MachineInstr *MI,
21196 MachineBasicBlock *BB) const {
21197 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21198 DebugLoc DL = MI->getDebugLoc();
21200 // To "insert" a SELECT_CC instruction, we actually have to insert the
21201 // diamond control-flow pattern. The incoming instruction knows the
21202 // destination vreg to set, the condition code register to branch on, the
21203 // true/false values to select between, and a branch opcode to use.
21204 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21205 MachineFunction::iterator It = BB;
21211 // cmpTY ccX, r1, r2
21213 // fallthrough --> copy0MBB
21214 MachineBasicBlock *thisMBB = BB;
21215 MachineFunction *F = BB->getParent();
21216 MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
21217 MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
21218 F->insert(It, copy0MBB);
21219 F->insert(It, sinkMBB);
21221 // If the EFLAGS register isn't dead in the terminator, then claim that it's
21222 // live into the sink and copy blocks.
21223 const TargetRegisterInfo *TRI = Subtarget->getRegisterInfo();
21224 if (!MI->killsRegister(X86::EFLAGS) &&
21225 !checkAndUpdateEFLAGSKill(MI, BB, TRI)) {
21226 copy0MBB->addLiveIn(X86::EFLAGS);
21227 sinkMBB->addLiveIn(X86::EFLAGS);
21230 // Transfer the remainder of BB and its successor edges to sinkMBB.
21231 sinkMBB->splice(sinkMBB->begin(), BB,
21232 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21233 sinkMBB->transferSuccessorsAndUpdatePHIs(BB);
21235 // Add the true and fallthrough blocks as its successors.
21236 BB->addSuccessor(copy0MBB);
21237 BB->addSuccessor(sinkMBB);
21239 // Create the conditional branch instruction.
21240 unsigned Opc =
21241 X86::GetCondBranchFromCond((X86::CondCode)MI->getOperand(3).getImm());
21242 BuildMI(BB, DL, TII->get(Opc)).addMBB(sinkMBB);
21245 // %FalseValue = ...
21246 // # fallthrough to sinkMBB
21247 copy0MBB->addSuccessor(sinkMBB);
21250 // %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
21252 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21253 TII->get(X86::PHI), MI->getOperand(0).getReg())
21254 .addReg(MI->getOperand(1).getReg()).addMBB(copy0MBB)
21255 .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);
21257 MI->eraseFromParent(); // The pseudo instruction is gone now.
21261 MachineBasicBlock *
21262 X86TargetLowering::EmitLoweredSegAlloca(MachineInstr *MI,
21263 MachineBasicBlock *BB) const {
21264 MachineFunction *MF = BB->getParent();
21265 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21266 DebugLoc DL = MI->getDebugLoc();
21267 const BasicBlock *LLVM_BB = BB->getBasicBlock();
21269 assert(MF->shouldSplitStack());
21271 const bool Is64Bit = Subtarget->is64Bit();
21272 const bool IsLP64 = Subtarget->isTarget64BitLP64();
21274 const unsigned TlsReg = Is64Bit ? X86::FS : X86::GS;
21275 const unsigned TlsOffset = IsLP64 ? 0x70 : Is64Bit ? 0x40 : 0x30;
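// These constants are presumably the offsets of the split-stack limit slot
// within the thread control block expected by the segmented-stack runtime:
// 0x70 for LP64, 0x40 for x32 (64-bit with 32-bit pointers) and 0x30 for
// 32-bit, addressed through FS on 64-bit targets and GS on 32-bit ones.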
21278 // ... [Till the alloca]
21279 // If stacklet is not large enough, jump to mallocMBB
21282 // Allocate by subtracting from RSP
21283 // Jump to continueMBB
21286 // Allocate by call to runtime
21290 // [rest of original BB]
21293 MachineBasicBlock *mallocMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21294 MachineBasicBlock *bumpMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21295 MachineBasicBlock *continueMBB = MF->CreateMachineBasicBlock(LLVM_BB);
21297 MachineRegisterInfo &MRI = MF->getRegInfo();
21298 const TargetRegisterClass *AddrRegClass =
21299 getRegClassFor(getPointerTy());
21301 unsigned mallocPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21302 bumpSPPtrVReg = MRI.createVirtualRegister(AddrRegClass),
21303 tmpSPVReg = MRI.createVirtualRegister(AddrRegClass),
21304 SPLimitVReg = MRI.createVirtualRegister(AddrRegClass),
21305 sizeVReg = MI->getOperand(1).getReg(),
21306 physSPReg = IsLP64 || Subtarget->isTargetNaCl64() ? X86::RSP : X86::ESP;
21308 MachineFunction::iterator MBBIter = BB;
21311 MF->insert(MBBIter, bumpMBB);
21312 MF->insert(MBBIter, mallocMBB);
21313 MF->insert(MBBIter, continueMBB);
21315 continueMBB->splice(continueMBB->begin(), BB,
21316 std::next(MachineBasicBlock::iterator(MI)), BB->end());
21317 continueMBB->transferSuccessorsAndUpdatePHIs(BB);
21319 // Add code to the main basic block to check if the stack limit has been hit,
21320 // and if so, jump to mallocMBB otherwise to bumpMBB.
21321 BuildMI(BB, DL, TII->get(TargetOpcode::COPY), tmpSPVReg).addReg(physSPReg);
21322 BuildMI(BB, DL, TII->get(IsLP64 ? X86::SUB64rr:X86::SUB32rr), SPLimitVReg)
21323 .addReg(tmpSPVReg).addReg(sizeVReg);
21324 BuildMI(BB, DL, TII->get(IsLP64 ? X86::CMP64mr:X86::CMP32mr))
21325 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg)
21326 .addReg(SPLimitVReg);
21327 BuildMI(BB, DL, TII->get(X86::JG_1)).addMBB(mallocMBB);
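// SPLimitVReg holds SP minus the allocation size. If the per-thread stack
// limit stored at [TlsReg:TlsOffset] is greater than that value, the current
// stacklet cannot hold the allocation and we branch to mallocMBB to call into
// the runtime; otherwise we fall through to bumpMBB and just adjust SP.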
21329 // bumpMBB simply decreases the stack pointer, since we know the current
21330 // stacklet has enough space.
21331 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), physSPReg)
21332 .addReg(SPLimitVReg);
21333 BuildMI(bumpMBB, DL, TII->get(TargetOpcode::COPY), bumpSPPtrVReg)
21334 .addReg(SPLimitVReg);
21335 BuildMI(bumpMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21337 // Calls into a routine in libgcc to allocate more space from the heap.
21338 const uint32_t *RegMask =
21339 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
21341 BuildMI(mallocMBB, DL, TII->get(X86::MOV64rr), X86::RDI)
21343 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21344 .addExternalSymbol("__morestack_allocate_stack_space")
21345 .addRegMask(RegMask)
21346 .addReg(X86::RDI, RegState::Implicit)
21347 .addReg(X86::RAX, RegState::ImplicitDefine);
21348 } else if (Is64Bit) {
21349 BuildMI(mallocMBB, DL, TII->get(X86::MOV32rr), X86::EDI)
21351 BuildMI(mallocMBB, DL, TII->get(X86::CALL64pcrel32))
21352 .addExternalSymbol("__morestack_allocate_stack_space")
21353 .addRegMask(RegMask)
21354 .addReg(X86::EDI, RegState::Implicit)
21355 .addReg(X86::EAX, RegState::ImplicitDefine);
21357 BuildMI(mallocMBB, DL, TII->get(X86::SUB32ri), physSPReg).addReg(physSPReg)
21359 BuildMI(mallocMBB, DL, TII->get(X86::PUSH32r)).addReg(sizeVReg);
21360 BuildMI(mallocMBB, DL, TII->get(X86::CALLpcrel32))
21361 .addExternalSymbol("__morestack_allocate_stack_space")
21362 .addRegMask(RegMask)
21363 .addReg(X86::EAX, RegState::ImplicitDefine);
21367 BuildMI(mallocMBB, DL, TII->get(X86::ADD32ri), physSPReg).addReg(physSPReg)
21370 BuildMI(mallocMBB, DL, TII->get(TargetOpcode::COPY), mallocPtrVReg)
21371 .addReg(IsLP64 ? X86::RAX : X86::EAX);
21372 BuildMI(mallocMBB, DL, TII->get(X86::JMP_1)).addMBB(continueMBB);
21374 // Set up the CFG correctly.
21375 BB->addSuccessor(bumpMBB);
21376 BB->addSuccessor(mallocMBB);
21377 mallocMBB->addSuccessor(continueMBB);
21378 bumpMBB->addSuccessor(continueMBB);
21380 // Take care of the PHI nodes.
21381 BuildMI(*continueMBB, continueMBB->begin(), DL, TII->get(X86::PHI),
21382 MI->getOperand(0).getReg())
21383 .addReg(mallocPtrVReg).addMBB(mallocMBB)
21384 .addReg(bumpSPPtrVReg).addMBB(bumpMBB);
21386 // Delete the original pseudo instruction.
21387 MI->eraseFromParent();
21390 return continueMBB;
21393 MachineBasicBlock *
21394 X86TargetLowering::EmitLoweredWinAlloca(MachineInstr *MI,
21395 MachineBasicBlock *BB) const {
21396 DebugLoc DL = MI->getDebugLoc();
21398 assert(!Subtarget->isTargetMachO());
21400 X86FrameLowering::emitStackProbeCall(*BB->getParent(), *BB, MI, DL);
21402 MI->eraseFromParent(); // The pseudo instruction is gone now.
21406 MachineBasicBlock *
21407 X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
21408 MachineBasicBlock *BB) const {
21409 // This is pretty easy. We're taking the value that we received from
21410 // our load from the relocation, sticking it in either RDI (x86-64)
21411 // or EAX and doing an indirect call. The return value will then
21412 // be in the normal return register.
21413 MachineFunction *F = BB->getParent();
21414 const X86InstrInfo *TII = Subtarget->getInstrInfo();
21415 DebugLoc DL = MI->getDebugLoc();
21417 assert(Subtarget->isTargetDarwin() && "Darwin only instr emitted?");
21418 assert(MI->getOperand(3).isGlobal() && "This should be a global");
21420 // Get a register mask for the lowered call.
21421 // FIXME: The 32-bit calls have non-standard calling conventions. Use a
21422 // proper register mask.
21423 const uint32_t *RegMask =
21424 Subtarget->getRegisterInfo()->getCallPreservedMask(CallingConv::C);
21425 if (Subtarget->is64Bit()) {
21426 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21427 TII->get(X86::MOV64rm), X86::RDI)
21429 .addImm(0).addReg(0)
21430 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21431 MI->getOperand(3).getTargetFlags())
21433 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL64m));
21434 addDirectMem(MIB, X86::RDI);
21435 MIB.addReg(X86::RAX, RegState::ImplicitDefine).addRegMask(RegMask);
21436 } else if (F->getTarget().getRelocationModel() != Reloc::PIC_) {
21437 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21438 TII->get(X86::MOV32rm), X86::EAX)
21440 .addImm(0).addReg(0)
21441 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21442 MI->getOperand(3).getTargetFlags())
21444 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21445 addDirectMem(MIB, X86::EAX);
21446 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21448 MachineInstrBuilder MIB = BuildMI(*BB, MI, DL,
21449 TII->get(X86::MOV32rm), X86::EAX)
21450 .addReg(TII->getGlobalBaseReg(F))
21451 .addImm(0).addReg(0)
21452 .addGlobalAddress(MI->getOperand(3).getGlobal(), 0,
21453 MI->getOperand(3).getTargetFlags())
21455 MIB = BuildMI(*BB, MI, DL, TII->get(X86::CALL32m));
21456 addDirectMem(MIB, X86::EAX);
21457 MIB.addReg(X86::EAX, RegState::ImplicitDefine).addRegMask(RegMask);
21460 MI->eraseFromParent(); // The pseudo instruction is gone now.
21464 MachineBasicBlock *
21465 X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
21466 MachineBasicBlock *MBB) const {
21467 DebugLoc DL = MI->getDebugLoc();
21468 MachineFunction *MF = MBB->getParent();
21469 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21470 MachineRegisterInfo &MRI = MF->getRegInfo();
21472 const BasicBlock *BB = MBB->getBasicBlock();
21473 MachineFunction::iterator I = MBB;
21476 // Memory Reference
21477 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21478 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21481 unsigned MemOpndSlot = 0;
21483 unsigned CurOp = 0;
21485 DstReg = MI->getOperand(CurOp++).getReg();
21486 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
21487 assert(RC->hasType(MVT::i32) && "Invalid destination!");
21488 unsigned mainDstReg = MRI.createVirtualRegister(RC);
21489 unsigned restoreDstReg = MRI.createVirtualRegister(RC);
21491 MemOpndSlot = CurOp;
21493 MVT PVT = getPointerTy();
21494 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21495 "Invalid Pointer Size!");
21497 // For v = setjmp(buf), we generate
21500 // buf[LabelOffset] = restoreMBB
21501 // SjLjSetup restoreMBB
21507 // v = phi(main, restore)
21510 // if base pointer being used, load it from frame
21513 MachineBasicBlock *thisMBB = MBB;
21514 MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
21515 MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
21516 MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
21517 MF->insert(I, mainMBB);
21518 MF->insert(I, sinkMBB);
21519 MF->push_back(restoreMBB);
21521 MachineInstrBuilder MIB;
21523 // Transfer the remainder of BB and its successor edges to sinkMBB.
21524 sinkMBB->splice(sinkMBB->begin(), MBB,
21525 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
21526 sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);
21529 unsigned PtrStoreOpc = 0;
21530 unsigned LabelReg = 0;
21531 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21532 Reloc::Model RM = MF->getTarget().getRelocationModel();
21533 bool UseImmLabel = (MF->getTarget().getCodeModel() == CodeModel::Small) &&
21534 (RM == Reloc::Static || RM == Reloc::DynamicNoPIC);
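// With the small code model and non-PIC relocation the address of restoreMBB
// fits in a sign-extended 32-bit immediate, so it can be stored into the
// jump buffer directly; otherwise it must first be materialized with LEA.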
21536 // Prepare IP either in reg or imm.
21537 if (!UseImmLabel) {
21538 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mr : X86::MOV32mr;
21539 const TargetRegisterClass *PtrRC = getRegClassFor(PVT);
21540 LabelReg = MRI.createVirtualRegister(PtrRC);
21541 if (Subtarget->is64Bit()) {
21542 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA64r), LabelReg)
21546 .addMBB(restoreMBB)
21549 const X86InstrInfo *XII = static_cast<const X86InstrInfo*>(TII);
21550 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::LEA32r), LabelReg)
21551 .addReg(XII->getGlobalBaseReg(MF))
21554 .addMBB(restoreMBB, Subtarget->ClassifyBlockAddressReference())
21558 PtrStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
21560 MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrStoreOpc));
21561 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21562 if (i == X86::AddrDisp)
21563 MIB.addDisp(MI->getOperand(MemOpndSlot + i), LabelOffset);
21565 MIB.addOperand(MI->getOperand(MemOpndSlot + i));
21568 MIB.addReg(LabelReg);
21570 MIB.addMBB(restoreMBB);
21571 MIB.setMemRefs(MMOBegin, MMOEnd);
21573 MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
21574 .addMBB(restoreMBB);
21576 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
21577 MIB.addRegMask(RegInfo->getNoPreservedMask());
21578 thisMBB->addSuccessor(mainMBB);
21579 thisMBB->addSuccessor(restoreMBB);
21583 BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
21584 mainMBB->addSuccessor(sinkMBB);
21587 BuildMI(*sinkMBB, sinkMBB->begin(), DL,
21588 TII->get(X86::PHI), DstReg)
21589 .addReg(mainDstReg).addMBB(mainMBB)
21590 .addReg(restoreDstReg).addMBB(restoreMBB);
21593 if (RegInfo->hasBasePointer(*MF)) {
21594 const bool Uses64BitFramePtr =
21595 Subtarget->isTarget64BitLP64() || Subtarget->isTargetNaCl64();
21596 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
21597 X86FI->setRestoreBasePointer(MF);
21598 unsigned FramePtr = RegInfo->getFrameRegister(*MF);
21599 unsigned BasePtr = RegInfo->getBaseRegister();
21600 unsigned Opm = Uses64BitFramePtr ? X86::MOV64rm : X86::MOV32rm;
21601 addRegOffset(BuildMI(restoreMBB, DL, TII->get(Opm), BasePtr),
21602 FramePtr, true, X86FI->getRestoreBasePointerOffset())
21603 .setMIFlag(MachineInstr::FrameSetup);
21605 BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
21606 BuildMI(restoreMBB, DL, TII->get(X86::JMP_1)).addMBB(sinkMBB);
21607 restoreMBB->addSuccessor(sinkMBB);
21609 MI->eraseFromParent();
21613 MachineBasicBlock *
21614 X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
21615 MachineBasicBlock *MBB) const {
21616 DebugLoc DL = MI->getDebugLoc();
21617 MachineFunction *MF = MBB->getParent();
21618 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21619 MachineRegisterInfo &MRI = MF->getRegInfo();
21621 // Memory Reference
21622 MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
21623 MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();
21625 MVT PVT = getPointerTy();
21626 assert((PVT == MVT::i64 || PVT == MVT::i32) &&
21627 "Invalid Pointer Size!");
21629 const TargetRegisterClass *RC =
21630 (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
21631 unsigned Tmp = MRI.createVirtualRegister(RC);
21632 // Since FP is only updated here but NOT referenced, it's treated as GPR.
21633 const X86RegisterInfo *RegInfo = Subtarget->getRegisterInfo();
21634 unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
21635 unsigned SP = RegInfo->getStackRegister();
21637 MachineInstrBuilder MIB;
21639 const int64_t LabelOffset = 1 * PVT.getStoreSize();
21640 const int64_t SPOffset = 2 * PVT.getStoreSize();
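// The jump buffer uses pointer-sized slots: slot 0 holds the saved frame
// pointer, slot 1 (LabelOffset) the address to resume at, and slot 2
// (SPOffset) the saved stack pointer; the three loads below read them in
// that order.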
21642 unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
21643 unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;
21646 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
21647 for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
21648 MIB.addOperand(MI->getOperand(i));
21649 MIB.setMemRefs(MMOBegin, MMOEnd);
21651 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
21652 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21653 if (i == X86::AddrDisp)
21654 MIB.addDisp(MI->getOperand(i), LabelOffset);
21656 MIB.addOperand(MI->getOperand(i));
21658 MIB.setMemRefs(MMOBegin, MMOEnd);
21660 MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
21661 for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
21662 if (i == X86::AddrDisp)
21663 MIB.addDisp(MI->getOperand(i), SPOffset);
21665 MIB.addOperand(MI->getOperand(i));
21667 MIB.setMemRefs(MMOBegin, MMOEnd);
21669 BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);
21671 MI->eraseFromParent();
21675 // Replace 213-type (isel default) FMA3 instructions with 231-type for
21676 // accumulator loops. Writing back to the accumulator allows the coalescer
21677 // to remove extra copies in the loop.
21678 MachineBasicBlock *
21679 X86TargetLowering::emitFMA3Instr(MachineInstr *MI,
21680 MachineBasicBlock *MBB) const {
21681 MachineOperand &AddendOp = MI->getOperand(3);
21683 // Bail out early if the addend isn't a register - we can't switch these.
21684 if (!AddendOp.isReg())
21687 MachineFunction &MF = *MBB->getParent();
21688 MachineRegisterInfo &MRI = MF.getRegInfo();
21690 // Check whether the addend is defined by a PHI:
21691 assert(MRI.hasOneDef(AddendOp.getReg()) && "Multiple defs in SSA?");
21692 MachineInstr &AddendDef = *MRI.def_instr_begin(AddendOp.getReg());
21693 if (!AddendDef.isPHI())
21696 // Look for the following pattern:
21698 //   %addend = phi [%entry, 0], [%loop, %result]
21700 //   %result<tied1> = FMA213 %m2<tied0>, %m1, %addend
21702 // Replace with:
21704 //   %addend = phi [%entry, 0], [%loop, %result]
21706 //   %result<tied1> = FMA231 %addend<tied0>, %m1, %m2
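// In the 213 form the tied destination is one of the multiplicands
// (dst = src2*dst + src3), while in the 231 form it is the addend
// (dst = src2*src3 + dst). Rewriting to 231 therefore ties the result to the
// loop-carried addend, letting the register coalescer keep the accumulator
// in a single register across iterations.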
21708 for (unsigned i = 1, e = AddendDef.getNumOperands(); i < e; i += 2) {
21709 assert(AddendDef.getOperand(i).isReg());
21710 MachineOperand PHISrcOp = AddendDef.getOperand(i);
21711 MachineInstr &PHISrcInst = *MRI.def_instr_begin(PHISrcOp.getReg());
21712 if (&PHISrcInst == MI) {
21713 // Found a matching instruction.
21714 unsigned NewFMAOpc = 0;
21715 switch (MI->getOpcode()) {
21716 case X86::VFMADDPDr213r: NewFMAOpc = X86::VFMADDPDr231r; break;
21717 case X86::VFMADDPSr213r: NewFMAOpc = X86::VFMADDPSr231r; break;
21718 case X86::VFMADDSDr213r: NewFMAOpc = X86::VFMADDSDr231r; break;
21719 case X86::VFMADDSSr213r: NewFMAOpc = X86::VFMADDSSr231r; break;
21720 case X86::VFMSUBPDr213r: NewFMAOpc = X86::VFMSUBPDr231r; break;
21721 case X86::VFMSUBPSr213r: NewFMAOpc = X86::VFMSUBPSr231r; break;
21722 case X86::VFMSUBSDr213r: NewFMAOpc = X86::VFMSUBSDr231r; break;
21723 case X86::VFMSUBSSr213r: NewFMAOpc = X86::VFMSUBSSr231r; break;
21724 case X86::VFNMADDPDr213r: NewFMAOpc = X86::VFNMADDPDr231r; break;
21725 case X86::VFNMADDPSr213r: NewFMAOpc = X86::VFNMADDPSr231r; break;
21726 case X86::VFNMADDSDr213r: NewFMAOpc = X86::VFNMADDSDr231r; break;
21727 case X86::VFNMADDSSr213r: NewFMAOpc = X86::VFNMADDSSr231r; break;
21728 case X86::VFNMSUBPDr213r: NewFMAOpc = X86::VFNMSUBPDr231r; break;
21729 case X86::VFNMSUBPSr213r: NewFMAOpc = X86::VFNMSUBPSr231r; break;
21730 case X86::VFNMSUBSDr213r: NewFMAOpc = X86::VFNMSUBSDr231r; break;
21731 case X86::VFNMSUBSSr213r: NewFMAOpc = X86::VFNMSUBSSr231r; break;
21732 case X86::VFMADDSUBPDr213r: NewFMAOpc = X86::VFMADDSUBPDr231r; break;
21733 case X86::VFMADDSUBPSr213r: NewFMAOpc = X86::VFMADDSUBPSr231r; break;
21734 case X86::VFMSUBADDPDr213r: NewFMAOpc = X86::VFMSUBADDPDr231r; break;
21735 case X86::VFMSUBADDPSr213r: NewFMAOpc = X86::VFMSUBADDPSr231r; break;
21737 case X86::VFMADDPDr213rY: NewFMAOpc = X86::VFMADDPDr231rY; break;
21738 case X86::VFMADDPSr213rY: NewFMAOpc = X86::VFMADDPSr231rY; break;
21739 case X86::VFMSUBPDr213rY: NewFMAOpc = X86::VFMSUBPDr231rY; break;
21740 case X86::VFMSUBPSr213rY: NewFMAOpc = X86::VFMSUBPSr231rY; break;
21741 case X86::VFNMADDPDr213rY: NewFMAOpc = X86::VFNMADDPDr231rY; break;
21742 case X86::VFNMADDPSr213rY: NewFMAOpc = X86::VFNMADDPSr231rY; break;
21743 case X86::VFNMSUBPDr213rY: NewFMAOpc = X86::VFNMSUBPDr231rY; break;
21744 case X86::VFNMSUBPSr213rY: NewFMAOpc = X86::VFNMSUBPSr231rY; break;
21745 case X86::VFMADDSUBPDr213rY: NewFMAOpc = X86::VFMADDSUBPDr231rY; break;
21746 case X86::VFMADDSUBPSr213rY: NewFMAOpc = X86::VFMADDSUBPSr231rY; break;
21747 case X86::VFMSUBADDPDr213rY: NewFMAOpc = X86::VFMSUBADDPDr231rY; break;
21748 case X86::VFMSUBADDPSr213rY: NewFMAOpc = X86::VFMSUBADDPSr231rY; break;
21749 default: llvm_unreachable("Unrecognized FMA variant.");
21752 const TargetInstrInfo &TII = *Subtarget->getInstrInfo();
21753 MachineInstrBuilder MIB =
21754 BuildMI(MF, MI->getDebugLoc(), TII.get(NewFMAOpc))
21755 .addOperand(MI->getOperand(0))
21756 .addOperand(MI->getOperand(3))
21757 .addOperand(MI->getOperand(2))
21758 .addOperand(MI->getOperand(1));
21759 MBB->insert(MachineBasicBlock::iterator(MI), MIB);
21760 MI->eraseFromParent();
21767 MachineBasicBlock *
21768 X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
21769 MachineBasicBlock *BB) const {
21770 switch (MI->getOpcode()) {
21771 default: llvm_unreachable("Unexpected instr type to insert");
21772 case X86::TAILJMPd64:
21773 case X86::TAILJMPr64:
21774 case X86::TAILJMPm64:
21775 case X86::TAILJMPd64_REX:
21776 case X86::TAILJMPr64_REX:
21777 case X86::TAILJMPm64_REX:
21778 llvm_unreachable("TAILJMP64 would not be touched here.");
21779 case X86::TCRETURNdi64:
21780 case X86::TCRETURNri64:
21781 case X86::TCRETURNmi64:
21783 case X86::WIN_ALLOCA:
21784 return EmitLoweredWinAlloca(MI, BB);
21785 case X86::SEG_ALLOCA_32:
21786 case X86::SEG_ALLOCA_64:
21787 return EmitLoweredSegAlloca(MI, BB);
21788 case X86::TLSCall_32:
21789 case X86::TLSCall_64:
21790 return EmitLoweredTLSCall(MI, BB);
21791 case X86::CMOV_GR8:
21792 case X86::CMOV_FR32:
21793 case X86::CMOV_FR64:
21794 case X86::CMOV_V4F32:
21795 case X86::CMOV_V2F64:
21796 case X86::CMOV_V2I64:
21797 case X86::CMOV_V8F32:
21798 case X86::CMOV_V4F64:
21799 case X86::CMOV_V4I64:
21800 case X86::CMOV_V16F32:
21801 case X86::CMOV_V8F64:
21802 case X86::CMOV_V8I64:
21803 case X86::CMOV_GR16:
21804 case X86::CMOV_GR32:
21805 case X86::CMOV_RFP32:
21806 case X86::CMOV_RFP64:
21807 case X86::CMOV_RFP80:
21808 return EmitLoweredSelect(MI, BB);
21810 case X86::FP32_TO_INT16_IN_MEM:
21811 case X86::FP32_TO_INT32_IN_MEM:
21812 case X86::FP32_TO_INT64_IN_MEM:
21813 case X86::FP64_TO_INT16_IN_MEM:
21814 case X86::FP64_TO_INT32_IN_MEM:
21815 case X86::FP64_TO_INT64_IN_MEM:
21816 case X86::FP80_TO_INT16_IN_MEM:
21817 case X86::FP80_TO_INT32_IN_MEM:
21818 case X86::FP80_TO_INT64_IN_MEM: {
21819 MachineFunction *F = BB->getParent();
21820 const TargetInstrInfo *TII = Subtarget->getInstrInfo();
21821 DebugLoc DL = MI->getDebugLoc();
21823 // Change the floating point control register to use "round towards zero"
21824 // mode when truncating to an integer value.
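  // Rough sketch of the emitted sequence (an illustration, not part of the
  // original comments):
  //   fnstcw  [slot]        ; save the current control word
  //   mov     OldCW, [slot] ; remember the old value in a GR16 vreg
  //   mov     word [slot], <control word with RC = round-toward-zero>
  //   fldcw   [slot]        ; switch the FPU into truncating mode
  //   mov     [slot], OldCW ; restore the saved image in memory
  //   fistp   <dest>        ; the actual FP-to-int store (IST_Fp*)
  //   fldcw   [slot]        ; reload the original control word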
21825 int CWFrameIdx = F->getFrameInfo()->CreateStackObject(2, 2, false);
21826 addFrameReference(BuildMI(*BB, MI, DL,
21827 TII->get(X86::FNSTCW16m)), CWFrameIdx);
21829 // Load the old value of the high byte of the control word...
21831 F->getRegInfo().createVirtualRegister(&X86::GR16RegClass);
21832 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16rm), OldCW),
21835 // Set the high part to be round to zero...
21836 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mi)), CWFrameIdx)
21839 // Reload the modified control word now...
21840 addFrameReference(BuildMI(*BB, MI, DL,
21841 TII->get(X86::FLDCW16m)), CWFrameIdx);
21843 // Restore the memory image of control word to original value
21844 addFrameReference(BuildMI(*BB, MI, DL, TII->get(X86::MOV16mr)), CWFrameIdx)
21847 // Get the X86 opcode to use.
21849 switch (MI->getOpcode()) {
21850 default: llvm_unreachable("illegal opcode!");
21851 case X86::FP32_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m32; break;
21852 case X86::FP32_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m32; break;
21853 case X86::FP32_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m32; break;
21854 case X86::FP64_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m64; break;
21855 case X86::FP64_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m64; break;
21856 case X86::FP64_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m64; break;
21857 case X86::FP80_TO_INT16_IN_MEM: Opc = X86::IST_Fp16m80; break;
21858 case X86::FP80_TO_INT32_IN_MEM: Opc = X86::IST_Fp32m80; break;
21859 case X86::FP80_TO_INT64_IN_MEM: Opc = X86::IST_Fp64m80; break;
21863 MachineOperand &Op = MI->getOperand(0);
21865 AM.BaseType = X86AddressMode::RegBase;
21866 AM.Base.Reg = Op.getReg();
21868 AM.BaseType = X86AddressMode::FrameIndexBase;
21869 AM.Base.FrameIndex = Op.getIndex();
21871 Op = MI->getOperand(1);
21873 AM.Scale = Op.getImm();
21874 Op = MI->getOperand(2);
21876 AM.IndexReg = Op.getImm();
21877 Op = MI->getOperand(3);
21878 if (Op.isGlobal()) {
21879 AM.GV = Op.getGlobal();
21881 AM.Disp = Op.getImm();
21883 addFullAddress(BuildMI(*BB, MI, DL, TII->get(Opc)), AM)
21884 .addReg(MI->getOperand(X86::AddrNumOperands).getReg());
21886 // Reload the original control word now.
21887 addFrameReference(BuildMI(*BB, MI, DL,
21888 TII->get(X86::FLDCW16m)), CWFrameIdx);
21890 MI->eraseFromParent(); // The pseudo instruction is gone now.
21893 // String/text processing lowering.
21894 case X86::PCMPISTRM128REG:
21895 case X86::VPCMPISTRM128REG:
21896 case X86::PCMPISTRM128MEM:
21897 case X86::VPCMPISTRM128MEM:
21898 case X86::PCMPESTRM128REG:
21899 case X86::VPCMPESTRM128REG:
21900 case X86::PCMPESTRM128MEM:
21901 case X86::VPCMPESTRM128MEM:
21902 assert(Subtarget->hasSSE42() &&
21903 "Target must have SSE4.2 or AVX features enabled");
21904 return EmitPCMPSTRM(MI, BB, Subtarget->getInstrInfo());
21906 // String/text processing lowering.
21907 case X86::PCMPISTRIREG:
21908 case X86::VPCMPISTRIREG:
21909 case X86::PCMPISTRIMEM:
21910 case X86::VPCMPISTRIMEM:
21911 case X86::PCMPESTRIREG:
21912 case X86::VPCMPESTRIREG:
21913 case X86::PCMPESTRIMEM:
21914 case X86::VPCMPESTRIMEM:
21915 assert(Subtarget->hasSSE42() &&
21916 "Target must have SSE4.2 or AVX features enabled");
21917 return EmitPCMPSTRI(MI, BB, Subtarget->getInstrInfo());
21919 // Thread synchronization.
21921 return EmitMonitor(MI, BB, Subtarget);
21925 return EmitXBegin(MI, BB, Subtarget->getInstrInfo());
21927 case X86::VASTART_SAVE_XMM_REGS:
21928 return EmitVAStartSaveXMMRegsWithCustomInserter(MI, BB);
21930 case X86::VAARG_64:
21931 return EmitVAARG64WithCustomInserter(MI, BB);
21933 case X86::EH_SjLj_SetJmp32:
21934 case X86::EH_SjLj_SetJmp64:
21935 return emitEHSjLjSetJmp(MI, BB);
21937 case X86::EH_SjLj_LongJmp32:
21938 case X86::EH_SjLj_LongJmp64:
21939 return emitEHSjLjLongJmp(MI, BB);
21941 case TargetOpcode::STATEPOINT:
21942 // As an implementation detail, STATEPOINT shares the STACKMAP format at
21943 // this point in the process. We diverge later.
21944 return emitPatchPoint(MI, BB);
21946 case TargetOpcode::STACKMAP:
21947 case TargetOpcode::PATCHPOINT:
21948 return emitPatchPoint(MI, BB);
21950 case X86::VFMADDPDr213r:
21951 case X86::VFMADDPSr213r:
21952 case X86::VFMADDSDr213r:
21953 case X86::VFMADDSSr213r:
21954 case X86::VFMSUBPDr213r:
21955 case X86::VFMSUBPSr213r:
21956 case X86::VFMSUBSDr213r:
21957 case X86::VFMSUBSSr213r:
21958 case X86::VFNMADDPDr213r:
21959 case X86::VFNMADDPSr213r:
21960 case X86::VFNMADDSDr213r:
21961 case X86::VFNMADDSSr213r:
21962 case X86::VFNMSUBPDr213r:
21963 case X86::VFNMSUBPSr213r:
21964 case X86::VFNMSUBSDr213r:
21965 case X86::VFNMSUBSSr213r:
21966 case X86::VFMADDSUBPDr213r:
21967 case X86::VFMADDSUBPSr213r:
21968 case X86::VFMSUBADDPDr213r:
21969 case X86::VFMSUBADDPSr213r:
21970 case X86::VFMADDPDr213rY:
21971 case X86::VFMADDPSr213rY:
21972 case X86::VFMSUBPDr213rY:
21973 case X86::VFMSUBPSr213rY:
21974 case X86::VFNMADDPDr213rY:
21975 case X86::VFNMADDPSr213rY:
21976 case X86::VFNMSUBPDr213rY:
21977 case X86::VFNMSUBPSr213rY:
21978 case X86::VFMADDSUBPDr213rY:
21979 case X86::VFMADDSUBPSr213rY:
21980 case X86::VFMSUBADDPDr213rY:
21981 case X86::VFMSUBADDPSr213rY:
21982 return emitFMA3Instr(MI, BB);
21986 //===----------------------------------------------------------------------===//
21987 // X86 Optimization Hooks
21988 //===----------------------------------------------------------------------===//
21990 void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
21993 const SelectionDAG &DAG,
21994 unsigned Depth) const {
21995 unsigned BitWidth = KnownZero.getBitWidth();
21996 unsigned Opc = Op.getOpcode();
21997 assert((Opc >= ISD::BUILTIN_OP_END ||
21998 Opc == ISD::INTRINSIC_WO_CHAIN ||
21999 Opc == ISD::INTRINSIC_W_CHAIN ||
22000 Opc == ISD::INTRINSIC_VOID) &&
22001 "Should use MaskedValueIsZero if you don't know whether Op"
22002 " is a target node!");
22004 KnownZero = KnownOne = APInt(BitWidth, 0); // Don't know anything.
22018 // These nodes' second result is a boolean.
22019 if (Op.getResNo() == 0)
22022 case X86ISD::SETCC:
22023 KnownZero |= APInt::getHighBitsSet(BitWidth, BitWidth - 1);
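    // For example (illustration only): X86ISD::SETCC produces a 0/1 value, so
    // for its usual i8 result every bit above bit 0 is known zero, i.e.
    // KnownZero covers bits [BitWidth-1:1].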
22025 case ISD::INTRINSIC_WO_CHAIN: {
22026 unsigned IntId = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
22027 unsigned NumLoBits = 0;
22030 case Intrinsic::x86_sse_movmsk_ps:
22031 case Intrinsic::x86_avx_movmsk_ps_256:
22032 case Intrinsic::x86_sse2_movmsk_pd:
22033 case Intrinsic::x86_avx_movmsk_pd_256:
22034 case Intrinsic::x86_mmx_pmovmskb:
22035 case Intrinsic::x86_sse2_pmovmskb_128:
22036 case Intrinsic::x86_avx2_pmovmskb: {
22037 // High bits of movmskp{s|d}, pmovmskb are known zero.
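      // Illustration (not in the original comments): x86_sse_movmsk_ps yields
      // a 4-bit lane mask in an i32, so bits [31:4] of the result are known
      // zero; the other intrinsics below differ only in the mask width.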
22039 default: llvm_unreachable("Impossible intrinsic"); // Can't reach here.
22040 case Intrinsic::x86_sse_movmsk_ps: NumLoBits = 4; break;
22041 case Intrinsic::x86_avx_movmsk_ps_256: NumLoBits = 8; break;
22042 case Intrinsic::x86_sse2_movmsk_pd: NumLoBits = 2; break;
22043 case Intrinsic::x86_avx_movmsk_pd_256: NumLoBits = 4; break;
22044 case Intrinsic::x86_mmx_pmovmskb: NumLoBits = 8; break;
22045 case Intrinsic::x86_sse2_pmovmskb_128: NumLoBits = 16; break;
22046 case Intrinsic::x86_avx2_pmovmskb: NumLoBits = 32; break;
22048 KnownZero = APInt::getHighBitsSet(BitWidth, BitWidth - NumLoBits);
22057 unsigned X86TargetLowering::ComputeNumSignBitsForTargetNode(
22059 const SelectionDAG &,
22060 unsigned Depth) const {
22061 // SETCC_CARRY sets the dest to ~0 for true or 0 for false.
22062 if (Op.getOpcode() == X86ISD::SETCC_CARRY)
22063 return Op.getValueType().getScalarType().getSizeInBits();
22069 /// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
22070 /// node is a GlobalAddress + offset.
22071 bool X86TargetLowering::isGAPlusOffset(SDNode *N,
22072 const GlobalValue* &GA,
22073 int64_t &Offset) const {
22074 if (N->getOpcode() == X86ISD::Wrapper) {
22075 if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
22076 GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
22077 Offset = cast<GlobalAddressSDNode>(N->getOperand(0))->getOffset();
22081 return TargetLowering::isGAPlusOffset(N, GA, Offset);
22084 /// isShuffleHigh128VectorInsertLow - Checks whether the shuffle node is the
22085 /// same as extracting the high 128-bit part of a 256-bit vector and then
22086 /// inserting the result into the low part of a new 256-bit vector
22087 static bool isShuffleHigh128VectorInsertLow(ShuffleVectorSDNode *SVOp) {
22088 EVT VT = SVOp->getValueType(0);
22089 unsigned NumElems = VT.getVectorNumElements();
22091 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
22092 for (unsigned i = 0, j = NumElems/2; i != NumElems/2; ++i, ++j)
22093 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
22094 SVOp->getMaskElt(j) >= 0)
22100 /// isShuffleLow128VectorInsertHigh - Checks whether the shuffle node is the
22101 /// same as extracting the low 128-bit part of a 256-bit vector and then
22102 /// inserting the result into the high part of a new 256-bit vector
22103 static bool isShuffleLow128VectorInsertHigh(ShuffleVectorSDNode *SVOp) {
22104 EVT VT = SVOp->getValueType(0);
22105 unsigned NumElems = VT.getVectorNumElements();
22107 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
22108 for (unsigned i = NumElems/2, j = 0; i != NumElems; ++i, ++j)
22109 if (!isUndefOrEqual(SVOp->getMaskElt(i), j) ||
22110 SVOp->getMaskElt(j) >= 0)
22116 /// PerformShuffleCombine256 - Performs shuffle combines for 256-bit vectors.
22117 static SDValue PerformShuffleCombine256(SDNode *N, SelectionDAG &DAG,
22118 TargetLowering::DAGCombinerInfo &DCI,
22119 const X86Subtarget* Subtarget) {
22121 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
22122 SDValue V1 = SVOp->getOperand(0);
22123 SDValue V2 = SVOp->getOperand(1);
22124 EVT VT = SVOp->getValueType(0);
22125 unsigned NumElems = VT.getVectorNumElements();
22127 if (V1.getOpcode() == ISD::CONCAT_VECTORS &&
22128 V2.getOpcode() == ISD::CONCAT_VECTORS) {
22132 //      V1 = concat_vectors(V, UNDEF)
22134 //      V2 = concat_vectors(BUILD_VECTOR(0, ..., 0), UNDEF)
22137 //      RESULT: a shuffle of V1 and V2 that is V zero-extended to the wide type
22139 if (V2.getOperand(0).getOpcode() != ISD::BUILD_VECTOR ||
22140 V2.getOperand(1).getOpcode() != ISD::UNDEF ||
22141 V1.getOperand(1).getOpcode() != ISD::UNDEF)
22144 if (!ISD::isBuildVectorAllZeros(V2.getOperand(0).getNode()))
22147 // To match the shuffle mask, the first half of the mask should
22148 // be exactly the first vector, and all the rest a splat with the
22149 // first element of the second one.
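    // Illustrative example (assumed): for a v4i64 shuffle this accepts masks
    // such as <0, 1, 4, 4> -- the low half is the identity of V1 and the high
    // half replicates index NumElems, the first (all-zero) element of V2.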
22150 for (unsigned i = 0; i != NumElems/2; ++i)
22151 if (!isUndefOrEqual(SVOp->getMaskElt(i), i) ||
22152 !isUndefOrEqual(SVOp->getMaskElt(i+NumElems/2), NumElems))
22155 // If V1 is coming from a vector load then just fold to a VZEXT_LOAD.
22156 if (LoadSDNode *Ld = dyn_cast<LoadSDNode>(V1.getOperand(0))) {
22157 if (Ld->hasNUsesOfValue(1, 0)) {
22158 SDVTList Tys = DAG.getVTList(MVT::v4i64, MVT::Other);
22159 SDValue Ops[] = { Ld->getChain(), Ld->getBasePtr() };
22161 DAG.getMemIntrinsicNode(X86ISD::VZEXT_LOAD, dl, Tys, Ops,
22163 Ld->getPointerInfo(),
22164 Ld->getAlignment(),
22165 false/*isVolatile*/, true/*ReadMem*/,
22166 false/*WriteMem*/);
22168 // Make sure the newly-created LOAD is in the same position as Ld in
22169 // terms of dependency. We create a TokenFactor for Ld and ResNode,
22170 // and update uses of Ld's output chain to use the TokenFactor.
22171 if (Ld->hasAnyUseOfValue(1)) {
22172 SDValue NewChain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
22173 SDValue(Ld, 1), SDValue(ResNode.getNode(), 1));
22174 DAG.ReplaceAllUsesOfValueWith(SDValue(Ld, 1), NewChain);
22175 DAG.UpdateNodeOperands(NewChain.getNode(), SDValue(Ld, 1),
22176 SDValue(ResNode.getNode(), 1));
22179 return DAG.getNode(ISD::BITCAST, dl, VT, ResNode);
22183 // Emit a zeroed vector and insert the desired subvector on its low 128-bit half.
22185 SDValue Zeros = getZeroVector(VT, Subtarget, DAG, dl);
22186 SDValue InsV = Insert128BitVector(Zeros, V1.getOperand(0), 0, DAG, dl);
22187 return DCI.CombineTo(N, InsV);
22190 //===--------------------------------------------------------------------===//
22191 // Combine some shuffles into subvector extracts and inserts:
22194 // vector_shuffle <4, 5, 6, 7, u, u, u, u> or <2, 3, u, u>
22195 if (isShuffleHigh128VectorInsertLow(SVOp)) {
22196 SDValue V = Extract128BitVector(V1, NumElems/2, DAG, dl);
22197 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, 0, DAG, dl);
22198 return DCI.CombineTo(N, InsV);
22201 // vector_shuffle <u, u, u, u, 0, 1, 2, 3> or <u, u, 0, 1>
22202 if (isShuffleLow128VectorInsertHigh(SVOp)) {
22203 SDValue V = Extract128BitVector(V1, 0, DAG, dl);
22204 SDValue InsV = Insert128BitVector(DAG.getUNDEF(VT), V, NumElems/2, DAG, dl);
22205 return DCI.CombineTo(N, InsV);
22211 /// \brief Combine an arbitrary chain of shuffles into a single instruction if
22214 /// This is the leaf of the recursive combine below. When we have found some
22215 /// chain of single-use x86 shuffle instructions and accumulated the combined
22216 /// shuffle mask represented by them, this will try to pattern match that mask
22217 /// into either a single instruction if there is a special purpose instruction
22218 /// for this operation, or into a PSHUFB instruction which is a fully general
22219 /// instruction but should only be used to replace chains over a certain depth.
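/// As an illustration (not exhaustive): with a cumulative v2f64 mask of
/// <0, 0> this forms a single MOVDDUP (given SSE3) or MOVLHPS, while deeper
/// chains may instead be folded into one PSHUFB as described above.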
22220 static bool combineX86ShuffleChain(SDValue Op, SDValue Root, ArrayRef<int> Mask,
22221 int Depth, bool HasPSHUFB, SelectionDAG &DAG,
22222 TargetLowering::DAGCombinerInfo &DCI,
22223 const X86Subtarget *Subtarget) {
22224 assert(!Mask.empty() && "Cannot combine an empty shuffle mask!");
22226 // Find the operand that enters the chain. Note that multiple uses are OK
22227 // here, we're not going to remove the operand we find.
22228 SDValue Input = Op.getOperand(0);
22229 while (Input.getOpcode() == ISD::BITCAST)
22230 Input = Input.getOperand(0);
22232 MVT VT = Input.getSimpleValueType();
22233 MVT RootVT = Root.getSimpleValueType();
22236 // Just remove no-op shuffle masks.
22237 if (Mask.size() == 1) {
22238 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Input),
22243 // Use the float domain if the operand type is a floating point type.
22244 bool FloatDomain = VT.isFloatingPoint();
22246 // For floating point shuffles, we don't have free copies in the shuffle
22247 // instructions or the ability to load as part of the instruction, so
22248 // canonicalize their shuffles to UNPCK or MOV variants.
22250 // Note that even with AVX we prefer the PSHUFD form of shuffle for integer
22251 // vectors because it can have a load folded into it that UNPCK cannot. This
22252 // doesn't preclude something switching to the shorter encoding post-RA.
22254 if (Mask.equals(0, 0) || Mask.equals(1, 1)) {
22255 bool Lo = Mask.equals(0, 0);
22258 // Check if we have SSE3 which will let us use MOVDDUP. That instruction
22259 // is no slower than UNPCKLPD but has the option to fold the input operand
22260 // into even an unaligned memory load.
22261 if (Lo && Subtarget->hasSSE3()) {
22262 Shuffle = X86ISD::MOVDDUP;
22263 ShuffleVT = MVT::v2f64;
22265 // We have MOVLHPS and MOVHLPS throughout SSE and they encode smaller
22266 // than the UNPCK variants.
22267 Shuffle = Lo ? X86ISD::MOVLHPS : X86ISD::MOVHLPS;
22268 ShuffleVT = MVT::v4f32;
22270 if (Depth == 1 && Root->getOpcode() == Shuffle)
22271 return false; // Nothing to do!
22272 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22273 DCI.AddToWorklist(Op.getNode());
22274 if (Shuffle == X86ISD::MOVDDUP)
22275 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22277 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22278 DCI.AddToWorklist(Op.getNode());
22279 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22283 if (Subtarget->hasSSE3() &&
22284 (Mask.equals(0, 0, 2, 2) || Mask.equals(1, 1, 3, 3))) {
22285 bool Lo = Mask.equals(0, 0, 2, 2);
22286 unsigned Shuffle = Lo ? X86ISD::MOVSLDUP : X86ISD::MOVSHDUP;
22287 MVT ShuffleVT = MVT::v4f32;
22288 if (Depth == 1 && Root->getOpcode() == Shuffle)
22289 return false; // Nothing to do!
22290 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22291 DCI.AddToWorklist(Op.getNode());
22292 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op);
22293 DCI.AddToWorklist(Op.getNode());
22294 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22298 if (Mask.equals(0, 0, 1, 1) || Mask.equals(2, 2, 3, 3)) {
22299 bool Lo = Mask.equals(0, 0, 1, 1);
22300 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22301 MVT ShuffleVT = MVT::v4f32;
22302 if (Depth == 1 && Root->getOpcode() == Shuffle)
22303 return false; // Nothing to do!
22304 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22305 DCI.AddToWorklist(Op.getNode());
22306 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22307 DCI.AddToWorklist(Op.getNode());
22308 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22314 // We always canonicalize the 8 x i16 and 16 x i8 shuffles into their UNPCK
22315 // variants as none of these have single-instruction variants that are
22316 // superior to the UNPCK formulation.
22317 if (!FloatDomain &&
22318 (Mask.equals(0, 0, 1, 1, 2, 2, 3, 3) ||
22319 Mask.equals(4, 4, 5, 5, 6, 6, 7, 7) ||
22320 Mask.equals(0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7) ||
22321 Mask.equals(8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15,
22323 bool Lo = Mask[0] == 0;
22324 unsigned Shuffle = Lo ? X86ISD::UNPCKL : X86ISD::UNPCKH;
22325 if (Depth == 1 && Root->getOpcode() == Shuffle)
22326 return false; // Nothing to do!
22328 switch (Mask.size()) {
22330 ShuffleVT = MVT::v8i16;
22333 ShuffleVT = MVT::v16i8;
22336 llvm_unreachable("Impossible mask size!");
22338 Op = DAG.getNode(ISD::BITCAST, DL, ShuffleVT, Input);
22339 DCI.AddToWorklist(Op.getNode());
22340 Op = DAG.getNode(Shuffle, DL, ShuffleVT, Op, Op);
22341 DCI.AddToWorklist(Op.getNode());
22342 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22347 // Don't try to re-form single instruction chains under any circumstances now
22348 // that we've done encoding canonicalization for them.
22352 // If we have 3 or more shuffle instructions or a chain involving PSHUFB, we
22353 // can replace them with a single PSHUFB instruction profitably. Intel's
22354 // manuals suggest only using PSHUFB if doing so replaces 5 instructions, but
22355 // in practice PSHUFB tends to be *very* fast so we're more aggressive.
22356 if ((Depth >= 3 || HasPSHUFB) && Subtarget->hasSSSE3()) {
22357 SmallVector<SDValue, 16> PSHUFBMask;
22358 assert(Mask.size() <= 16 && "Can't shuffle elements smaller than bytes!");
22359 int Ratio = 16 / Mask.size();
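    // Worked example (illustration only): for a 4-element mask <2, 0, 1, 3>,
    // Ratio is 4 and the byte mask built below becomes
    //   <8,9,10,11, 0,1,2,3, 4,5,6,7, 12,13,14,15>
    // i.e. each vector lane expands to its Ratio consecutive byte indices.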
22360 for (unsigned i = 0; i < 16; ++i) {
22361 if (Mask[i / Ratio] == SM_SentinelUndef) {
22362 PSHUFBMask.push_back(DAG.getUNDEF(MVT::i8));
22365 int M = Mask[i / Ratio] != SM_SentinelZero
22366 ? Ratio * Mask[i / Ratio] + i % Ratio
22368 PSHUFBMask.push_back(DAG.getConstant(M, MVT::i8));
22370 Op = DAG.getNode(ISD::BITCAST, DL, MVT::v16i8, Input);
22371 DCI.AddToWorklist(Op.getNode());
22372 SDValue PSHUFBMaskOp =
22373 DAG.getNode(ISD::BUILD_VECTOR, DL, MVT::v16i8, PSHUFBMask);
22374 DCI.AddToWorklist(PSHUFBMaskOp.getNode());
22375 Op = DAG.getNode(X86ISD::PSHUFB, DL, MVT::v16i8, Op, PSHUFBMaskOp);
22376 DCI.AddToWorklist(Op.getNode());
22377 DCI.CombineTo(Root.getNode(), DAG.getNode(ISD::BITCAST, DL, RootVT, Op),
22382 // Failed to find any combines.
22386 /// \brief Fully generic combining of x86 shuffle instructions.
22388 /// This should be the last combine run over the x86 shuffle instructions. Once
22389 /// they have been fully optimized, this will recursively consider all chains
22390 /// of single-use shuffle instructions, build a generic model of the cumulative
22391 /// shuffle operation, and check for simpler instructions which implement this
22392 /// operation. We use this primarily for two purposes:
22394 /// 1) Collapse generic shuffles to specialized single instructions when
22395 /// equivalent. In most cases, this is just an encoding size win, but
22396 /// sometimes we will collapse multiple generic shuffles into a single
22397 /// special-purpose shuffle.
22398 /// 2) Look for sequences of shuffle instructions with 3 or more total
22399 /// instructions, and replace them with the slightly more expensive SSSE3
22400 /// PSHUFB instruction if available. We do this as the last combining step
22401 /// to ensure we avoid using PSHUFB if we can implement the shuffle with
22402 /// a suitable short sequence of other instructions. The PSHUFB will either
22403 /// use a register or have to read from memory and so is slightly (but only
22404 /// slightly) more expensive than the other shuffle instructions.
22406 /// Because this is inherently a quadratic operation (for each shuffle in
22407 /// a chain, we recurse up the chain), the depth is limited to 8 instructions.
22408 /// This should never be an issue in practice as the shuffle lowering doesn't
22409 /// produce sequences of more than 8 instructions.
22411 /// FIXME: We will currently miss some cases where the redundant shuffling
22412 /// would simplify under the threshold for PSHUFB formation because of
22413 /// combine-ordering. To fix this, we should do the redundant instruction
22414 /// combining in this recursive walk.
22415 static bool combineX86ShufflesRecursively(SDValue Op, SDValue Root,
22416 ArrayRef<int> RootMask,
22417 int Depth, bool HasPSHUFB,
22419 TargetLowering::DAGCombinerInfo &DCI,
22420 const X86Subtarget *Subtarget) {
22421 // Bound the depth of our recursive combine because this is ultimately
22422 // quadratic in nature.
22426 // Directly rip through bitcasts to find the underlying operand.
22427 while (Op.getOpcode() == ISD::BITCAST && Op.getOperand(0).hasOneUse())
22428 Op = Op.getOperand(0);
22430 MVT VT = Op.getSimpleValueType();
22431 if (!VT.isVector())
22432 return false; // Bail if we hit a non-vector.
22433 // FIXME: This routine should be taught about 256-bit shuffles, or a 256-bit
22434 // version should be added.
22435 if (VT.getSizeInBits() != 128)
22438 assert(Root.getSimpleValueType().isVector() &&
22439 "Shuffles operate on vector types!");
22440 assert(VT.getSizeInBits() == Root.getSimpleValueType().getSizeInBits() &&
22441 "Can only combine shuffles of the same vector register size.");
22443 if (!isTargetShuffle(Op.getOpcode()))
22445 SmallVector<int, 16> OpMask;
22447 bool HaveMask = getTargetShuffleMask(Op.getNode(), VT, OpMask, IsUnary);
22448 // We can only combine unary shuffles for which we can decode the mask.
22449 if (!HaveMask || !IsUnary)
22452 assert(VT.getVectorNumElements() == OpMask.size() &&
22453 "Different mask size from vector size!");
22454 assert(((RootMask.size() > OpMask.size() &&
22455 RootMask.size() % OpMask.size() == 0) ||
22456 (OpMask.size() > RootMask.size() &&
22457 OpMask.size() % RootMask.size() == 0) ||
22458 OpMask.size() == RootMask.size()) &&
22459 "The smaller number of elements must divide the larger.");
22460 int RootRatio = std::max<int>(1, OpMask.size() / RootMask.size());
22461 int OpRatio = std::max<int>(1, RootMask.size() / OpMask.size());
22462 assert(((RootRatio == 1 && OpRatio == 1) ||
22463 (RootRatio == 1) != (OpRatio == 1)) &&
22464 "Must not have a ratio for both incoming and op masks!");
22466 SmallVector<int, 16> Mask;
22467 Mask.reserve(std::max(OpMask.size(), RootMask.size()));
22469 // Merge this shuffle operation's mask into our accumulated mask. Note that
22470 // this shuffle's mask will be the first applied to the input, followed by the
22471 // root mask to get us all the way to the root value arrangement. The reason
22472 // for this order is that we are recursing up the operation chain.
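    // Worked example (illustration only): composing a 4-lane dword OpMask
    // <1, 0, 3, 2> under an 8-lane word RootMask <2, 3, 0, 1, 4, 5, 6, 7>
    // gives OpRatio == 2 and the merged word mask <0, 1, 2, 3, 6, 7, 4, 5>.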
22473 for (int i = 0, e = std::max(OpMask.size(), RootMask.size()); i < e; ++i) {
22474 int RootIdx = i / RootRatio;
22475 if (RootMask[RootIdx] < 0) {
22476 // This is a zero or undef lane, we're done.
22477 Mask.push_back(RootMask[RootIdx]);
22481 int RootMaskedIdx = RootMask[RootIdx] * RootRatio + i % RootRatio;
22482 int OpIdx = RootMaskedIdx / OpRatio;
22483 if (OpMask[OpIdx] < 0) {
22484 // The incoming lanes are zero or undef; it doesn't matter which ones we pick.
22486 Mask.push_back(OpMask[OpIdx]);
22490 // Ok, we have non-zero lanes, map them through.
22491 Mask.push_back(OpMask[OpIdx] * OpRatio +
22492 RootMaskedIdx % OpRatio);
22495 // See if we can recurse into the operand to combine more things.
22496 switch (Op.getOpcode()) {
22497 case X86ISD::PSHUFB:
22499 case X86ISD::PSHUFD:
22500 case X86ISD::PSHUFHW:
22501 case X86ISD::PSHUFLW:
22502 if (Op.getOperand(0).hasOneUse() &&
22503 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22504 HasPSHUFB, DAG, DCI, Subtarget))
22508 case X86ISD::UNPCKL:
22509 case X86ISD::UNPCKH:
22510 assert(Op.getOperand(0) == Op.getOperand(1) && "We only combine unary shuffles!");
22511 // We can't check for a single use; we have to check that this shuffle is the only user.
22512 if (Op->isOnlyUserOf(Op.getOperand(0).getNode()) &&
22513 combineX86ShufflesRecursively(Op.getOperand(0), Root, Mask, Depth + 1,
22514 HasPSHUFB, DAG, DCI, Subtarget))
22519 // Minor canonicalization of the accumulated shuffle mask to make it easier
22520 // to match below. All this does is detect masks with sequential pairs of
22521 // elements, and shrink them to the half-width mask. It does this in a loop
22522 // so it will reduce the size of the mask to the minimal width mask which
22523 // performs an equivalent shuffle.
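  // For instance (illustration only): the word mask <0,1, 4,5, 2,3, 6,7>
  // widens to the dword mask <0, 2, 1, 3>, which cannot be widened further
  // because its pairs are no longer sequential.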
22524 SmallVector<int, 16> WidenedMask;
22525 while (Mask.size() > 1 && canWidenShuffleElements(Mask, WidenedMask)) {
22526 Mask = std::move(WidenedMask);
22527 WidenedMask.clear();
22530 return combineX86ShuffleChain(Op, Root, Mask, Depth, HasPSHUFB, DAG, DCI,
22534 /// \brief Get the PSHUF-style mask from PSHUF node.
22536 /// This is a very minor wrapper around getTargetShuffleMask to ease forming v4
22537 /// PSHUF-style masks that can be reused with such instructions.
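/// For example (illustrative, assuming the usual immediate encoding): a
/// PSHUFHW whose full v8i16 mask is <0,1,2,3, 7,6,5,4> is returned here as
/// the 4-element mask <3, 2, 1, 0>, re-based into the 0-3 immediate range.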
22538 static SmallVector<int, 4> getPSHUFShuffleMask(SDValue N) {
22539 SmallVector<int, 4> Mask;
22541 bool HaveMask = getTargetShuffleMask(N.getNode(), N.getSimpleValueType(), Mask, IsUnary);
22545 switch (N.getOpcode()) {
22546 case X86ISD::PSHUFD:
22548 case X86ISD::PSHUFLW:
22551 case X86ISD::PSHUFHW:
22552 Mask.erase(Mask.begin(), Mask.begin() + 4);
22553 for (int &M : Mask)
22557 llvm_unreachable("No valid shuffle instruction found!");
22561 /// \brief Search for a combinable shuffle across a chain ending in pshufd.
22563 /// We walk up the chain and look for a combinable shuffle, skipping over
22564 /// shuffles that we could hoist this shuffle's transformation past without
22565 /// altering anything.
22567 combineRedundantDWordShuffle(SDValue N, MutableArrayRef<int> Mask,
22569 TargetLowering::DAGCombinerInfo &DCI) {
22570 assert(N.getOpcode() == X86ISD::PSHUFD &&
22571 "Called with something other than an x86 128-bit half shuffle!");
22574 // Walk up a single-use chain looking for a combinable shuffle. Keep a stack
22575 // of the shuffles in the chain so that we can form a fresh chain to replace this chain.
22577 SmallVector<SDValue, 8> Chain;
22578 SDValue V = N.getOperand(0);
22579 for (; V.hasOneUse(); V = V.getOperand(0)) {
22580 switch (V.getOpcode()) {
22582 return SDValue(); // Nothing combined!
22585 // Skip bitcasts as we always know the type for the target-specific shuffles we encounter.
22589 case X86ISD::PSHUFD:
22590 // Found another dword shuffle.
22593 case X86ISD::PSHUFLW:
22594 // Check that the low words (being shuffled) are the identity in the
22595 // dword shuffle, and the high words are self-contained.
22596 if (Mask[0] != 0 || Mask[1] != 1 ||
22597 !(Mask[2] >= 2 && Mask[2] < 4 && Mask[3] >= 2 && Mask[3] < 4))
22600 Chain.push_back(V);
22603 case X86ISD::PSHUFHW:
22604 // Check that the high words (being shuffled) are the identity in the
22605 // dword shuffle, and the low words are self-contained.
22606 if (Mask[2] != 2 || Mask[3] != 3 ||
22607 !(Mask[0] >= 0 && Mask[0] < 2 && Mask[1] >= 0 && Mask[1] < 2))
22610 Chain.push_back(V);
22613 case X86ISD::UNPCKL:
22614 case X86ISD::UNPCKH:
22615 // For either i8 -> i16 or i16 -> i32 unpacks, we can combine a dword
22616 // shuffle into a preceding word shuffle.
22617 if (V.getValueType() != MVT::v16i8 && V.getValueType() != MVT::v8i16)
22620 // Search for a half-shuffle which we can combine with.
22621 unsigned CombineOp =
22622 V.getOpcode() == X86ISD::UNPCKL ? X86ISD::PSHUFLW : X86ISD::PSHUFHW;
22623 if (V.getOperand(0) != V.getOperand(1) ||
22624 !V->isOnlyUserOf(V.getOperand(0).getNode()))
22626 Chain.push_back(V);
22627 V = V.getOperand(0);
22629 switch (V.getOpcode()) {
22631 return SDValue(); // Nothing to combine.
22633 case X86ISD::PSHUFLW:
22634 case X86ISD::PSHUFHW:
22635 if (V.getOpcode() == CombineOp)
22638 Chain.push_back(V);
22642 V = V.getOperand(0);
22646 } while (V.hasOneUse());
22649 // Break out of the loop if we break out of the switch.
22653 if (!V.hasOneUse())
22654 // We fell out of the loop without finding a viable combining instruction.
22657 // Merge this node's mask and our incoming mask.
22658 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22659 for (int &M : Mask)
22661 V = DAG.getNode(V.getOpcode(), DL, V.getValueType(), V.getOperand(0),
22662 getV4X86ShuffleImm8ForMask(Mask, DAG));
22664 // Rebuild the chain around this new shuffle.
22665 while (!Chain.empty()) {
22666 SDValue W = Chain.pop_back_val();
22668 if (V.getValueType() != W.getOperand(0).getValueType())
22669 V = DAG.getNode(ISD::BITCAST, DL, W.getOperand(0).getValueType(), V);
22671 switch (W.getOpcode()) {
22673 llvm_unreachable("Only PSHUF and UNPCK instructions get here!");
22675 case X86ISD::UNPCKL:
22676 case X86ISD::UNPCKH:
22677 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, V);
22680 case X86ISD::PSHUFD:
22681 case X86ISD::PSHUFLW:
22682 case X86ISD::PSHUFHW:
22683 V = DAG.getNode(W.getOpcode(), DL, W.getValueType(), V, W.getOperand(1));
22687 if (V.getValueType() != N.getValueType())
22688 V = DAG.getNode(ISD::BITCAST, DL, N.getValueType(), V);
22690 // Return the new chain to replace N.
22694 /// \brief Search for a combinable shuffle across a chain ending in pshuflw or pshufhw.
22696 /// We walk up the chain, skipping shuffles of the other half and looking
22697 /// through shuffles which switch halves trying to find a shuffle of the same
22698 /// pair of dwords.
22699 static bool combineRedundantHalfShuffle(SDValue N, MutableArrayRef<int> Mask,
22701 TargetLowering::DAGCombinerInfo &DCI) {
22703 (N.getOpcode() == X86ISD::PSHUFLW || N.getOpcode() == X86ISD::PSHUFHW) &&
22704 "Called with something other than an x86 128-bit half shuffle!");
22706 unsigned CombineOpcode = N.getOpcode();
22708 // Walk up a single-use chain looking for a combinable shuffle.
22709 SDValue V = N.getOperand(0);
22710 for (; V.hasOneUse(); V = V.getOperand(0)) {
22711 switch (V.getOpcode()) {
22713 return false; // Nothing combined!
22716 // Skip bitcasts as we always know the type for the target-specific shuffles we encounter.
22720 case X86ISD::PSHUFLW:
22721 case X86ISD::PSHUFHW:
22722 if (V.getOpcode() == CombineOpcode)
22725 // Other-half shuffles are no-ops.
22728 // Break out of the loop if we break out of the switch.
22732 if (!V.hasOneUse())
22733 // We fell out of the loop without finding a viable combining instruction.
22736 // Combine away the bottom node as its shuffle will be accumulated into
22737 // a preceding shuffle.
22738 DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22740 // Record the old value.
22743 // Merge this node's mask and our incoming mask (adjusted to account for all
22744 // the pshufd instructions encountered).
22745 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22746 for (int &M : Mask)
22748 V = DAG.getNode(V.getOpcode(), DL, MVT::v8i16, V.getOperand(0),
22749 getV4X86ShuffleImm8ForMask(Mask, DAG));
22751 // Check that the shuffles didn't cancel each other out. If not, we need to
22752 // combine to the new one.
22754 // Replace the combinable shuffle with the combined one, updating all users
22755 // so that we re-evaluate the chain here.
22756 DCI.CombineTo(Old.getNode(), V, /*AddTo*/ true);
22761 /// \brief Try to combine x86 target specific shuffles.
22762 static SDValue PerformTargetShuffleCombine(SDValue N, SelectionDAG &DAG,
22763 TargetLowering::DAGCombinerInfo &DCI,
22764 const X86Subtarget *Subtarget) {
22766 MVT VT = N.getSimpleValueType();
22767 SmallVector<int, 4> Mask;
22769 switch (N.getOpcode()) {
22770 case X86ISD::PSHUFD:
22771 case X86ISD::PSHUFLW:
22772 case X86ISD::PSHUFHW:
22773 Mask = getPSHUFShuffleMask(N);
22774 assert(Mask.size() == 4);
22780 // Nuke no-op shuffles that show up after combining.
22781 if (isNoopShuffleMask(Mask))
22782 return DCI.CombineTo(N.getNode(), N.getOperand(0), /*AddTo*/ true);
22784 // Look for simplifications involving one or two shuffle instructions.
22785 SDValue V = N.getOperand(0);
22786 switch (N.getOpcode()) {
22789 case X86ISD::PSHUFLW:
22790 case X86ISD::PSHUFHW:
22791 assert(VT == MVT::v8i16);
22794 if (combineRedundantHalfShuffle(N, Mask, DAG, DCI))
22795 return SDValue(); // We combined away this shuffle, so we're done.
22797 // See if this reduces to a PSHUFD which is no more expensive and can
22798 // combine with more operations. Note that it has to at least flip the
22799 // dwords as otherwise it would have been removed as a no-op.
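    // For example (illustration only): a PSHUFLW with mask <2, 3, 0, 1>
    // exchanges words 0-1 with words 2-3, which is the same as a PSHUFD that
    // swaps dwords 0 and 1 -- exactly the DMask built below.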
22800 if (Mask[0] == 2 && Mask[1] == 3 && Mask[2] == 0 && Mask[3] == 1) {
22801 int DMask[] = {0, 1, 2, 3};
22802 int DOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 2;
22803 DMask[DOffset + 0] = DOffset + 1;
22804 DMask[DOffset + 1] = DOffset + 0;
22805 V = DAG.getNode(ISD::BITCAST, DL, MVT::v4i32, V);
22806 DCI.AddToWorklist(V.getNode());
22807 V = DAG.getNode(X86ISD::PSHUFD, DL, MVT::v4i32, V,
22808 getV4X86ShuffleImm8ForMask(DMask, DAG));
22809 DCI.AddToWorklist(V.getNode());
22810 return DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, V);
22813 // Look for shuffle patterns which can be implemented as a single unpack.
22814 // FIXME: This doesn't handle the location of the PSHUFD generically, and
22815 // only works when we have a PSHUFD followed by two half-shuffles.
22816 if (Mask[0] == Mask[1] && Mask[2] == Mask[3] &&
22817 (V.getOpcode() == X86ISD::PSHUFLW ||
22818 V.getOpcode() == X86ISD::PSHUFHW) &&
22819 V.getOpcode() != N.getOpcode() &&
22821 SDValue D = V.getOperand(0);
22822 while (D.getOpcode() == ISD::BITCAST && D.hasOneUse())
22823 D = D.getOperand(0);
22824 if (D.getOpcode() == X86ISD::PSHUFD && D.hasOneUse()) {
22825 SmallVector<int, 4> VMask = getPSHUFShuffleMask(V);
22826 SmallVector<int, 4> DMask = getPSHUFShuffleMask(D);
22827 int NOffset = N.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
22828 int VOffset = V.getOpcode() == X86ISD::PSHUFLW ? 0 : 4;
22830 for (int i = 0; i < 4; ++i) {
22831 WordMask[i + NOffset] = Mask[i] + NOffset;
22832 WordMask[i + VOffset] = VMask[i] + VOffset;
22834 // Map the word mask through the DWord mask.
22836 for (int i = 0; i < 8; ++i)
22837 MappedMask[i] = 2 * DMask[WordMask[i] / 2] + WordMask[i] % 2;
22838 const int UnpackLoMask[] = {0, 0, 1, 1, 2, 2, 3, 3};
22839 const int UnpackHiMask[] = {4, 4, 5, 5, 6, 6, 7, 7};
22840 if (std::equal(std::begin(MappedMask), std::end(MappedMask),
22841 std::begin(UnpackLoMask)) ||
22842 std::equal(std::begin(MappedMask), std::end(MappedMask),
22843 std::begin(UnpackHiMask))) {
22844 // We can replace all three shuffles with an unpack.
22845 V = DAG.getNode(ISD::BITCAST, DL, MVT::v8i16, D.getOperand(0));
22846 DCI.AddToWorklist(V.getNode());
22847 return DAG.getNode(MappedMask[0] == 0 ? X86ISD::UNPCKL
22849 DL, MVT::v8i16, V, V);
22856 case X86ISD::PSHUFD:
22857 if (SDValue NewN = combineRedundantDWordShuffle(N, Mask, DAG, DCI))
22866 /// \brief Try to combine a shuffle into a target-specific add-sub node.
22868 /// We combine this directly on the abstract vector shuffle nodes so it is
22869 /// easier to generically match. We also insert dummy vector shuffle nodes for
22870 /// the operands which explicitly discard the lanes which are unused by this
22871 /// operation to try to flow through the rest of the combiner the fact that
22872 /// they're unused.
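/// For example (illustrative): for v4f32, shuffle(fsub(A,B), fadd(A,B),
/// <0, 5, 2, 7>) takes the subtraction in the even lanes and the addition in
/// the odd lanes, which is precisely the ADDSUBPS lane pattern matched below.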
22873 static SDValue combineShuffleToAddSub(SDNode *N, SelectionDAG &DAG) {
22875 EVT VT = N->getValueType(0);
22877 // We only handle target-independent shuffles.
22878 // FIXME: It would be easy and harmless to use the target shuffle mask
22879 // extraction tool to support more.
22880 if (N->getOpcode() != ISD::VECTOR_SHUFFLE)
22883 auto *SVN = cast<ShuffleVectorSDNode>(N);
22884 ArrayRef<int> Mask = SVN->getMask();
22885 SDValue V1 = N->getOperand(0);
22886 SDValue V2 = N->getOperand(1);
22888 // We require the first shuffle operand to be the SUB node, and the second to
22889 // be the ADD node.
22890 // FIXME: We should support the commuted patterns.
22891 if (V1->getOpcode() != ISD::FSUB || V2->getOpcode() != ISD::FADD)
22894 // If there are other uses of these operations we can't fold them.
22895 if (!V1->hasOneUse() || !V2->hasOneUse())
22898 // Ensure that both operations have the same operands. Note that we can
22899 // commute the FADD operands.
22900 SDValue LHS = V1->getOperand(0), RHS = V1->getOperand(1);
22901 if ((V2->getOperand(0) != LHS || V2->getOperand(1) != RHS) &&
22902 (V2->getOperand(0) != RHS || V2->getOperand(1) != LHS))
22905 // We're looking for blends between FADD and FSUB nodes. We insist on these
22906 // nodes being lined up in a specific expected pattern.
22907 if (!(isShuffleEquivalent(V1, V2, Mask, 0, 3) ||
22908 isShuffleEquivalent(V1, V2, Mask, 0, 5, 2, 7) ||
22909 isShuffleEquivalent(V1, V2, Mask, 0, 9, 2, 11, 4, 13, 6, 15)))
22912 // Only specific types are legal at this point, assert so we notice if and
22913 // when these change.
22914 assert((VT == MVT::v4f32 || VT == MVT::v2f64 || VT == MVT::v8f32 ||
22915 VT == MVT::v4f64) &&
22916 "Unknown vector type encountered!");
22918 return DAG.getNode(X86ISD::ADDSUB, DL, VT, LHS, RHS);
22921 /// PerformShuffleCombine - Performs several different shuffle combines.
22922 static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
22923 TargetLowering::DAGCombinerInfo &DCI,
22924 const X86Subtarget *Subtarget) {
22926 SDValue N0 = N->getOperand(0);
22927 SDValue N1 = N->getOperand(1);
22928 EVT VT = N->getValueType(0);
22930 // Don't create instructions with illegal types after legalize types has run.
22931 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
22932 if (!DCI.isBeforeLegalize() && !TLI.isTypeLegal(VT.getVectorElementType()))
22935 // If we have legalized the vector types, look for blends of FADD and FSUB
22936 // nodes that we can fuse into an ADDSUB node.
22937 if (TLI.isTypeLegal(VT) && Subtarget->hasSSE3())
22938 if (SDValue AddSub = combineShuffleToAddSub(N, DAG))
22941 // Combine 256-bit vector shuffles. This is only profitable when in AVX mode
22942 if (Subtarget->hasFp256() && VT.is256BitVector() &&
22943 N->getOpcode() == ISD::VECTOR_SHUFFLE)
22944 return PerformShuffleCombine256(N, DAG, DCI, Subtarget);
22946 // During Type Legalization, when promoting illegal vector types,
22947 // the backend might introduce new shuffle dag nodes and bitcasts.
22949 // This code performs the following transformation:
22950 // fold: (shuffle (bitcast (BINOP A, B)), Undef, <Mask>) ->
22951 // (shuffle (BINOP (bitcast A), (bitcast B)), Undef, <Mask>)
22953 // We do this only if both the bitcast and the BINOP dag nodes have
22954 // one use. Also, perform this transformation only if the new binary
22955 // operation is legal. This is to avoid introducing dag nodes that
22956 // potentially need to be further expanded (or custom lowered) into a
22957 // less optimal sequence of dag nodes.
22958 if (!DCI.isBeforeLegalize() && DCI.isBeforeLegalizeOps() &&
22959 N1.getOpcode() == ISD::UNDEF && N0.hasOneUse() &&
22960 N0.getOpcode() == ISD::BITCAST) {
22961 SDValue BC0 = N0.getOperand(0);
22962 EVT SVT = BC0.getValueType();
22963 unsigned Opcode = BC0.getOpcode();
22964 unsigned NumElts = VT.getVectorNumElements();
22966 if (BC0.hasOneUse() && SVT.isVector() &&
22967 SVT.getVectorNumElements() * 2 == NumElts &&
22968 TLI.isOperationLegal(Opcode, VT)) {
22969 bool CanFold = false;
22981 unsigned SVTNumElts = SVT.getVectorNumElements();
22982 ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
22983 for (unsigned i = 0, e = SVTNumElts; i != e && CanFold; ++i)
22984 CanFold = SVOp->getMaskElt(i) == (int)(i * 2);
22985 for (unsigned i = SVTNumElts, e = NumElts; i != e && CanFold; ++i)
22986 CanFold = SVOp->getMaskElt(i) < 0;
22989 SDValue BC00 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(0));
22990 SDValue BC01 = DAG.getNode(ISD::BITCAST, dl, VT, BC0.getOperand(1));
22991 SDValue NewBinOp = DAG.getNode(BC0.getOpcode(), dl, VT, BC00, BC01);
22992 return DAG.getVectorShuffle(VT, dl, NewBinOp, N1, &SVOp->getMask()[0]);
22998 // Only handle 128-bit wide vectors from here on.
22998 if (!VT.is128BitVector())
23001 // Combine a vector_shuffle that is equal to build_vector load1, load2, load3,
23002 // load4, <0, 1, 2, 3> into a 128-bit load if the load addresses are
23003 // consecutive, non-overlapping, and in the right order.
23004 SmallVector<SDValue, 16> Elts;
23005 for (unsigned i = 0, e = VT.getVectorNumElements(); i != e; ++i)
23006 Elts.push_back(getShuffleScalarElt(N, i, DAG, 0));
23008 SDValue LD = EltsFromConsecutiveLoads(VT, Elts, dl, DAG, true);
23012 if (isTargetShuffle(N->getOpcode())) {
23014 PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget);
23015 if (Shuffle.getNode())
23018 // Try recursively combining arbitrary sequences of x86 shuffle
23019 // instructions into higher-order shuffles. We do this after combining
23020 // specific PSHUF instruction sequences into their minimal form so that we
23021 // can evaluate how many specialized shuffle instructions are involved in
23022 // a particular chain.
23023 SmallVector<int, 1> NonceMask; // Just a placeholder.
23024 NonceMask.push_back(0);
23025 if (combineX86ShufflesRecursively(SDValue(N, 0), SDValue(N, 0), NonceMask,
23026 /*Depth*/ 1, /*HasPSHUFB*/ false, DAG,
23028 return SDValue(); // This routine will use CombineTo to replace N.
23034 /// PerformTruncateCombine - Converts a truncate operation to
23035 /// a sequence of vector shuffle operations.
23036 /// This is possible when we truncate a 256-bit vector to a 128-bit vector.
23037 static SDValue PerformTruncateCombine(SDNode *N, SelectionDAG &DAG,
23038 TargetLowering::DAGCombinerInfo &DCI,
23039 const X86Subtarget *Subtarget) {
23043 /// XFormVExtractWithShuffleIntoLoad - Check if a vector extract from a target
23044 /// specific shuffle of a load can be folded into a single element load.
23045 /// Similar handling for VECTOR_SHUFFLE is performed by DAGCombiner, but
23046 /// shuffles have been custom lowered so we need to handle those here.
23047 static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
23048 TargetLowering::DAGCombinerInfo &DCI) {
23049 if (DCI.isBeforeLegalizeOps())
23052 SDValue InVec = N->getOperand(0);
23053 SDValue EltNo = N->getOperand(1);
23055 if (!isa<ConstantSDNode>(EltNo))
23058 EVT OriginalVT = InVec.getValueType();
23060 if (InVec.getOpcode() == ISD::BITCAST) {
23061 // Don't duplicate a load with other uses.
23062 if (!InVec.hasOneUse())
23064 EVT BCVT = InVec.getOperand(0).getValueType();
23065 if (BCVT.getVectorNumElements() != OriginalVT.getVectorNumElements())
23067 InVec = InVec.getOperand(0);
23070 EVT CurrentVT = InVec.getValueType();
23072 if (!isTargetShuffle(InVec.getOpcode()))
23075 // Don't duplicate a load with other uses.
23076 if (!InVec.hasOneUse())
23079 SmallVector<int, 16> ShuffleMask;
23081 if (!getTargetShuffleMask(InVec.getNode(), CurrentVT.getSimpleVT(),
23082 ShuffleMask, UnaryShuffle))
23085 // Select the input vector, guarding against an out-of-range extract index.
23086 unsigned NumElems = CurrentVT.getVectorNumElements();
23087 int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
23088 int Idx = (Elt > (int)NumElems) ? -1 : ShuffleMask[Elt];
23089 SDValue LdNode = (Idx < (int)NumElems) ? InVec.getOperand(0)
23090 : InVec.getOperand(1);
23092 // If inputs to shuffle are the same for both ops, then allow 2 uses
23093 unsigned AllowedUses = InVec.getNumOperands() > 1 &&
23094 InVec.getOperand(0) == InVec.getOperand(1) ? 2 : 1;
23096 if (LdNode.getOpcode() == ISD::BITCAST) {
23097 // Don't duplicate a load with other uses.
23098 if (!LdNode.getNode()->hasNUsesOfValue(AllowedUses, 0))
23101 AllowedUses = 1; // only allow 1 load use if we have a bitcast
23102 LdNode = LdNode.getOperand(0);
23105 if (!ISD::isNormalLoad(LdNode.getNode()))
23108 LoadSDNode *LN0 = cast<LoadSDNode>(LdNode);
23110 if (!LN0 ||!LN0->hasNUsesOfValue(AllowedUses, 0) || LN0->isVolatile())
23113 EVT EltVT = N->getValueType(0);
23114 // If there's a bitcast before the shuffle, check if the load type and
23115 // alignment are valid.
23116 unsigned Align = LN0->getAlignment();
23117 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23118 unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
23119 EltVT.getTypeForEVT(*DAG.getContext()));
23121 if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
23124 // All checks match so transform back to vector_shuffle so that DAG combiner
23125 // can finish the job
23128 // Create a shuffle node, taking into account the case that it's a unary shuffle.
23129 SDValue Shuffle = (UnaryShuffle) ? DAG.getUNDEF(CurrentVT)
23130 : InVec.getOperand(1);
23131 Shuffle = DAG.getVectorShuffle(CurrentVT, dl,
23132 InVec.getOperand(0), Shuffle,
23134 Shuffle = DAG.getNode(ISD::BITCAST, dl, OriginalVT, Shuffle);
23135 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), Shuffle,
23139 /// \brief Detect bitcasts from i32 to the x86mmx low word. Since MMX types are
23140 /// special and don't usually play with other vector types, it's better to
23141 /// handle them early to be sure we emit efficient code by avoiding
23142 /// store-load conversions.
23143 static SDValue PerformBITCASTCombine(SDNode *N, SelectionDAG &DAG) {
23144 if (N->getValueType(0) != MVT::x86mmx ||
23145 N->getOperand(0)->getOpcode() != ISD::BUILD_VECTOR ||
23146 N->getOperand(0)->getValueType(0) != MVT::v2i32)
23149 SDValue V = N->getOperand(0);
23150 ConstantSDNode *C = dyn_cast<ConstantSDNode>(V.getOperand(1));
23151 if (C && C->getZExtValue() == 0 && V.getOperand(0).getValueType() == MVT::i32)
23152 return DAG.getNode(X86ISD::MMX_MOVW2D, SDLoc(V.getOperand(0)),
23153 N->getValueType(0), V.getOperand(0));
23158 /// PerformEXTRACT_VECTOR_ELTCombine - Detect vector gather/scatter index
23159 /// generation and convert it from being a bunch of shuffles and extracts
23160 /// into a somewhat faster sequence. For i686, the best sequence is apparently
23161 /// storing the value and loading scalars back, while for x64 we should
23162 /// use 64-bit extracts and shifts.
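/// Sketch of the 64-bit path (assumed little-endian lane layout): a fully
/// extracted v4i32 is recovered from two i64 extracts as
///   lane0 = trunc(lo64), lane1 = trunc(lo64 >> 32),
///   lane2 = trunc(hi64), lane3 = trunc(hi64 >> 32).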
23163 static SDValue PerformEXTRACT_VECTOR_ELTCombine(SDNode *N, SelectionDAG &DAG,
23164 TargetLowering::DAGCombinerInfo &DCI) {
23165 SDValue NewOp = XFormVExtractWithShuffleIntoLoad(N, DAG, DCI);
23166 if (NewOp.getNode())
23169 SDValue InputVector = N->getOperand(0);
23171 // Detect mmx to i32 conversion through a v2i32 elt extract.
23172 if (InputVector.getOpcode() == ISD::BITCAST && InputVector.hasOneUse() &&
23173 N->getValueType(0) == MVT::i32 &&
23174 InputVector.getValueType() == MVT::v2i32) {
23176 // The bitcast source is a direct mmx result.
23177 SDValue MMXSrc = InputVector.getNode()->getOperand(0);
23178 if (MMXSrc.getValueType() == MVT::x86mmx)
23179 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23180 N->getValueType(0),
23181 InputVector.getNode()->getOperand(0));
23183 // The mmx is indirect: (i64 extract_elt (v1i64 bitcast (x86mmx ...))).
23184 SDValue MMXSrcOp = MMXSrc.getOperand(0);
23185 if (MMXSrc.getOpcode() == ISD::EXTRACT_VECTOR_ELT && MMXSrc.hasOneUse() &&
23186 MMXSrc.getValueType() == MVT::i64 && MMXSrcOp.hasOneUse() &&
23187 MMXSrcOp.getOpcode() == ISD::BITCAST &&
23188 MMXSrcOp.getValueType() == MVT::v1i64 &&
23189 MMXSrcOp.getOperand(0).getValueType() == MVT::x86mmx)
23190 return DAG.getNode(X86ISD::MMX_MOVD2W, SDLoc(InputVector),
23191 N->getValueType(0),
23192 MMXSrcOp.getOperand(0));
23195 // Only operate on vectors of 4 elements, where the alternative shuffling
23196 // gets to be more expensive.
23197 if (InputVector.getValueType() != MVT::v4i32)
23200 // Check whether every use of InputVector is an EXTRACT_VECTOR_ELT with a
23201 // single use which is a sign-extend or zero-extend, and all elements are used.
23203 SmallVector<SDNode *, 4> Uses;
23204 unsigned ExtractedElements = 0;
23205 for (SDNode::use_iterator UI = InputVector.getNode()->use_begin(),
23206 UE = InputVector.getNode()->use_end(); UI != UE; ++UI) {
23207 if (UI.getUse().getResNo() != InputVector.getResNo())
23210 SDNode *Extract = *UI;
23211 if (Extract->getOpcode() != ISD::EXTRACT_VECTOR_ELT)
23214 if (Extract->getValueType(0) != MVT::i32)
23216 if (!Extract->hasOneUse())
23218 if (Extract->use_begin()->getOpcode() != ISD::SIGN_EXTEND &&
23219 Extract->use_begin()->getOpcode() != ISD::ZERO_EXTEND)
23221 if (!isa<ConstantSDNode>(Extract->getOperand(1)))
23224 // Record which element was extracted.
23225 ExtractedElements |=
23226 1 << cast<ConstantSDNode>(Extract->getOperand(1))->getZExtValue();
23228 Uses.push_back(Extract);
23231 // If not all the elements were used, this may not be worthwhile.
23232 if (ExtractedElements != 15)
23235 // Ok, we've now decided to do the transformation.
23236 // If 64-bit shifts are legal, use the extract-shift sequence,
23237 // otherwise bounce the vector off the cache.
23238 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23240 SDLoc dl(InputVector);
23242 if (TLI.isOperationLegal(ISD::SRA, MVT::i64)) {
23243 SDValue Cst = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, InputVector);
23244 EVT VecIdxTy = DAG.getTargetLoweringInfo().getVectorIdxTy();
23245 SDValue BottomHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23246 DAG.getConstant(0, VecIdxTy));
23247 SDValue TopHalf = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, MVT::i64, Cst,
23248 DAG.getConstant(1, VecIdxTy));
23250 SDValue ShAmt = DAG.getConstant(32,
23251 DAG.getTargetLoweringInfo().getShiftAmountTy(MVT::i64));
23252 Vals[0] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, BottomHalf);
23253 Vals[1] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
23254 DAG.getNode(ISD::SRA, dl, MVT::i64, BottomHalf, ShAmt));
23255 Vals[2] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, TopHalf);
23256 Vals[3] = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32,
23257 DAG.getNode(ISD::SRA, dl, MVT::i64, TopHalf, ShAmt));
23259 // Store the value to a temporary stack slot.
23260 SDValue StackPtr = DAG.CreateStackTemporary(InputVector.getValueType());
23261 SDValue Ch = DAG.getStore(DAG.getEntryNode(), dl, InputVector, StackPtr,
23262 MachinePointerInfo(), false, false, 0);
23264 EVT ElementType = InputVector.getValueType().getVectorElementType();
23265 unsigned EltSize = ElementType.getSizeInBits() / 8;
23267 // Replace each use (extract) with a load of the appropriate element.
23268 for (unsigned i = 0; i < 4; ++i) {
23269 uint64_t Offset = EltSize * i;
23270 SDValue OffsetVal = DAG.getConstant(Offset, TLI.getPointerTy());
23272 SDValue ScalarAddr = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(),
23273 StackPtr, OffsetVal);
23275 // Load the scalar.
23276 Vals[i] = DAG.getLoad(ElementType, dl, Ch,
23277 ScalarAddr, MachinePointerInfo(),
23278 false, false, false, 0);
23283 // Replace the extracts
23284 for (SmallVectorImpl<SDNode *>::iterator UI = Uses.begin(),
23285 UE = Uses.end(); UI != UE; ++UI) {
23286 SDNode *Extract = *UI;
23288 SDValue Idx = Extract->getOperand(1);
23289 uint64_t IdxVal = cast<ConstantSDNode>(Idx)->getZExtValue();
23290 DAG.ReplaceAllUsesOfValueWith(SDValue(Extract, 0), Vals[IdxVal]);
23293 // The replacement was made in place; don't return anything.
23297 /// \brief Matches a VSELECT onto min/max or returns 0 if the node doesn't match.
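/// For example (illustration only): vselect(setcc(x, y, ult), x, y) selects
/// the smaller unsigned value and so can map onto X86ISD::UMIN when the
/// subtarget provides the corresponding packed min instruction.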
23298 static std::pair<unsigned, bool>
23299 matchIntegerMINMAX(SDValue Cond, EVT VT, SDValue LHS, SDValue RHS,
23300 SelectionDAG &DAG, const X86Subtarget *Subtarget) {
23301 if (!VT.isVector())
23302 return std::make_pair(0, false);
23304 bool NeedSplit = false;
23305 switch (VT.getSimpleVT().SimpleTy) {
23306 default: return std::make_pair(0, false);
23309 if (!Subtarget->hasVLX())
23310 return std::make_pair(0, false);
23314 if (!Subtarget->hasBWI())
23315 return std::make_pair(0, false);
23319 if (!Subtarget->hasAVX512())
23320 return std::make_pair(0, false);
23325 if (!Subtarget->hasAVX2())
23327 if (!Subtarget->hasAVX())
23328 return std::make_pair(0, false);
23333 if (!Subtarget->hasSSE2())
23334 return std::make_pair(0, false);
23337 // SSE2 has only a small subset of the operations.
23338 bool hasUnsigned = Subtarget->hasSSE41() ||
23339 (Subtarget->hasSSE2() && VT == MVT::v16i8);
23340 bool hasSigned = Subtarget->hasSSE41() ||
23341 (Subtarget->hasSSE2() && VT == MVT::v8i16);
23343 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23346 // Check for x CC y ? x : y.
23347 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23348 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23353 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23356 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23359 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23362 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23364 // Check for x CC y ? y : x -- a min/max with reversed arms.
23365 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23366 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23371 Opc = hasUnsigned ? X86ISD::UMAX : 0; break;
23374 Opc = hasUnsigned ? X86ISD::UMIN : 0; break;
23377 Opc = hasSigned ? X86ISD::SMAX : 0; break;
23380 Opc = hasSigned ? X86ISD::SMIN : 0; break;
23384 return std::make_pair(Opc, NeedSplit);
23388 transformVSELECTtoBlendVECTOR_SHUFFLE(SDNode *N, SelectionDAG &DAG,
23389 const X86Subtarget *Subtarget) {
23391 SDValue Cond = N->getOperand(0);
23392 SDValue LHS = N->getOperand(1);
23393 SDValue RHS = N->getOperand(2);
23395 if (Cond.getOpcode() == ISD::SIGN_EXTEND) {
23396 SDValue CondSrc = Cond->getOperand(0);
23397 if (CondSrc->getOpcode() == ISD::SIGN_EXTEND_INREG)
23398 Cond = CondSrc->getOperand(0);
23401 if (!ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
23404 // A vselect where all conditions and data are constants can be optimized into
23405 // a single vector load by SelectionDAGLegalize::ExpandBUILD_VECTOR().
23406 if (ISD::isBuildVectorOfConstantSDNodes(LHS.getNode()) &&
23407 ISD::isBuildVectorOfConstantSDNodes(RHS.getNode()))
23410 unsigned MaskValue = 0;
23411 if (!BUILD_VECTORtoBlendMask(cast<BuildVectorSDNode>(Cond), MaskValue))
23414 MVT VT = N->getSimpleValueType(0);
23415 unsigned NumElems = VT.getVectorNumElements();
23416 SmallVector<int, 8> ShuffleMask(NumElems, -1);
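  // Worked example (illustration only): with NumElems == 4 and
  // MaskValue == 0b0101, the loop below produces the shuffle mask
  // <4, 1, 6, 3>; indices >= NumElems pick the corresponding RHS lane.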
23417 for (unsigned i = 0; i < NumElems; ++i) {
23418 // Be sure we emit undef where we can.
23419 if (Cond.getOperand(i)->getOpcode() == ISD::UNDEF)
23420 ShuffleMask[i] = -1;
23422 ShuffleMask[i] = i + NumElems * ((MaskValue >> i) & 1);
23425 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23426 if (!TLI.isShuffleMaskLegal(ShuffleMask, VT))
23428 return DAG.getVectorShuffle(VT, dl, LHS, RHS, &ShuffleMask[0]);
23431 /// PerformSELECTCombine - Do target-specific dag combines on SELECT and VSELECT nodes.
23433 static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
23434 TargetLowering::DAGCombinerInfo &DCI,
23435 const X86Subtarget *Subtarget) {
23437 SDValue Cond = N->getOperand(0);
23438 // Get the LHS/RHS of the select.
23439 SDValue LHS = N->getOperand(1);
23440 SDValue RHS = N->getOperand(2);
23441 EVT VT = LHS.getValueType();
23442 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
23444 // If we have SSE[12] support, try to form min/max nodes. SSE min/max
23445 // instructions match the semantics of the common C idiom x<y?x:y but not
23446 // x<=y?x:y, because of how they handle negative zero (which can be
23447 // ignored in unsafe-math mode).
23448 // We also try to create v2f32 min/max nodes, which we later widen to v4f32.
23449 if (Cond.getOpcode() == ISD::SETCC && VT.isFloatingPoint() &&
23450 VT != MVT::f80 && (TLI.isTypeLegal(VT) || VT == MVT::v2f32) &&
23451 (Subtarget->hasSSE2() ||
23452 (Subtarget->hasSSE1() && VT.getScalarType() == MVT::f32))) {
23453 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23455 unsigned Opcode = 0;
23456 // Check for x CC y ? x : y.
23457 if (DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23458 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23462 // Converting this to a min would handle NaNs incorrectly, and swapping
23463 // the operands would cause it to handle comparisons between positive
23464 // and negative zero incorrectly.
23465 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23466 if (!DAG.getTarget().Options.UnsafeFPMath &&
23467 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23469 std::swap(LHS, RHS);
23471 Opcode = X86ISD::FMIN;
23474 // Converting this to a min would handle comparisons between positive
23475 // and negative zero incorrectly.
23476 if (!DAG.getTarget().Options.UnsafeFPMath &&
23477 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23479 Opcode = X86ISD::FMIN;
23482 // Converting this to a min would handle both negative zeros and NaNs
23483 // incorrectly, but we can swap the operands to fix both.
23484 std::swap(LHS, RHS);
23488 Opcode = X86ISD::FMIN;
23492 // Converting this to a max would handle comparisons between positive
23493 // and negative zero incorrectly.
23494 if (!DAG.getTarget().Options.UnsafeFPMath &&
23495 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS))
23497 Opcode = X86ISD::FMAX;
23500 // Converting this to a max would handle NaNs incorrectly, and swapping
23501 // the operands would cause it to handle comparisons between positive
23502 // and negative zero incorrectly.
23503 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)) {
23504 if (!DAG.getTarget().Options.UnsafeFPMath &&
23505 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS)))
23507 std::swap(LHS, RHS);
23509 Opcode = X86ISD::FMAX;
23512 // Converting this to a max would handle both negative zeros and NaNs
23513 // incorrectly, but we can swap the operands to fix both.
23514 std::swap(LHS, RHS);
23518 Opcode = X86ISD::FMAX;
23521 // Check for x CC y ? y : x -- a min/max with reversed arms.
23522 } else if (DAG.isEqualTo(LHS, Cond.getOperand(1)) &&
23523 DAG.isEqualTo(RHS, Cond.getOperand(0))) {
23527 // Converting this to a min would handle comparisons between positive
23528 // and negative zero incorrectly, and swapping the operands would
23529 // cause it to handle NaNs incorrectly.
23530 if (!DAG.getTarget().Options.UnsafeFPMath &&
23531 !(DAG.isKnownNeverZero(LHS) || DAG.isKnownNeverZero(RHS))) {
23532 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23534 std::swap(LHS, RHS);
23536 Opcode = X86ISD::FMIN;
23539 // Converting this to a min would handle NaNs incorrectly.
23540 if (!DAG.getTarget().Options.UnsafeFPMath &&
23541 (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS)))
23543 Opcode = X86ISD::FMIN;
23546 // Converting this to a min would handle both negative zeros and NaNs
23547 // incorrectly, but we can swap the operands to fix both.
23548 std::swap(LHS, RHS);
23552 Opcode = X86ISD::FMIN;
23556 // Converting this to a max would handle NaNs incorrectly.
23557 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23559 Opcode = X86ISD::FMAX;
23562 // Converting this to a max would handle comparisons between positive
23563 // and negative zero incorrectly, and swapping the operands would
23564 // cause it to handle NaNs incorrectly.
23565 if (!DAG.getTarget().Options.UnsafeFPMath &&
23566 !DAG.isKnownNeverZero(LHS) && !DAG.isKnownNeverZero(RHS)) {
23567 if (!DAG.isKnownNeverNaN(LHS) || !DAG.isKnownNeverNaN(RHS))
23569 std::swap(LHS, RHS);
23571 Opcode = X86ISD::FMAX;
23574 // Converting this to a max would handle both negative zeros and NaNs
23575 // incorrectly, but we can swap the operands to fix both.
23576 std::swap(LHS, RHS);
23580 Opcode = X86ISD::FMAX;
23586 return DAG.getNode(Opcode, DL, N->getValueType(0), LHS, RHS);
23589 EVT CondVT = Cond.getValueType();
23590 if (Subtarget->hasAVX512() && VT.isVector() && CondVT.isVector() &&
23591 CondVT.getVectorElementType() == MVT::i1) {
23592 // v16i8 (select v16i1, v16i8, v16i8) does not have a proper
23593 // lowering on KNL. In this case we convert it to
23594 // v16i8 (select v16i8, v16i8, v16i8) and use AVX instruction.
23595 // The same situation for all 128 and 256-bit vectors of i8 and i16.
23596 // Since SKX these selects have a proper lowering.
23597 EVT OpVT = LHS.getValueType();
23598 if ((OpVT.is128BitVector() || OpVT.is256BitVector()) &&
23599 (OpVT.getVectorElementType() == MVT::i8 ||
23600 OpVT.getVectorElementType() == MVT::i16) &&
23601 !(Subtarget->hasBWI() && Subtarget->hasVLX())) {
23602 Cond = DAG.getNode(ISD::SIGN_EXTEND, DL, OpVT, Cond);
23603 DCI.AddToWorklist(Cond.getNode());
23604 return DAG.getNode(N->getOpcode(), DL, OpVT, Cond, LHS, RHS);
23607 // If this is a select between two integer constants, try to do some
23609 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(LHS)) {
23610 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(RHS))
23611 // Don't do this for crazy integer types.
23612 if (DAG.getTargetLoweringInfo().isTypeLegal(LHS.getValueType())) {
23613 // If this is efficiently invertible, canonicalize the LHSC/RHSC values
23614 // so that TrueC (the true value) is larger than FalseC.
23615 bool NeedsCondInvert = false;
23617 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue()) &&
23618 // Efficiently invertible.
23619 (Cond.getOpcode() == ISD::SETCC || // setcc -> invertible.
23620 (Cond.getOpcode() == ISD::XOR && // xor(X, C) -> invertible.
23621 isa<ConstantSDNode>(Cond.getOperand(1))))) {
23622 NeedsCondInvert = true;
23623 std::swap(TrueC, FalseC);
23626 // Optimize C ? 8 : 0 -> zext(C) << 3. Likewise for any pow2/0.
23627 if (FalseC->getAPIntValue() == 0 &&
23628 TrueC->getAPIntValue().isPowerOf2()) {
23629 if (NeedsCondInvert) // Invert the condition if needed.
23630 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23631 DAG.getConstant(1, Cond.getValueType()));
23633 // Zero extend the condition if needed.
23634 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, LHS.getValueType(), Cond);
23636 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
23637 return DAG.getNode(ISD::SHL, DL, LHS.getValueType(), Cond,
23638 DAG.getConstant(ShAmt, MVT::i8));
        // Optimize Cond ? cst+1 : cst -> zext(setcc(C)) + cst.
23642 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
23643 if (NeedsCondInvert) // Invert the condition if needed.
23644 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23645 DAG.getConstant(1, Cond.getValueType()));
23647 // Zero extend the condition if needed.
23648 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
23649 FalseC->getValueType(0), Cond);
23650 return DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23651 SDValue(FalseC, 0));
23654 // Optimize cases that will turn into an LEA instruction. This requires
23655 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
23656 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
23657 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
23658 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
23660 bool isFastMultiplier = false;
23662 switch ((unsigned char)Diff) {
23664 case 1: // result = add base, cond
23665 case 2: // result = lea base( , cond*2)
23666 case 3: // result = lea base(cond, cond*2)
23667 case 4: // result = lea base( , cond*4)
23668 case 5: // result = lea base(cond, cond*4)
23669 case 8: // result = lea base( , cond*8)
23670 case 9: // result = lea base(cond, cond*8)
23671 isFastMultiplier = true;
23676 if (isFastMultiplier) {
23677 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
23678 if (NeedsCondInvert) // Invert the condition if needed.
23679 Cond = DAG.getNode(ISD::XOR, DL, Cond.getValueType(), Cond,
23680 DAG.getConstant(1, Cond.getValueType()));
23682 // Zero extend the condition if needed.
23683 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
23685 // Scale the condition by the difference.
23687 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
23688 DAG.getConstant(Diff, Cond.getValueType()));
23690 // Add the base if non-zero.
23691 if (FalseC->getAPIntValue() != 0)
23692 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
23693 SDValue(FalseC, 0));
23700 // Canonicalize max and min:
23701 // (x > y) ? x : y -> (x >= y) ? x : y
23702 // (x < y) ? x : y -> (x <= y) ? x : y
23703 // This allows use of COND_S / COND_NS (see TranslateX86CC) which eliminates
23704 // the need for an extra compare
23705 // against zero. e.g.
23706 // (x - y) > 0 : (x - y) ? 0 -> (x - y) >= 0 : (x - y) ? 0
23708 // testl %edi, %edi
23710 // cmovgl %edi, %eax
23714 // cmovsl %eax, %edi
23715 if (N->getOpcode() == ISD::SELECT && Cond.getOpcode() == ISD::SETCC &&
23716 DAG.isEqualTo(LHS, Cond.getOperand(0)) &&
23717 DAG.isEqualTo(RHS, Cond.getOperand(1))) {
23718 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23723 ISD::CondCode NewCC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGE;
23724 Cond = DAG.getSetCC(SDLoc(Cond), Cond.getValueType(),
23725 Cond.getOperand(0), Cond.getOperand(1), NewCC);
23726 return DAG.getNode(ISD::SELECT, DL, VT, Cond, LHS, RHS);
23731 // Early exit check
23732 if (!TLI.isTypeLegal(VT))
23735 // Match VSELECTs into subs with unsigned saturation.
23736 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC &&
23737 // psubus is available in SSE2 and AVX2 for i8 and i16 vectors.
23738 ((Subtarget->hasSSE2() && (VT == MVT::v16i8 || VT == MVT::v8i16)) ||
23739 (Subtarget->hasAVX2() && (VT == MVT::v32i8 || VT == MVT::v16i16)))) {
23740 ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
23742 // Check if one of the arms of the VSELECT is a zero vector. If it's on the
23743 // left side invert the predicate to simplify logic below.
23745 if (ISD::isBuildVectorAllZeros(LHS.getNode())) {
23747 CC = ISD::getSetCCInverse(CC, true);
23748 } else if (ISD::isBuildVectorAllZeros(RHS.getNode())) {
23752 if (Other.getNode() && Other->getNumOperands() == 2 &&
23753 DAG.isEqualTo(Other->getOperand(0), Cond.getOperand(0))) {
23754 SDValue OpLHS = Other->getOperand(0), OpRHS = Other->getOperand(1);
23755 SDValue CondRHS = Cond->getOperand(1);
23757 // Look for a general sub with unsigned saturation first.
23758 // x >= y ? x-y : 0 --> subus x, y
23759 // x > y ? x-y : 0 --> subus x, y
23760 if ((CC == ISD::SETUGE || CC == ISD::SETUGT) &&
23761 Other->getOpcode() == ISD::SUB && DAG.isEqualTo(OpRHS, CondRHS))
23762 return DAG.getNode(X86ISD::SUBUS, DL, VT, OpLHS, OpRHS);
23764 if (auto *OpRHSBV = dyn_cast<BuildVectorSDNode>(OpRHS))
23765 if (auto *OpRHSConst = OpRHSBV->getConstantSplatNode()) {
23766 if (auto *CondRHSBV = dyn_cast<BuildVectorSDNode>(CondRHS))
23767 if (auto *CondRHSConst = CondRHSBV->getConstantSplatNode())
23768 // If the RHS is a constant we have to reverse the const
23769 // canonicalization.
23770 // x > C-1 ? x+-C : 0 --> subus x, C
23771 if (CC == ISD::SETUGT && Other->getOpcode() == ISD::ADD &&
23772 CondRHSConst->getAPIntValue() ==
23773 (-OpRHSConst->getAPIntValue() - 1))
23774 return DAG.getNode(
23775 X86ISD::SUBUS, DL, VT, OpLHS,
23776 DAG.getConstant(-OpRHSConst->getAPIntValue(), VT));
23778 // Another special case: If C was a sign bit, the sub has been
23779 // canonicalized into a xor.
23780 // FIXME: Would it be better to use computeKnownBits to determine
23781 // whether it's safe to decanonicalize the xor?
23782 // x s< 0 ? x^C : 0 --> subus x, C
23783 if (CC == ISD::SETLT && Other->getOpcode() == ISD::XOR &&
23784 ISD::isBuildVectorAllZeros(CondRHS.getNode()) &&
23785 OpRHSConst->getAPIntValue().isSignBit())
23786 // Note that we have to rebuild the RHS constant here to ensure we
23787 // don't rely on particular values of undef lanes.
23788 return DAG.getNode(
23789 X86ISD::SUBUS, DL, VT, OpLHS,
23790 DAG.getConstant(OpRHSConst->getAPIntValue(), VT));
23795 // Try to match a min/max vector operation.
23796 if (N->getOpcode() == ISD::VSELECT && Cond.getOpcode() == ISD::SETCC) {
23797 std::pair<unsigned, bool> ret = matchIntegerMINMAX(Cond, VT, LHS, RHS, DAG, Subtarget);
23798 unsigned Opc = ret.first;
23799 bool NeedSplit = ret.second;
23801 if (Opc && NeedSplit) {
23802 unsigned NumElems = VT.getVectorNumElements();
23803 // Extract the LHS vectors
23804 SDValue LHS1 = Extract128BitVector(LHS, 0, DAG, DL);
23805 SDValue LHS2 = Extract128BitVector(LHS, NumElems/2, DAG, DL);
23807 // Extract the RHS vectors
23808 SDValue RHS1 = Extract128BitVector(RHS, 0, DAG, DL);
23809 SDValue RHS2 = Extract128BitVector(RHS, NumElems/2, DAG, DL);
23811 // Create min/max for each subvector
23812 LHS = DAG.getNode(Opc, DL, LHS1.getValueType(), LHS1, RHS1);
23813 RHS = DAG.getNode(Opc, DL, LHS2.getValueType(), LHS2, RHS2);
23815 // Merge the result
23816 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS, RHS);
23818 return DAG.getNode(Opc, DL, VT, LHS, RHS);
23821 // Simplify vector selection if condition value type matches vselect
23823 if (N->getOpcode() == ISD::VSELECT && CondVT == VT) {
23824 assert(Cond.getValueType().isVector() &&
23825 "vector select expects a vector selector!");
23827 bool TValIsAllOnes = ISD::isBuildVectorAllOnes(LHS.getNode());
23828 bool FValIsAllZeros = ISD::isBuildVectorAllZeros(RHS.getNode());
    // Try to invert the condition if the true value is not all 1s and the
    // false value is not all 0s.
23832 if (!TValIsAllOnes && !FValIsAllZeros &&
23833 // Check if the selector will be produced by CMPP*/PCMP*
23834 Cond.getOpcode() == ISD::SETCC &&
23835 // Check if SETCC has already been promoted
23836 TLI.getSetCCResultType(*DAG.getContext(), VT) == CondVT) {
23837 bool TValIsAllZeros = ISD::isBuildVectorAllZeros(LHS.getNode());
23838 bool FValIsAllOnes = ISD::isBuildVectorAllOnes(RHS.getNode());
23840 if (TValIsAllZeros || FValIsAllOnes) {
23841 SDValue CC = Cond.getOperand(2);
23842 ISD::CondCode NewCC =
23843 ISD::getSetCCInverse(cast<CondCodeSDNode>(CC)->get(),
23844 Cond.getOperand(0).getValueType().isInteger());
23845 Cond = DAG.getSetCC(DL, CondVT, Cond.getOperand(0), Cond.getOperand(1), NewCC);
23846 std::swap(LHS, RHS);
23847 TValIsAllOnes = FValIsAllOnes;
23848 FValIsAllZeros = TValIsAllZeros;
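    // When the true arm is all ones or the false arm is all zeros, the select
    // folds to a bitwise OR/AND with the (sign-extended) condition mask, or to
    // the condition itself when both hold.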
23852 if (TValIsAllOnes || FValIsAllZeros) {
23855 if (TValIsAllOnes && FValIsAllZeros)
23857 else if (TValIsAllOnes)
23858 Ret = DAG.getNode(ISD::OR, DL, CondVT, Cond,
23859 DAG.getNode(ISD::BITCAST, DL, CondVT, RHS));
23860 else if (FValIsAllZeros)
23861 Ret = DAG.getNode(ISD::AND, DL, CondVT, Cond,
23862 DAG.getNode(ISD::BITCAST, DL, CondVT, LHS));
23864 return DAG.getNode(ISD::BITCAST, DL, VT, Ret);
23868 // If we know that this node is legal then we know that it is going to be
23869 // matched by one of the SSE/AVX BLEND instructions. These instructions only
23870 // depend on the highest bit in each word. Try to use SimplifyDemandedBits
23871 // to simplify previous instructions.
23872 if (N->getOpcode() == ISD::VSELECT && DCI.isBeforeLegalizeOps() &&
23873 !DCI.isBeforeLegalize() &&
23874 // We explicitly check against v8i16 and v16i16 because, although
23875 // they're marked as Custom, they might only be legal when Cond is a
23876 // build_vector of constants. This will be taken care in a later
23878 (TLI.isOperationLegalOrCustom(ISD::VSELECT, VT) && VT != MVT::v16i16 &&
23879 VT != MVT::v8i16) &&
23880 // Don't optimize vector of constants. Those are handled by
23881 // the generic code and all the bits must be properly set for
23882 // the generic optimizer.
23883 !ISD::isBuildVectorOfConstantSDNodes(Cond.getNode())) {
23884 unsigned BitWidth = Cond.getValueType().getScalarType().getSizeInBits();
23886 // Don't optimize vector selects that map to mask-registers.
23890 assert(BitWidth >= 8 && BitWidth <= 64 && "Invalid mask size");
23891 APInt DemandedMask = APInt::getHighBitsSet(BitWidth, 1);
23893 APInt KnownZero, KnownOne;
23894 TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
23895 DCI.isBeforeLegalizeOps());
23896 if (TLO.ShrinkDemandedConstant(Cond, DemandedMask) ||
23897 TLI.SimplifyDemandedBits(Cond, DemandedMask, KnownZero, KnownOne,
23899 // If we changed the computation somewhere in the DAG, this change
23900 // will affect all users of Cond.
23901 // Make sure it is fine and update all the nodes so that we do not
23902 // use the generic VSELECT anymore. Otherwise, we may perform
23903 // wrong optimizations as we messed up with the actual expectation
23904 // for the vector boolean values.
23905 if (Cond != TLO.Old) {
23906 // Check all uses of that condition operand to check whether it will be
        // consumed by non-BLEND instructions, which may depend on all bits
        // being set properly.
23909 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
23911 if (I->getOpcode() != ISD::VSELECT)
23912 // TODO: Add other opcodes eventually lowered into BLEND.
23915 // Update all the users of the condition, before committing the change,
23916 // so that the VSELECT optimizations that expect the correct vector
23917 // boolean value will not be triggered.
23918 for (SDNode::use_iterator I = Cond->use_begin(), E = Cond->use_end();
23920 DAG.ReplaceAllUsesOfValueWith(
23922 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(*I), I->getValueType(0),
23923 Cond, I->getOperand(1), I->getOperand(2)));
23924 DCI.CommitTargetLoweringOpt(TLO);
23927 // At this point, only Cond is changed. Change the condition
23928 // just for N to keep the opportunity to optimize all other
23929 // users their own way.
23930 DAG.ReplaceAllUsesOfValueWith(
23932 DAG.getNode(X86ISD::SHRUNKBLEND, SDLoc(N), N->getValueType(0),
23933 TLO.New, N->getOperand(1), N->getOperand(2)));
23938 // We should generate an X86ISD::BLENDI from a vselect if its argument
23939 // is a sign_extend_inreg of an any_extend of a BUILD_VECTOR of
23940 // constants. This specific pattern gets generated when we split a
23941 // selector for a 512 bit vector in a machine without AVX512 (but with
23942 // 256-bit vectors), during legalization:
23944 // (vselect (sign_extend (any_extend (BUILD_VECTOR)) i1) LHS RHS)
23946 // Iff we find this pattern and the build_vectors are built from
23947 // constants, we translate the vselect into a shuffle_vector that we
23948 // know will be matched by LowerVECTOR_SHUFFLEtoBlend.
23949 if ((N->getOpcode() == ISD::VSELECT ||
23950 N->getOpcode() == X86ISD::SHRUNKBLEND) &&
23951 !DCI.isBeforeLegalize()) {
23952 SDValue Shuffle = transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
23953 if (Shuffle.getNode())
23960 // Check whether a boolean test is testing a boolean value generated by
23961 // X86ISD::SETCC. If so, return the operand of that SETCC and proper condition
23964 // Simplify the following patterns:
23965 // (Op (CMP (SETCC Cond EFLAGS) 1) EQ) or
23966 // (Op (CMP (SETCC Cond EFLAGS) 0) NEQ)
23967 // to (Op EFLAGS Cond)
23969 // (Op (CMP (SETCC Cond EFLAGS) 0) EQ) or
23970 // (Op (CMP (SETCC Cond EFLAGS) 1) NEQ)
23971 // to (Op EFLAGS !Cond)
23973 // where Op could be BRCOND or CMOV.
23975 static SDValue checkBoolTestSetCCCombine(SDValue Cmp, X86::CondCode &CC) {
23976 // Quit if not CMP and SUB with its value result used.
23977 if (Cmp.getOpcode() != X86ISD::CMP &&
23978 (Cmp.getOpcode() != X86ISD::SUB || Cmp.getNode()->hasAnyUseOfValue(0)))
23981 // Quit if not used as a boolean value.
23982 if (CC != X86::COND_E && CC != X86::COND_NE)
23985 // Check CMP operands. One of them should be 0 or 1 and the other should be
23986 // an SetCC or extended from it.
  SDValue Op1 = Cmp.getOperand(0);
  SDValue Op2 = Cmp.getOperand(1);

  SDValue SetCC;
  const ConstantSDNode* C = nullptr;
  bool needOppositeCond = (CC == X86::COND_E);
  bool checkAgainstTrue = false; // Is it a comparison against 1?

  if ((C = dyn_cast<ConstantSDNode>(Op1)))
    SetCC = Op2;
  else if ((C = dyn_cast<ConstantSDNode>(Op2)))
    SetCC = Op1;
  else // Quit if all operands are not constants.
    return SDValue();

  if (C->getZExtValue() == 1) {
    needOppositeCond = !needOppositeCond;
    checkAgainstTrue = true;
  } else if (C->getZExtValue() != 0)
    // Quit if the constant is neither 0 nor 1.
    return SDValue();
24009 bool truncatedToBoolWithAnd = false;
24010 // Skip (zext $x), (trunc $x), or (and $x, 1) node.
24011 while (SetCC.getOpcode() == ISD::ZERO_EXTEND ||
24012 SetCC.getOpcode() == ISD::TRUNCATE ||
24013 SetCC.getOpcode() == ISD::AND) {
24014 if (SetCC.getOpcode() == ISD::AND) {
24016 ConstantSDNode *CS;
24017 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(0))) &&
24018 CS->getZExtValue() == 1)
24020 if ((CS = dyn_cast<ConstantSDNode>(SetCC.getOperand(1))) &&
24021 CS->getZExtValue() == 1)
24025 SetCC = SetCC.getOperand(OpIdx);
24026 truncatedToBoolWithAnd = true;
24028 SetCC = SetCC.getOperand(0);
24031 switch (SetCC.getOpcode()) {
24032 case X86ISD::SETCC_CARRY:
24033 // Since SETCC_CARRY gives output based on R = CF ? ~0 : 0, it's unsafe to
24034 // simplify it if the result of SETCC_CARRY is not canonicalized to 0 or 1,
24035 // i.e. it's a comparison against true but the result of SETCC_CARRY is not
24036 // truncated to i1 using 'and'.
24037 if (checkAgainstTrue && !truncatedToBoolWithAnd)
24039 assert(X86::CondCode(SetCC.getConstantOperandVal(0)) == X86::COND_B &&
24040 "Invalid use of SETCC_CARRY!");
24042 case X86ISD::SETCC:
24043 // Set the condition code or opposite one if necessary.
24044 CC = X86::CondCode(SetCC.getConstantOperandVal(0));
24045 if (needOppositeCond)
24046 CC = X86::GetOppositeBranchCondition(CC);
24047 return SetCC.getOperand(1);
24048 case X86ISD::CMOV: {
24049 // Check whether false/true value has canonical one, i.e. 0 or 1.
24050 ConstantSDNode *FVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(0));
24051 ConstantSDNode *TVal = dyn_cast<ConstantSDNode>(SetCC.getOperand(1));
24052 // Quit if true value is not a constant.
24055 // Quit if false value is not a constant.
24057 SDValue Op = SetCC.getOperand(0);
24058 // Skip 'zext' or 'trunc' node.
24059 if (Op.getOpcode() == ISD::ZERO_EXTEND ||
24060 Op.getOpcode() == ISD::TRUNCATE)
24061 Op = Op.getOperand(0);
24062 // A special case for rdrand/rdseed, where 0 is set if false cond is
24064 if ((Op.getOpcode() != X86ISD::RDRAND &&
24065 Op.getOpcode() != X86ISD::RDSEED) || Op.getResNo() != 0)
24068 // Quit if false value is not the constant 0 or 1.
24069 bool FValIsFalse = true;
24070 if (FVal && FVal->getZExtValue() != 0) {
24071 if (FVal->getZExtValue() != 1)
24073 // If FVal is 1, opposite cond is needed.
24074 needOppositeCond = !needOppositeCond;
24075 FValIsFalse = false;
24077 // Quit if TVal is not the constant opposite of FVal.
24078 if (FValIsFalse && TVal->getZExtValue() != 1)
24080 if (!FValIsFalse && TVal->getZExtValue() != 0)
24082 CC = X86::CondCode(SetCC.getConstantOperandVal(2));
24083 if (needOppositeCond)
24084 CC = X86::GetOppositeBranchCondition(CC);
24085 return SetCC.getOperand(3);
24092 /// Optimize X86ISD::CMOV [LHS, RHS, CONDCODE (e.g. X86::COND_NE), CONDVAL]
24093 static SDValue PerformCMOVCombine(SDNode *N, SelectionDAG &DAG,
24094 TargetLowering::DAGCombinerInfo &DCI,
24095 const X86Subtarget *Subtarget) {
24098 // If the flag operand isn't dead, don't touch this CMOV.
24099 if (N->getNumValues() == 2 && !SDValue(N, 1).use_empty())
24102 SDValue FalseOp = N->getOperand(0);
24103 SDValue TrueOp = N->getOperand(1);
24104 X86::CondCode CC = (X86::CondCode)N->getConstantOperandVal(2);
24105 SDValue Cond = N->getOperand(3);
24107 if (CC == X86::COND_E || CC == X86::COND_NE) {
24108 switch (Cond.getOpcode()) {
24112 // If operand of BSR / BSF are proven never zero, then ZF cannot be set.
24113 if (DAG.isKnownNeverZero(Cond.getOperand(0)))
24114 return (CC == X86::COND_E) ? FalseOp : TrueOp;
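  // Try to simplify the EFLAGS producer: when the condition comes from a
  // boolean SETCC-style test, reuse that test's flags and condition code
  // directly in the CMOV.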
  SDValue Flags = checkBoolTestSetCCCombine(Cond, CC);
24121 if (Flags.getNode() &&
24122 // Extra check as FCMOV only supports a subset of X86 cond.
24123 (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC))) {
24124 SDValue Ops[] = { FalseOp, TrueOp,
24125 DAG.getConstant(CC, MVT::i8), Flags };
24126 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList(), Ops);
24129 // If this is a select between two integer constants, try to do some
24130 // optimizations. Note that the operands are ordered the opposite of SELECT
24132 if (ConstantSDNode *TrueC = dyn_cast<ConstantSDNode>(TrueOp)) {
24133 if (ConstantSDNode *FalseC = dyn_cast<ConstantSDNode>(FalseOp)) {
24134 // Canonicalize the TrueC/FalseC values so that TrueC (the true value) is
24135 // larger than FalseC (the false value).
24136 if (TrueC->getAPIntValue().ult(FalseC->getAPIntValue())) {
24137 CC = X86::GetOppositeBranchCondition(CC);
24138 std::swap(TrueC, FalseC);
24139 std::swap(TrueOp, FalseOp);
24142 // Optimize C ? 8 : 0 -> zext(setcc(C)) << 3. Likewise for any pow2/0.
24143 // This is efficient for any integer data type (including i8/i16) and
24145 if (FalseC->getAPIntValue() == 0 && TrueC->getAPIntValue().isPowerOf2()) {
24146 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24147 DAG.getConstant(CC, MVT::i8), Cond);
24149 // Zero extend the condition if needed.
24150 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, TrueC->getValueType(0), Cond);
24152 unsigned ShAmt = TrueC->getAPIntValue().logBase2();
24153 Cond = DAG.getNode(ISD::SHL, DL, Cond.getValueType(), Cond,
24154 DAG.getConstant(ShAmt, MVT::i8));
24155 if (N->getNumValues() == 2) // Dead flag value?
24156 return DCI.CombineTo(N, Cond, SDValue());
24160 // Optimize Cond ? cst+1 : cst -> zext(setcc(C)+cst. This is efficient
24161 // for any integer data type, including i8/i16.
24162 if (FalseC->getAPIntValue()+1 == TrueC->getAPIntValue()) {
24163 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24164 DAG.getConstant(CC, MVT::i8), Cond);
24166 // Zero extend the condition if needed.
24167 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL,
24168 FalseC->getValueType(0), Cond);
24169 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24170 SDValue(FalseC, 0));
24172 if (N->getNumValues() == 2) // Dead flag value?
24173 return DCI.CombineTo(N, Cond, SDValue());
24177 // Optimize cases that will turn into an LEA instruction. This requires
24178 // an i32 or i64 and an efficient multiplier (1, 2, 3, 4, 5, 8, 9).
24179 if (N->getValueType(0) == MVT::i32 || N->getValueType(0) == MVT::i64) {
24180 uint64_t Diff = TrueC->getZExtValue()-FalseC->getZExtValue();
24181 if (N->getValueType(0) == MVT::i32) Diff = (unsigned)Diff;
24183 bool isFastMultiplier = false;
24185 switch ((unsigned char)Diff) {
24187 case 1: // result = add base, cond
24188 case 2: // result = lea base( , cond*2)
24189 case 3: // result = lea base(cond, cond*2)
24190 case 4: // result = lea base( , cond*4)
24191 case 5: // result = lea base(cond, cond*4)
24192 case 8: // result = lea base( , cond*8)
24193 case 9: // result = lea base(cond, cond*8)
24194 isFastMultiplier = true;
24199 if (isFastMultiplier) {
24200 APInt Diff = TrueC->getAPIntValue()-FalseC->getAPIntValue();
24201 Cond = DAG.getNode(X86ISD::SETCC, DL, MVT::i8,
24202 DAG.getConstant(CC, MVT::i8), Cond);
24203 // Zero extend the condition if needed.
24204 Cond = DAG.getNode(ISD::ZERO_EXTEND, DL, FalseC->getValueType(0),
24206 // Scale the condition by the difference.
24208 Cond = DAG.getNode(ISD::MUL, DL, Cond.getValueType(), Cond,
24209 DAG.getConstant(Diff, Cond.getValueType()));
24211 // Add the base if non-zero.
24212 if (FalseC->getAPIntValue() != 0)
24213 Cond = DAG.getNode(ISD::ADD, DL, Cond.getValueType(), Cond,
24214 SDValue(FalseC, 0));
24215 if (N->getNumValues() == 2) // Dead flag value?
24216 return DCI.CombineTo(N, Cond, SDValue());
24223 // Handle these cases:
24224 // (select (x != c), e, c) -> select (x != c), e, x),
24225 // (select (x == c), c, e) -> select (x == c), x, e)
24226 // where the c is an integer constant, and the "select" is the combination
24227 // of CMOV and CMP.
24229 // The rationale for this change is that the conditional-move from a constant
24230 // needs two instructions, however, conditional-move from a register needs
24231 // only one instruction.
24233 // CAVEAT: By replacing a constant with a symbolic value, it may obscure
24234 // some instruction-combining opportunities. This opt needs to be
24235 // postponed as late as possible.
24237 if (!DCI.isBeforeLegalize() && !DCI.isBeforeLegalizeOps()) {
24238 // the DCI.xxxx conditions are provided to postpone the optimization as
24239 // late as possible.
24241 ConstantSDNode *CmpAgainst = nullptr;
24242 if ((Cond.getOpcode() == X86ISD::CMP || Cond.getOpcode() == X86ISD::SUB) &&
24243 (CmpAgainst = dyn_cast<ConstantSDNode>(Cond.getOperand(1))) &&
24244 !isa<ConstantSDNode>(Cond.getOperand(0))) {
24246 if (CC == X86::COND_NE &&
24247 CmpAgainst == dyn_cast<ConstantSDNode>(FalseOp)) {
24248 CC = X86::GetOppositeBranchCondition(CC);
24249 std::swap(TrueOp, FalseOp);
24252 if (CC == X86::COND_E &&
24253 CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
24254 SDValue Ops[] = { FalseOp, Cond.getOperand(0),
24255 DAG.getConstant(CC, MVT::i8), Cond };
24256 return DAG.getNode(X86ISD::CMOV, DL, N->getVTList (), Ops);
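/// PerformINTRINSIC_WO_CHAINCombine - Fold X86 blend and arithmetic-shift
/// intrinsics whose mask or shift amount is a known constant into simpler
/// nodes.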
24264 static SDValue PerformINTRINSIC_WO_CHAINCombine(SDNode *N, SelectionDAG &DAG,
24265 const X86Subtarget *Subtarget) {
24266 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
24268 default: return SDValue();
24269 // SSE/AVX/AVX2 blend intrinsics.
24270 case Intrinsic::x86_avx2_pblendvb:
24271 case Intrinsic::x86_avx2_pblendw:
24272 case Intrinsic::x86_avx2_pblendd_128:
24273 case Intrinsic::x86_avx2_pblendd_256:
24274 // Don't try to simplify this intrinsic if we don't have AVX2.
24275 if (!Subtarget->hasAVX2())
24278 case Intrinsic::x86_avx_blend_pd_256:
24279 case Intrinsic::x86_avx_blend_ps_256:
24280 case Intrinsic::x86_avx_blendv_pd_256:
24281 case Intrinsic::x86_avx_blendv_ps_256:
24282 // Don't try to simplify this intrinsic if we don't have AVX.
24283 if (!Subtarget->hasAVX())
24286 case Intrinsic::x86_sse41_pblendw:
24287 case Intrinsic::x86_sse41_blendpd:
24288 case Intrinsic::x86_sse41_blendps:
24289 case Intrinsic::x86_sse41_blendvps:
24290 case Intrinsic::x86_sse41_blendvpd:
24291 case Intrinsic::x86_sse41_pblendvb: {
24292 SDValue Op0 = N->getOperand(1);
24293 SDValue Op1 = N->getOperand(2);
24294 SDValue Mask = N->getOperand(3);
24296 // Don't try to simplify this intrinsic if we don't have SSE4.1.
24297 if (!Subtarget->hasSSE41())
24300 // fold (blend A, A, Mask) -> A
24303 // fold (blend A, B, allZeros) -> A
24304 if (ISD::isBuildVectorAllZeros(Mask.getNode()))
24306 // fold (blend A, B, allOnes) -> B
24307 if (ISD::isBuildVectorAllOnes(Mask.getNode()))
24310 // Simplify the case where the mask is a constant i32 value.
24311 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Mask)) {
24312 if (C->isNullValue())
24314 if (C->isAllOnesValue())
24321 // Packed SSE2/AVX2 arithmetic shift immediate intrinsics.
24322 case Intrinsic::x86_sse2_psrai_w:
24323 case Intrinsic::x86_sse2_psrai_d:
24324 case Intrinsic::x86_avx2_psrai_w:
24325 case Intrinsic::x86_avx2_psrai_d:
24326 case Intrinsic::x86_sse2_psra_w:
24327 case Intrinsic::x86_sse2_psra_d:
24328 case Intrinsic::x86_avx2_psra_w:
24329 case Intrinsic::x86_avx2_psra_d: {
24330 SDValue Op0 = N->getOperand(1);
24331 SDValue Op1 = N->getOperand(2);
24332 EVT VT = Op0.getValueType();
24333 assert(VT.isVector() && "Expected a vector type!");
24335 if (isa<BuildVectorSDNode>(Op1))
24336 Op1 = Op1.getOperand(0);
24338 if (!isa<ConstantSDNode>(Op1))
24341 EVT SVT = VT.getVectorElementType();
24342 unsigned SVTBits = SVT.getSizeInBits();
24344 ConstantSDNode *CND = cast<ConstantSDNode>(Op1);
24345 const APInt &C = APInt(SVTBits, CND->getAPIntValue().getZExtValue());
24346 uint64_t ShAmt = C.getZExtValue();
24348 // Don't try to convert this shift into a ISD::SRA if the shift
24349 // count is bigger than or equal to the element size.
24350 if (ShAmt >= SVTBits)
24353 // Trivial case: if the shift count is zero, then fold this
24354 // into the first operand.
24358 // Replace this packed shift intrinsic with a target independent
24360 SDValue Splat = DAG.getConstant(C, VT);
24361 return DAG.getNode(ISD::SRA, SDLoc(N), VT, Op0, Splat);
/// PerformMulCombine - Optimize a single multiply with constant into two
/// in order to implement it with two cheaper instructions, e.g.
/// LEA + SHL, LEA + LEA.
static SDValue PerformMulCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI) {
  if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer())
    return SDValue();

  EVT VT = N->getValueType(0);
  if (VT != MVT::i64 && VT != MVT::i32)
    return SDValue();

  ConstantSDNode *C = dyn_cast<ConstantSDNode>(N->getOperand(1));
  if (!C)
    return SDValue();
  uint64_t MulAmt = C->getZExtValue();
  if (isPowerOf2_64(MulAmt) || MulAmt == 3 || MulAmt == 5 || MulAmt == 9)
    return SDValue();

  uint64_t MulAmt1 = 0;
  uint64_t MulAmt2 = 0;
  if ((MulAmt % 9) == 0) {
    MulAmt1 = 9;
    MulAmt2 = MulAmt / 9;
  } else if ((MulAmt % 5) == 0) {
    MulAmt1 = 5;
    MulAmt2 = MulAmt / 5;
  } else if ((MulAmt % 3) == 0) {
    MulAmt1 = 3;
    MulAmt2 = MulAmt / 3;
  }
  if (MulAmt2 &&
      (isPowerOf2_64(MulAmt2) || MulAmt2 == 3 || MulAmt2 == 5 || MulAmt2 == 9)){
    SDLoc DL(N);

    if (isPowerOf2_64(MulAmt2) &&
        !(N->hasOneUse() && N->use_begin()->getOpcode() == ISD::ADD))
      // If second multiplier is pow2, issue it first. We want the multiply by
      // 3, 5, or 9 to be folded into the addressing mode unless the lone use
      // is an add.
      std::swap(MulAmt1, MulAmt2);

    SDValue NewMul;
    if (isPowerOf2_64(MulAmt1))
      NewMul = DAG.getNode(ISD::SHL, DL, VT, N->getOperand(0),
                           DAG.getConstant(Log2_64(MulAmt1), MVT::i8));
    else
      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, N->getOperand(0),
                           DAG.getConstant(MulAmt1, VT));

    if (isPowerOf2_64(MulAmt2))
      NewMul = DAG.getNode(ISD::SHL, DL, VT, NewMul,
                           DAG.getConstant(Log2_64(MulAmt2), MVT::i8));
    else
      NewMul = DAG.getNode(X86ISD::MUL_IMM, DL, VT, NewMul,
                           DAG.getConstant(MulAmt2, VT));

    // Do not add new nodes to DAG combiner worklist.
    DCI.CombineTo(N, NewMul, false);
  }
  return SDValue();
}
static SDValue PerformSHLCombine(SDNode *N, SelectionDAG &DAG) {
  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
  EVT VT = N0.getValueType();

  // fold (shl (and (setcc_c), c1), c2) -> (and setcc_c, (c1 << c2))
  // since the result of setcc_c is all zeros or all ones.
  if (VT.isInteger() && !VT.isVector() &&
      N1C && N0.getOpcode() == ISD::AND &&
      N0.getOperand(1).getOpcode() == ISD::Constant) {
    SDValue N00 = N0.getOperand(0);
    if (N00.getOpcode() == X86ISD::SETCC_CARRY ||
        ((N00.getOpcode() == ISD::ANY_EXTEND ||
          N00.getOpcode() == ISD::ZERO_EXTEND) &&
         N00.getOperand(0).getOpcode() == X86ISD::SETCC_CARRY)) {
      APInt Mask = cast<ConstantSDNode>(N0.getOperand(1))->getAPIntValue();
      APInt ShAmt = N1C->getAPIntValue();
      Mask = Mask.shl(ShAmt);
      return DAG.getNode(ISD::AND, SDLoc(N), VT,
                         N00, DAG.getConstant(Mask, VT));
    }
  }

  // Hardware support for vector shifts is sparse which makes us scalarize the
  // vector operations in many cases. Also, on sandybridge ADD is faster than
  // SHL:
  // (shl V, 1) -> add V,V
  if (auto *N1BV = dyn_cast<BuildVectorSDNode>(N1))
    if (auto *N1SplatC = N1BV->getConstantSplatNode()) {
      assert(N0.getValueType().isVector() && "Invalid vector shift type");
      // We shift all of the values by one. In many cases we do not have
      // hardware support for this operation. This is better expressed as an
      // ADD of two values.
      if (N1SplatC->getZExtValue() == 1)
        return DAG.getNode(ISD::ADD, SDLoc(N), VT, N0, N0);
    }

  return SDValue();
}
/// \brief Returns a vector of 0s if the input node is a vector logical
/// shift by a constant amount which is known to be bigger than or equal
/// to the vector element size in bits.
static SDValue performShiftToAllZeros(SDNode *N, SelectionDAG &DAG,
                                      const X86Subtarget *Subtarget) {
  EVT VT = N->getValueType(0);

  if (VT != MVT::v2i64 && VT != MVT::v4i32 && VT != MVT::v8i16 &&
      (!Subtarget->hasInt256() ||
       (VT != MVT::v4i64 && VT != MVT::v8i32 && VT != MVT::v16i16)))
    return SDValue();

  SDValue Amt = N->getOperand(1);
  SDLoc DL(N);
  if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Amt))
    if (auto *AmtSplat = AmtBV->getConstantSplatNode()) {
      APInt ShiftAmt = AmtSplat->getAPIntValue();
      unsigned MaxAmount = VT.getVectorElementType().getSizeInBits();

      // SSE2/AVX2 logical shifts always return a vector of 0s
      // if the shift amount is bigger than or equal to
      // the element size. The constant shift amount will be
      // encoded as an 8-bit immediate.
      if (ShiftAmt.trunc(8).uge(MaxAmount))
        return getZeroVector(VT, Subtarget, DAG, DL);
    }

  return SDValue();
}
/// PerformShiftCombine - Combine shifts.
static SDValue PerformShiftCombine(SDNode* N, SelectionDAG &DAG,
                                   TargetLowering::DAGCombinerInfo &DCI,
                                   const X86Subtarget *Subtarget) {
  if (N->getOpcode() == ISD::SHL) {
    SDValue V = PerformSHLCombine(N, DAG);
    if (V.getNode()) return V;
  }

  if (N->getOpcode() != ISD::SRA) {
    // Try to fold this logical shift into a zero vector.
    SDValue V = performShiftToAllZeros(N, DAG, Subtarget);
    if (V.getNode()) return V;
  }

  return SDValue();
}
24519 // CMPEQCombine - Recognize the distinctive (AND (setcc ...) (setcc ..))
24520 // where both setccs reference the same FP CMP, and rewrite for CMPEQSS
24521 // and friends. Likewise for OR -> CMPNEQSS.
24522 static SDValue CMPEQCombine(SDNode *N, SelectionDAG &DAG,
24523 TargetLowering::DAGCombinerInfo &DCI,
24524 const X86Subtarget *Subtarget) {
24527 // SSE1 supports CMP{eq|ne}SS, and SSE2 added CMP{eq|ne}SD, but
24528 // we're requiring SSE2 for both.
24529 if (Subtarget->hasSSE2() && isAndOrOfSetCCs(SDValue(N, 0U), opcode)) {
24530 SDValue N0 = N->getOperand(0);
24531 SDValue N1 = N->getOperand(1);
24532 SDValue CMP0 = N0->getOperand(1);
24533 SDValue CMP1 = N1->getOperand(1);
24536 // The SETCCs should both refer to the same CMP.
24537 if (CMP0.getOpcode() != X86ISD::CMP || CMP0 != CMP1)
24540 SDValue CMP00 = CMP0->getOperand(0);
24541 SDValue CMP01 = CMP0->getOperand(1);
24542 EVT VT = CMP00.getValueType();
24544 if (VT == MVT::f32 || VT == MVT::f64) {
24545 bool ExpectingFlags = false;
24546 // Check for any users that want flags:
24547 for (SDNode::use_iterator UI = N->use_begin(), UE = N->use_end();
24548 !ExpectingFlags && UI != UE; ++UI)
24549 switch (UI->getOpcode()) {
24554 ExpectingFlags = true;
24556 case ISD::CopyToReg:
24557 case ISD::SIGN_EXTEND:
24558 case ISD::ZERO_EXTEND:
24559 case ISD::ANY_EXTEND:
24563 if (!ExpectingFlags) {
24564 enum X86::CondCode cc0 = (enum X86::CondCode)N0.getConstantOperandVal(0);
24565 enum X86::CondCode cc1 = (enum X86::CondCode)N1.getConstantOperandVal(0);
24567 if (cc1 == X86::COND_E || cc1 == X86::COND_NE) {
24568 X86::CondCode tmp = cc0;
24573 if ((cc0 == X86::COND_E && cc1 == X86::COND_NP) ||
24574 (cc0 == X86::COND_NE && cc1 == X86::COND_P)) {
24575 // FIXME: need symbolic constants for these magic numbers.
24576 // See X86ATTInstPrinter.cpp:printSSECC().
24577 unsigned x86cc = (cc0 == X86::COND_E) ? 0 : 4;
24578 if (Subtarget->hasAVX512()) {
24579 SDValue FSetCC = DAG.getNode(X86ISD::FSETCC, DL, MVT::i1, CMP00,
24580 CMP01, DAG.getConstant(x86cc, MVT::i8));
24581 if (N->getValueType(0) != MVT::i1)
24582 return DAG.getNode(ISD::ZERO_EXTEND, DL, N->getValueType(0),
24586 SDValue OnesOrZeroesF = DAG.getNode(X86ISD::FSETCC, DL,
24587 CMP00.getValueType(), CMP00, CMP01,
24588 DAG.getConstant(x86cc, MVT::i8));
24590 bool is64BitFP = (CMP00.getValueType() == MVT::f64);
24591 MVT IntVT = is64BitFP ? MVT::i64 : MVT::i32;
24593 if (is64BitFP && !Subtarget->is64Bit()) {
24594 // On a 32-bit target, we cannot bitcast the 64-bit float to a
24595 // 64-bit integer, since that's not a legal type. Since
            // OnesOrZeroesF is all ones or all zeroes, we don't need all the
24597 // bits, but can do this little dance to extract the lowest 32 bits
24598 // and work with those going forward.
24599 SDValue Vector64 = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, MVT::v2f64,
24601 SDValue Vector32 = DAG.getNode(ISD::BITCAST, DL, MVT::v4f32,
24603 OnesOrZeroesF = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::f32,
24604 Vector32, DAG.getIntPtrConstant(0));
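          // The FSETCC result is all ones or all zeroes; view it as an
          // integer, mask down to the low bit, and truncate to the i8 boolean
          // result.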
24608 SDValue OnesOrZeroesI = DAG.getNode(ISD::BITCAST, DL, IntVT, OnesOrZeroesF);
24609 SDValue ANDed = DAG.getNode(ISD::AND, DL, IntVT, OnesOrZeroesI,
24610 DAG.getConstant(1, IntVT));
24611 SDValue OneBitOfTruth = DAG.getNode(ISD::TRUNCATE, DL, MVT::i8, ANDed);
24612 return OneBitOfTruth;
/// CanFoldXORWithAllOnes - Test whether the XOR operand is an AllOnes vector
/// so it can be folded inside ANDNP.
static bool CanFoldXORWithAllOnes(const SDNode *N) {
  EVT VT = N->getValueType(0);

  // Match direct AllOnes for 128 and 256-bit vectors.
  if (ISD::isBuildVectorAllOnes(N))
    return true;

  // Look through a bit convert.
  if (N->getOpcode() == ISD::BITCAST)
    N = N->getOperand(0).getNode();

  // Sometimes the operand may come from an insert_subvector building a
  // 256-bit AllOnes vector.
  if (VT.is256BitVector() &&
      N->getOpcode() == ISD::INSERT_SUBVECTOR) {
    SDValue V1 = N->getOperand(0);
    SDValue V2 = N->getOperand(1);

    if (V1.getOpcode() == ISD::INSERT_SUBVECTOR &&
        V1.getOperand(0).getOpcode() == ISD::UNDEF &&
        ISD::isBuildVectorAllOnes(V1.getOperand(1).getNode()) &&
        ISD::isBuildVectorAllOnes(V2.getNode()))
      return true;
  }

  return false;
}
24650 // On AVX/AVX2 the type v8i1 is legalized to v8i16, which is an XMM sized
24651 // register. In most cases we actually compare or select YMM-sized registers
24652 // and mixing the two types creates horrible code. This method optimizes
24653 // some of the transition sequences.
24654 static SDValue WidenMaskArithmetic(SDNode *N, SelectionDAG &DAG,
24655 TargetLowering::DAGCombinerInfo &DCI,
24656 const X86Subtarget *Subtarget) {
24657 EVT VT = N->getValueType(0);
24658 if (!VT.is256BitVector())
24661 assert((N->getOpcode() == ISD::ANY_EXTEND ||
24662 N->getOpcode() == ISD::ZERO_EXTEND ||
24663 N->getOpcode() == ISD::SIGN_EXTEND) && "Invalid Node");
24665 SDValue Narrow = N->getOperand(0);
24666 EVT NarrowVT = Narrow->getValueType(0);
24667 if (!NarrowVT.is128BitVector())
24670 if (Narrow->getOpcode() != ISD::XOR &&
24671 Narrow->getOpcode() != ISD::AND &&
24672 Narrow->getOpcode() != ISD::OR)
24675 SDValue N0 = Narrow->getOperand(0);
24676 SDValue N1 = Narrow->getOperand(1);
24679 // The Left side has to be a trunc.
24680 if (N0.getOpcode() != ISD::TRUNCATE)
24683 // The type of the truncated inputs.
24684 EVT WideVT = N0->getOperand(0)->getValueType(0);
24688 // The right side has to be a 'trunc' or a constant vector.
24689 bool RHSTrunc = N1.getOpcode() == ISD::TRUNCATE;
24690 ConstantSDNode *RHSConstSplat = nullptr;
24691 if (auto *RHSBV = dyn_cast<BuildVectorSDNode>(N1))
24692 RHSConstSplat = RHSBV->getConstantSplatNode();
24693 if (!RHSTrunc && !RHSConstSplat)
24696 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
24698 if (!TLI.isOperationLegalOrPromote(Narrow->getOpcode(), WideVT))
24701 // Set N0 and N1 to hold the inputs to the new wide operation.
24702 N0 = N0->getOperand(0);
24703 if (RHSConstSplat) {
24704 N1 = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT.getScalarType(),
24705 SDValue(RHSConstSplat, 0));
24706 SmallVector<SDValue, 8> C(WideVT.getVectorNumElements(), N1);
24707 N1 = DAG.getNode(ISD::BUILD_VECTOR, DL, WideVT, C);
24708 } else if (RHSTrunc) {
24709 N1 = N1->getOperand(0);
24712 // Generate the wide operation.
24713 SDValue Op = DAG.getNode(Narrow->getOpcode(), DL, WideVT, N0, N1);
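  // Finally, narrow the result back per the original extension kind: mask off
  // the high bits for zero-extension, or use sign_extend_inreg for
  // sign-extension.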
  unsigned Opcode = N->getOpcode();
  switch (Opcode) {
  case ISD::ANY_EXTEND:
    return Op;
24718 case ISD::ZERO_EXTEND: {
24719 unsigned InBits = NarrowVT.getScalarType().getSizeInBits();
24720 APInt Mask = APInt::getAllOnesValue(InBits);
24721 Mask = Mask.zext(VT.getScalarType().getSizeInBits());
24722 return DAG.getNode(ISD::AND, DL, VT,
24723 Op, DAG.getConstant(Mask, VT));
24725 case ISD::SIGN_EXTEND:
24726 return DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, VT,
24727 Op, DAG.getValueType(NarrowVT));
  default:
    llvm_unreachable("Unexpected opcode");
  }
}
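/// PerformAndCombine - Do target-specific dag combines on ISD::AND nodes:
/// form BEXTR from (and (srl x, imm), mask) and ANDNP when one operand is a
/// vector 'not'.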
24733 static SDValue PerformAndCombine(SDNode *N, SelectionDAG &DAG,
24734 TargetLowering::DAGCombinerInfo &DCI,
24735 const X86Subtarget *Subtarget) {
24736 EVT VT = N->getValueType(0);
24737 if (DCI.isBeforeLegalizeOps())
24740 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
24744 // Create BEXTR instructions
24745 // BEXTR is ((X >> imm) & (2**size-1))
24746 if (VT == MVT::i32 || VT == MVT::i64) {
24747 SDValue N0 = N->getOperand(0);
24748 SDValue N1 = N->getOperand(1);
24751 // Check for BEXTR.
24752 if ((Subtarget->hasBMI() || Subtarget->hasTBM()) &&
24753 (N0.getOpcode() == ISD::SRA || N0.getOpcode() == ISD::SRL)) {
24754 ConstantSDNode *MaskNode = dyn_cast<ConstantSDNode>(N1);
24755 ConstantSDNode *ShiftNode = dyn_cast<ConstantSDNode>(N0.getOperand(1));
24756 if (MaskNode && ShiftNode) {
24757 uint64_t Mask = MaskNode->getZExtValue();
24758 uint64_t Shift = ShiftNode->getZExtValue();
24759 if (isMask_64(Mask)) {
24760 uint64_t MaskSize = countPopulation(Mask);
24761 if (Shift + MaskSize <= VT.getSizeInBits())
24762 return DAG.getNode(X86ISD::BEXTR, DL, VT, N0.getOperand(0),
24763 DAG.getConstant(Shift | (MaskSize << 8), VT));
24771 // Want to form ANDNP nodes:
24772 // 1) In the hopes of then easily combining them with OR and AND nodes
24773 // to form PBLEND/PSIGN.
24774 // 2) To match ANDN packed intrinsics
24775 if (VT != MVT::v2i64 && VT != MVT::v4i64)
24778 SDValue N0 = N->getOperand(0);
24779 SDValue N1 = N->getOperand(1);
24782 // Check LHS for vnot
24783 if (N0.getOpcode() == ISD::XOR &&
24784 //ISD::isBuildVectorAllOnes(N0.getOperand(1).getNode()))
24785 CanFoldXORWithAllOnes(N0.getOperand(1).getNode()))
24786 return DAG.getNode(X86ISD::ANDNP, DL, VT, N0.getOperand(0), N1);
24788 // Check RHS for vnot
24789 if (N1.getOpcode() == ISD::XOR &&
24790 //ISD::isBuildVectorAllOnes(N1.getOperand(1).getNode()))
24791 CanFoldXORWithAllOnes(N1.getOperand(1).getNode()))
24792 return DAG.getNode(X86ISD::ANDNP, DL, VT, N1.getOperand(0), N0);
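/// PerformOrCombine - Do target-specific dag combines on ISD::OR nodes:
/// form PSIGN/PBLENDVB from (or (and m, y), (andnp m, x)) and SHLD/SHRD from
/// complementary shift pairs.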
24797 static SDValue PerformOrCombine(SDNode *N, SelectionDAG &DAG,
24798 TargetLowering::DAGCombinerInfo &DCI,
24799 const X86Subtarget *Subtarget) {
24800 if (DCI.isBeforeLegalizeOps())
24803 SDValue R = CMPEQCombine(N, DAG, DCI, Subtarget);
24807 SDValue N0 = N->getOperand(0);
24808 SDValue N1 = N->getOperand(1);
24809 EVT VT = N->getValueType(0);
24811 // look for psign/blend
24812 if (VT == MVT::v2i64 || VT == MVT::v4i64) {
24813 if (!Subtarget->hasSSSE3() ||
24814 (VT == MVT::v4i64 && !Subtarget->hasInt256()))
24817 // Canonicalize pandn to RHS
24818 if (N0.getOpcode() == X86ISD::ANDNP)
24820 // or (and (m, y), (pandn m, x))
24821 if (N0.getOpcode() == ISD::AND && N1.getOpcode() == X86ISD::ANDNP) {
24822 SDValue Mask = N1.getOperand(0);
24823 SDValue X = N1.getOperand(1);
24825 if (N0.getOperand(0) == Mask)
24826 Y = N0.getOperand(1);
24827 if (N0.getOperand(1) == Mask)
24828 Y = N0.getOperand(0);
24830 // Check to see if the mask appeared in both the AND and ANDNP and
24834 // Validate that X, Y, and Mask are BIT_CONVERTS, and see through them.
24835 // Look through mask bitcast.
24836 if (Mask.getOpcode() == ISD::BITCAST)
24837 Mask = Mask.getOperand(0);
24838 if (X.getOpcode() == ISD::BITCAST)
24839 X = X.getOperand(0);
24840 if (Y.getOpcode() == ISD::BITCAST)
24841 Y = Y.getOperand(0);
24843 EVT MaskVT = Mask.getValueType();
24845 // Validate that the Mask operand is a vector sra node.
24846 // FIXME: what to do for bytes, since there is a psignb/pblendvb, but
24847 // there is no psrai.b
24848 unsigned EltBits = MaskVT.getVectorElementType().getSizeInBits();
24849 unsigned SraAmt = ~0;
24850 if (Mask.getOpcode() == ISD::SRA) {
24851 if (auto *AmtBV = dyn_cast<BuildVectorSDNode>(Mask.getOperand(1)))
24852 if (auto *AmtConst = AmtBV->getConstantSplatNode())
24853 SraAmt = AmtConst->getZExtValue();
24854 } else if (Mask.getOpcode() == X86ISD::VSRAI) {
24855 SDValue SraC = Mask.getOperand(1);
24856 SraAmt = cast<ConstantSDNode>(SraC)->getZExtValue();
24858 if ((SraAmt + 1) != EltBits)
24863 // Now we know we at least have a plendvb with the mask val. See if
24864 // we can form a psignb/w/d.
24865 // psign = x.type == y.type == mask.type && y = sub(0, x);
24866 if (Y.getOpcode() == ISD::SUB && Y.getOperand(1) == X &&
24867 ISD::isBuildVectorAllZeros(Y.getOperand(0).getNode()) &&
24868 X.getValueType() == MaskVT && Y.getValueType() == MaskVT) {
24869 assert((EltBits == 8 || EltBits == 16 || EltBits == 32) &&
24870 "Unsupported VT for PSIGN");
24871 Mask = DAG.getNode(X86ISD::PSIGN, DL, MaskVT, X, Mask.getOperand(0));
24872 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
24874 // PBLENDVB only available on SSE 4.1
24875 if (!Subtarget->hasSSE41())
24878 EVT BlendVT = (VT == MVT::v4i64) ? MVT::v32i8 : MVT::v16i8;
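      // Emit the blend as a byte-wise VSELECT: PBLENDVB selects on the sign
      // bit of each mask byte, so operate on the v16i8/v32i8 view of the
      // operands and cast the result back.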
24880 X = DAG.getNode(ISD::BITCAST, DL, BlendVT, X);
24881 Y = DAG.getNode(ISD::BITCAST, DL, BlendVT, Y);
24882 Mask = DAG.getNode(ISD::BITCAST, DL, BlendVT, Mask);
24883 Mask = DAG.getNode(ISD::VSELECT, DL, BlendVT, Mask, Y, X);
24884 return DAG.getNode(ISD::BITCAST, DL, VT, Mask);
24888 if (VT != MVT::i16 && VT != MVT::i32 && VT != MVT::i64)
24891 // fold (or (x << c) | (y >> (64 - c))) ==> (shld64 x, y, c)
24892 MachineFunction &MF = DAG.getMachineFunction();
24894 MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
24896 // SHLD/SHRD instructions have lower register pressure, but on some
24897 // platforms they have higher latency than the equivalent
24898 // series of shifts/or that would otherwise be generated.
24899 // Don't fold (or (x << c) | (y >> (64 - c))) if SHLD/SHRD instructions
24900 // have higher latencies and we are not optimizing for size.
24901 if (!OptForSize && Subtarget->isSHLDSlow())
24904 if (N0.getOpcode() == ISD::SRL && N1.getOpcode() == ISD::SHL)
24906 if (N0.getOpcode() != ISD::SHL || N1.getOpcode() != ISD::SRL)
24908 if (!N0.hasOneUse() || !N1.hasOneUse())
24911 SDValue ShAmt0 = N0.getOperand(1);
24912 if (ShAmt0.getValueType() != MVT::i8)
24914 SDValue ShAmt1 = N1.getOperand(1);
24915 if (ShAmt1.getValueType() != MVT::i8)
24917 if (ShAmt0.getOpcode() == ISD::TRUNCATE)
24918 ShAmt0 = ShAmt0.getOperand(0);
24919 if (ShAmt1.getOpcode() == ISD::TRUNCATE)
24920 ShAmt1 = ShAmt1.getOperand(0);
24923 unsigned Opc = X86ISD::SHLD;
24924 SDValue Op0 = N0.getOperand(0);
24925 SDValue Op1 = N1.getOperand(0);
24926 if (ShAmt0.getOpcode() == ISD::SUB) {
24927 Opc = X86ISD::SHRD;
24928 std::swap(Op0, Op1);
24929 std::swap(ShAmt0, ShAmt1);
24932 unsigned Bits = VT.getSizeInBits();
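  // Match the two shift amounts: either (c, width - c) expressed with an
  // ISD::SUB, or two constants that sum to the operand width.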
24933 if (ShAmt1.getOpcode() == ISD::SUB) {
24934 SDValue Sum = ShAmt1.getOperand(0);
24935 if (ConstantSDNode *SumC = dyn_cast<ConstantSDNode>(Sum)) {
24936 SDValue ShAmt1Op1 = ShAmt1.getOperand(1);
24937 if (ShAmt1Op1.getNode()->getOpcode() == ISD::TRUNCATE)
24938 ShAmt1Op1 = ShAmt1Op1.getOperand(0);
24939 if (SumC->getSExtValue() == Bits && ShAmt1Op1 == ShAmt0)
24940 return DAG.getNode(Opc, DL, VT,
24942 DAG.getNode(ISD::TRUNCATE, DL,
24945 } else if (ConstantSDNode *ShAmt1C = dyn_cast<ConstantSDNode>(ShAmt1)) {
24946 ConstantSDNode *ShAmt0C = dyn_cast<ConstantSDNode>(ShAmt0);
24948 ShAmt0C->getSExtValue() + ShAmt1C->getSExtValue() == Bits)
24949 return DAG.getNode(Opc, DL, VT,
24950 N0.getOperand(0), N1.getOperand(0),
24951 DAG.getNode(ISD::TRUNCATE, DL,
// Generate NEG and CMOV for integer abs.
static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);

  // Since X86 does not have CMOV for 8-bit integer, we don't convert
  // 8-bit integer abs to NEG and CMOV.
  if (VT.isInteger() && VT.getSizeInBits() == 8)
    return SDValue();

  SDValue N0 = N->getOperand(0);
  SDValue N1 = N->getOperand(1);
  SDLoc DL(N);

  // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1)
  // and change it to SUB and CMOV.
  if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
      N0.getOpcode() == ISD::ADD &&
      N0.getOperand(1) == N1 &&
      N1.getOpcode() == ISD::SRA &&
      N1.getOperand(0) == N0.getOperand(0))
    if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
      if (Y1C->getAPIntValue() == VT.getSizeInBits()-1) {
        // Generate SUB & CMOV.
        SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
                                  DAG.getConstant(0, VT), N0.getOperand(0));
        SDValue Ops[] = { N0.getOperand(0), Neg,
                          DAG.getConstant(X86::COND_GE, MVT::i8),
                          SDValue(Neg.getNode(), 1) };
        return DAG.getNode(X86ISD::CMOV, DL, DAG.getVTList(VT, MVT::Glue), Ops);
      }
  return SDValue();
}

// PerformXorCombine - Attempts to turn XOR nodes into BLSMSK nodes.
static SDValue PerformXorCombine(SDNode *N, SelectionDAG &DAG,
                                 TargetLowering::DAGCombinerInfo &DCI,
                                 const X86Subtarget *Subtarget) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  if (Subtarget->hasCMov()) {
    SDValue RV = performIntegerAbsCombine(N, DAG);
    if (RV.getNode())
      return RV;
  }

  return SDValue();
}
/// PerformLOADCombine - Do target-specific dag combines on LOAD nodes.
static SDValue PerformLOADCombine(SDNode *N, SelectionDAG &DAG,
                                  TargetLowering::DAGCombinerInfo &DCI,
                                  const X86Subtarget *Subtarget) {
  LoadSDNode *Ld = cast<LoadSDNode>(N);
  EVT RegVT = Ld->getValueType(0);
  EVT MemVT = Ld->getMemoryVT();
  SDLoc dl(Ld);
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();

  // For chips with slow 32-byte unaligned loads, break the 32-byte operation
  // into two 16-byte operations.
  ISD::LoadExtType Ext = Ld->getExtensionType();
  unsigned Alignment = Ld->getAlignment();
  bool IsAligned = Alignment == 0 || Alignment >= MemVT.getSizeInBits()/8;
  if (RegVT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
      !DCI.isBeforeLegalizeOps() && !IsAligned && Ext == ISD::NON_EXTLOAD) {
    unsigned NumElems = RegVT.getVectorNumElements();

    SDValue Ptr = Ld->getBasePtr();
    SDValue Increment = DAG.getConstant(16, TLI.getPointerTy());

    EVT HalfVT = EVT::getVectorVT(*DAG.getContext(), MemVT.getScalarType(),
                                  NumElems/2);
    SDValue Load1 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
                                Ld->getPointerInfo(), Ld->isVolatile(),
                                Ld->isNonTemporal(), Ld->isInvariant(),
                                Alignment);
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
    SDValue Load2 = DAG.getLoad(HalfVT, dl, Ld->getChain(), Ptr,
                                Ld->getPointerInfo(), Ld->isVolatile(),
                                Ld->isNonTemporal(), Ld->isInvariant(),
                                std::min(16U, Alignment));
    SDValue TF = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                             Load1.getValue(1),
                             Load2.getValue(1));

    SDValue NewVec = DAG.getUNDEF(RegVT);
    NewVec = Insert128BitVector(NewVec, Load1, 0, DAG, dl);
    NewVec = Insert128BitVector(NewVec, Load2, NumElems/2, DAG, dl);
    return DCI.CombineTo(N, NewVec, TF, true);
  }

  return SDValue();
}
25056 /// PerformMLOADCombine - Resolve extending loads
25057 static SDValue PerformMLOADCombine(SDNode *N, SelectionDAG &DAG,
25058 TargetLowering::DAGCombinerInfo &DCI,
25059 const X86Subtarget *Subtarget) {
25060 MaskedLoadSDNode *Mld = cast<MaskedLoadSDNode>(N);
25061 if (Mld->getExtensionType() != ISD::SEXTLOAD)
25064 EVT VT = Mld->getValueType(0);
25065 unsigned NumElems = VT.getVectorNumElements();
25066 EVT LdVT = Mld->getMemoryVT();
25069 assert(LdVT != VT && "Cannot extend to the same type");
25070 unsigned ToSz = VT.getVectorElementType().getSizeInBits();
25071 unsigned FromSz = LdVT.getVectorElementType().getSizeInBits();
25072 // From, To sizes and ElemCount must be pow of two
25073 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
25074 "Unexpected size for extending masked load");
25076 unsigned SizeRatio = ToSz / FromSz;
25077 assert(SizeRatio * NumElems * FromSz == VT.getSizeInBits());
25079 // Create a type on which we perform the shuffle
25080 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25081 LdVT.getScalarType(), NumElems*SizeRatio);
25082 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25084 // Convert Src0 value
25085 SDValue WideSrc0 = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mld->getSrc0());
25086 if (Mld->getSrc0().getOpcode() != ISD::UNDEF) {
25087 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25088 for (unsigned i = 0; i != NumElems; ++i)
25089 ShuffleVec[i] = i * SizeRatio;
25091 // Can't shuffle using an illegal type.
25092 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
25093 && "WideVecVT should be legal");
25094 WideSrc0 = DAG.getVectorShuffle(WideVecVT, dl, WideSrc0,
                                    DAG.getUNDEF(WideVecVT), &ShuffleVec[0]);
  }
  // Prepare the new mask
  SDValue NewMask;
  SDValue Mask = Mld->getMask();
25100 if (Mask.getValueType() == VT) {
25101 // Mask and original value have the same type
25102 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
25103 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25104 for (unsigned i = 0; i != NumElems; ++i)
25105 ShuffleVec[i] = i * SizeRatio;
25106 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
25107 ShuffleVec[i] = NumElems*SizeRatio;
    NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
                                   DAG.getConstant(0, WideVecVT),
                                   &ShuffleVec[0]);
  } else {
    assert(Mask.getValueType().getVectorElementType() == MVT::i1);
25114 unsigned WidenNumElts = NumElems*SizeRatio;
25115 unsigned MaskNumElts = VT.getVectorNumElements();
    EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                     WidenNumElts);

    unsigned NumConcat = WidenNumElts / MaskNumElts;
    SmallVector<SDValue, 16> Ops(NumConcat);
    SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
    Ops[0] = Mask;
    for (unsigned i = 1; i != NumConcat; ++i)
      Ops[i] = ZeroVal;

    NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
  }
  SDValue WideLd = DAG.getMaskedLoad(WideVecVT, dl, Mld->getChain(),
                                     Mld->getBasePtr(), NewMask, WideSrc0,
                                     Mld->getMemoryVT(), Mld->getMemOperand(),
                                     ISD::NON_EXTLOAD);
  SDValue NewVec = DAG.getNode(X86ISD::VSEXT, dl, VT, WideLd);
  return DCI.CombineTo(N, NewVec, WideLd.getValue(1), true);
}
25137 /// PerformMSTORECombine - Resolve truncating stores
25138 static SDValue PerformMSTORECombine(SDNode *N, SelectionDAG &DAG,
25139 const X86Subtarget *Subtarget) {
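  // Illustrative example: a truncating masked store of v8i32 to v8i16 is
  // rewritten as a shuffle that packs the eight truncated i16 values into the
  // low half of a v16i16 register, a correspondingly widened mask, and a
  // normal (non-truncating) masked store of the narrow memory type.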
  MaskedStoreSDNode *Mst = cast<MaskedStoreSDNode>(N);
  if (!Mst->isTruncatingStore())
    return SDValue();

  EVT VT = Mst->getValue().getValueType();
  unsigned NumElems = VT.getVectorNumElements();
  EVT StVT = Mst->getMemoryVT();
  SDLoc dl(Mst);
25149 assert(StVT != VT && "Cannot truncate to the same type");
25150 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25151 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25153 // From, To sizes and ElemCount must be pow of two
25154 assert (isPowerOf2_32(NumElems * FromSz * ToSz) &&
25155 "Unexpected size for truncating masked store");
25156 // We are going to use the original vector elt for storing.
25157 // Accumulated smaller vector elements must be a multiple of the store size.
25158 assert (((NumElems * FromSz) % ToSz) == 0 &&
25159 "Unexpected ratio for truncating masked store");
25161 unsigned SizeRatio = FromSz / ToSz;
25162 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
25164 // Create a type on which we perform the shuffle
25165 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25166 StVT.getScalarType(), NumElems*SizeRatio);
25168 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25170 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mst->getValue());
25171 SmallVector<int, 16> ShuffleVec(NumElems * SizeRatio, -1);
25172 for (unsigned i = 0; i != NumElems; ++i)
25173 ShuffleVec[i] = i * SizeRatio;
25175 // Can't shuffle using an illegal type.
25176 assert (DAG.getTargetLoweringInfo().isTypeLegal(WideVecVT)
25177 && "WideVecVT should be legal");
  SDValue TruncatedVal = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
                                              DAG.getUNDEF(WideVecVT),
                                              &ShuffleVec[0]);

  SDValue NewMask;
  SDValue Mask = Mst->getMask();
25185 if (Mask.getValueType() == VT) {
25186 // Mask and original value have the same type
25187 NewMask = DAG.getNode(ISD::BITCAST, dl, WideVecVT, Mask);
25188 for (unsigned i = 0; i != NumElems; ++i)
25189 ShuffleVec[i] = i * SizeRatio;
25190 for (unsigned i = NumElems; i != NumElems*SizeRatio; ++i)
25191 ShuffleVec[i] = NumElems*SizeRatio;
    NewMask = DAG.getVectorShuffle(WideVecVT, dl, NewMask,
                                   DAG.getConstant(0, WideVecVT),
                                   &ShuffleVec[0]);
  } else {
    assert(Mask.getValueType().getVectorElementType() == MVT::i1);
25198 unsigned WidenNumElts = NumElems*SizeRatio;
25199 unsigned MaskNumElts = VT.getVectorNumElements();
    EVT NewMaskVT = EVT::getVectorVT(*DAG.getContext(), MVT::i1,
                                     WidenNumElts);

    unsigned NumConcat = WidenNumElts / MaskNumElts;
    SmallVector<SDValue, 16> Ops(NumConcat);
    SDValue ZeroVal = DAG.getConstant(0, Mask.getValueType());
    Ops[0] = Mask;
    for (unsigned i = 1; i != NumConcat; ++i)
      Ops[i] = ZeroVal;

    NewMask = DAG.getNode(ISD::CONCAT_VECTORS, dl, NewMaskVT, Ops);
  }

  return DAG.getMaskedStore(Mst->getChain(), dl, TruncatedVal,
                            Mst->getBasePtr(), NewMask, StVT,
                            Mst->getMemOperand(), false);
}
25216 /// PerformSTORECombine - Do target-specific dag combines on STORE nodes.
25217 static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
25218 const X86Subtarget *Subtarget) {
25219 StoreSDNode *St = cast<StoreSDNode>(N);
25220 EVT VT = St->getValue().getValueType();
  EVT StVT = St->getMemoryVT();
  SDLoc dl(St);
  SDValue StoredVal = St->getOperand(1);
25224 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25226 // If we are saving a concatenation of two XMM registers and 32-byte stores
25227 // are slow, such as on Sandy Bridge, perform two 16-byte stores.
25228 unsigned Alignment = St->getAlignment();
25229 bool IsAligned = Alignment == 0 || Alignment >= VT.getSizeInBits()/8;
25230 if (VT.is256BitVector() && Subtarget->isUnalignedMem32Slow() &&
25231 StVT == VT && !IsAligned) {
    unsigned NumElems = VT.getVectorNumElements();
    if (NumElems < 2)
      return SDValue();

    SDValue Value0 = Extract128BitVector(StoredVal, 0, DAG, dl);
25237 SDValue Value1 = Extract128BitVector(StoredVal, NumElems/2, DAG, dl);
25239 SDValue Stride = DAG.getConstant(16, TLI.getPointerTy());
25240 SDValue Ptr0 = St->getBasePtr();
25241 SDValue Ptr1 = DAG.getNode(ISD::ADD, dl, Ptr0.getValueType(), Ptr0, Stride);
25243 SDValue Ch0 = DAG.getStore(St->getChain(), dl, Value0, Ptr0,
25244 St->getPointerInfo(), St->isVolatile(),
25245 St->isNonTemporal(), Alignment);
25246 SDValue Ch1 = DAG.getStore(St->getChain(), dl, Value1, Ptr1,
25247 St->getPointerInfo(), St->isVolatile(),
25248 St->isNonTemporal(),
25249 std::min(16U, Alignment));
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Ch0, Ch1);
  }
25253 // Optimize trunc store (of multiple scalars) to shuffle and store.
25254 // First, pack all of the elements in one place. Next, store to memory
25255 // in fewer chunks.
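  // For example, a truncating store of v8i32 to v8i16 is handled by packing
  // the eight 16-bit results into the low half of a v16i16 register with a
  // shuffle, then writing that data out with one or more stores of the widest
  // legal integer unit (falling back to f64 on 32-bit targets).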
25256 if (St->isTruncatingStore() && VT.isVector()) {
25257 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25258 unsigned NumElems = VT.getVectorNumElements();
25259 assert(StVT != VT && "Cannot truncate to the same type");
25260 unsigned FromSz = VT.getVectorElementType().getSizeInBits();
25261 unsigned ToSz = StVT.getVectorElementType().getSizeInBits();
25263 // From, To sizes and ElemCount must be pow of two
25264 if (!isPowerOf2_32(NumElems * FromSz * ToSz)) return SDValue();
25265 // We are going to use the original vector elt for storing.
25266 // Accumulated smaller vector elements must be a multiple of the store size.
25267 if (0 != (NumElems * FromSz) % ToSz) return SDValue();
25269 unsigned SizeRatio = FromSz / ToSz;
25271 assert(SizeRatio * NumElems * ToSz == VT.getSizeInBits());
25273 // Create a type on which we perform the shuffle
25274 EVT WideVecVT = EVT::getVectorVT(*DAG.getContext(),
25275 StVT.getScalarType(), NumElems*SizeRatio);
25277 assert(WideVecVT.getSizeInBits() == VT.getSizeInBits());
25279 SDValue WideVec = DAG.getNode(ISD::BITCAST, dl, WideVecVT, St->getValue());
25280 SmallVector<int, 8> ShuffleVec(NumElems * SizeRatio, -1);
25281 for (unsigned i = 0; i != NumElems; ++i)
25282 ShuffleVec[i] = i * SizeRatio;
25284 // Can't shuffle using an illegal type.
    if (!TLI.isTypeLegal(WideVecVT))
      return SDValue();

    SDValue Shuff = DAG.getVectorShuffle(WideVecVT, dl, WideVec,
                                         DAG.getUNDEF(WideVecVT),
                                         &ShuffleVec[0]);
25291 // At this point all of the data is stored at the bottom of the
25292 // register. We now need to save it to mem.
25294 // Find the largest store unit
25295 MVT StoreType = MVT::i8;
    for (MVT Tp : MVT::integer_valuetypes()) {
      if (TLI.isTypeLegal(Tp) && Tp.getSizeInBits() <= NumElems * ToSz)
        StoreType = Tp;
    }

    // On 32bit systems, we can't save 64bit integers. Try bitcasting to F64.
25302 if (TLI.isTypeLegal(MVT::f64) && StoreType.getSizeInBits() < 64 &&
25303 (64 <= NumElems * ToSz))
25304 StoreType = MVT::f64;
25306 // Bitcast the original vector into a vector of store-size units
25307 EVT StoreVecVT = EVT::getVectorVT(*DAG.getContext(),
25308 StoreType, VT.getSizeInBits()/StoreType.getSizeInBits());
25309 assert(StoreVecVT.getSizeInBits() == VT.getSizeInBits());
25310 SDValue ShuffWide = DAG.getNode(ISD::BITCAST, dl, StoreVecVT, Shuff);
25311 SmallVector<SDValue, 8> Chains;
25312 SDValue Increment = DAG.getConstant(StoreType.getSizeInBits()/8,
25313 TLI.getPointerTy());
25314 SDValue Ptr = St->getBasePtr();
25316 // Perform one or more big stores into memory.
25317 for (unsigned i=0, e=(ToSz*NumElems)/StoreType.getSizeInBits(); i!=e; ++i) {
25318 SDValue SubVec = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
25319 StoreType, ShuffWide,
25320 DAG.getIntPtrConstant(i));
25321 SDValue Ch = DAG.getStore(St->getChain(), dl, SubVec, Ptr,
25322 St->getPointerInfo(), St->isVolatile(),
25323 St->isNonTemporal(), St->getAlignment());
25324 Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr, Increment);
      Chains.push_back(Ch);
    }

    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Chains);
  }
25331 // Turn load->store of MMX types into GPR load/stores. This avoids clobbering
25332 // the FP state in cases where an emms may be missing.
25333 // A preferable solution to the general problem is to figure out the right
25334 // places to insert EMMS. This qualifies as a quick hack.
25336 // Similarly, turn load->store of i64 into double load/stores in 32-bit mode.
  if (VT.getSizeInBits() != 64)
    return SDValue();

  const Function *F = DAG.getMachineFunction().getFunction();
25341 bool NoImplicitFloatOps = F->hasFnAttribute(Attribute::NoImplicitFloat);
25342 bool F64IsLegal = !DAG.getTarget().Options.UseSoftFloat && !NoImplicitFloatOps
25343 && Subtarget->hasSSE2();
25344 if ((VT.isVector() ||
25345 (VT == MVT::i64 && F64IsLegal && !Subtarget->is64Bit())) &&
25346 isa<LoadSDNode>(St->getValue()) &&
25347 !cast<LoadSDNode>(St->getValue())->isVolatile() &&
25348 St->getChain().hasOneUse() && !St->isVolatile()) {
25349 SDNode* LdVal = St->getValue().getNode();
25350 LoadSDNode *Ld = nullptr;
25351 int TokenFactorIndex = -1;
25352 SmallVector<SDValue, 8> Ops;
25353 SDNode* ChainVal = St->getChain().getNode();
25354 // Must be a store of a load. We currently handle two cases: the load
25355 // is a direct child, and it's under an intervening TokenFactor. It is
25356 // possible to dig deeper under nested TokenFactors.
25357 if (ChainVal == LdVal)
25358 Ld = cast<LoadSDNode>(St->getChain());
25359 else if (St->getValue().hasOneUse() &&
25360 ChainVal->getOpcode() == ISD::TokenFactor) {
25361 for (unsigned i = 0, e = ChainVal->getNumOperands(); i != e; ++i) {
25362 if (ChainVal->getOperand(i).getNode() == LdVal) {
          TokenFactorIndex = i;
          Ld = cast<LoadSDNode>(St->getValue());
        } else
          Ops.push_back(ChainVal->getOperand(i));
      }
    }

    if (!Ld || !ISD::isNormalLoad(Ld))
      return SDValue();
25373 // If this is not the MMX case, i.e. we are just turning i64 load/store
25374 // into f64 load/store, avoid the transformation if there are multiple
25375 // uses of the loaded value.
    if (!VT.isVector() && !Ld->hasNUsesOfValue(1, 0))
      return SDValue();

    SDLoc LdDL(Ld), StDL(N);
25381 // If we are a 64-bit capable x86, lower to a single movq load/store pair.
25382 // Otherwise, if it's legal to use f64 SSE instructions, use f64 load/store
25384 if (Subtarget->is64Bit() || F64IsLegal) {
25385 EVT LdVT = Subtarget->is64Bit() ? MVT::i64 : MVT::f64;
25386 SDValue NewLd = DAG.getLoad(LdVT, LdDL, Ld->getChain(), Ld->getBasePtr(),
25387 Ld->getPointerInfo(), Ld->isVolatile(),
25388 Ld->isNonTemporal(), Ld->isInvariant(),
25389 Ld->getAlignment());
25390 SDValue NewChain = NewLd.getValue(1);
25391 if (TokenFactorIndex != -1) {
25392 Ops.push_back(NewChain);
        NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
      }
      return DAG.getStore(NewChain, StDL, NewLd, St->getBasePtr(),
25396 St->getPointerInfo(),
25397 St->isVolatile(), St->isNonTemporal(),
                          St->getAlignment());
    }

    // Otherwise, lower to two pairs of 32-bit loads / stores.
25402 SDValue LoAddr = Ld->getBasePtr();
25403 SDValue HiAddr = DAG.getNode(ISD::ADD, LdDL, MVT::i32, LoAddr,
25404 DAG.getConstant(4, MVT::i32));
25406 SDValue LoLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), LoAddr,
25407 Ld->getPointerInfo(),
25408 Ld->isVolatile(), Ld->isNonTemporal(),
25409 Ld->isInvariant(), Ld->getAlignment());
25410 SDValue HiLd = DAG.getLoad(MVT::i32, LdDL, Ld->getChain(), HiAddr,
25411 Ld->getPointerInfo().getWithOffset(4),
25412 Ld->isVolatile(), Ld->isNonTemporal(),
                               Ld->isInvariant(),
                               MinAlign(Ld->getAlignment(), 4));
25416 SDValue NewChain = LoLd.getValue(1);
25417 if (TokenFactorIndex != -1) {
25418 Ops.push_back(LoLd);
25419 Ops.push_back(HiLd);
      NewChain = DAG.getNode(ISD::TokenFactor, LdDL, MVT::Other, Ops);
    }

    LoAddr = St->getBasePtr();
25424 HiAddr = DAG.getNode(ISD::ADD, StDL, MVT::i32, LoAddr,
25425 DAG.getConstant(4, MVT::i32));
25427 SDValue LoSt = DAG.getStore(NewChain, StDL, LoLd, LoAddr,
25428 St->getPointerInfo(),
25429 St->isVolatile(), St->isNonTemporal(),
25430 St->getAlignment());
    SDValue HiSt = DAG.getStore(NewChain, StDL, HiLd, HiAddr,
                                St->getPointerInfo().getWithOffset(4),
                                St->isVolatile(),
                                St->isNonTemporal(),
                                MinAlign(St->getAlignment(), 4));
    return DAG.getNode(ISD::TokenFactor, StDL, MVT::Other, LoSt, HiSt);
  }
  return SDValue();
}
25441 /// Return 'true' if this vector operation is "horizontal"
25442 /// and return the operands for the horizontal operation in LHS and RHS. A
25443 /// horizontal operation performs the binary operation on successive elements
25444 /// of its first operand, then on successive elements of its second operand,
25445 /// returning the resulting values in a vector. For example, if
25446 /// A = < float a0, float a1, float a2, float a3 >
25448 /// B = < float b0, float b1, float b2, float b3 >
25449 /// then the result of doing a horizontal operation on A and B is
25450 /// A horizontal-op B = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >.
25451 /// In short, LHS and RHS are inspected to see if LHS op RHS is of the form
25452 /// A horizontal-op B, for some already available A and B, and if so then LHS is
25453 /// set to A, RHS to B, and the routine returns 'true'.
25454 /// Note that the binary operation should have the property that if one of the
25455 /// operands is UNDEF then the result is UNDEF.
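/// On X86 this is what the SSE3/SSSE3/AVX horizontal add/sub instructions
/// (HADDPS/HADDPD/PHADDW/PHADDD and their SUB counterparts) compute, which is
/// why the callers below only form X86ISD::FHADD/FHSUB/HADD/HSUB when this
/// routine succeeds.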
25456 static bool isHorizontalBinOp(SDValue &LHS, SDValue &RHS, bool IsCommutative) {
25457 // Look for the following pattern: if
25458 // A = < float a0, float a1, float a2, float a3 >
25459 // B = < float b0, float b1, float b2, float b3 >
25461 // LHS = VECTOR_SHUFFLE A, B, <0, 2, 4, 6>
25462 // RHS = VECTOR_SHUFFLE A, B, <1, 3, 5, 7>
25463 // then LHS op RHS = < a0 op a1, a2 op a3, b0 op b1, b2 op b3 >
25464 // which is A horizontal-op B.
25466 // At least one of the operands should be a vector shuffle.
25467 if (LHS.getOpcode() != ISD::VECTOR_SHUFFLE &&
      RHS.getOpcode() != ISD::VECTOR_SHUFFLE)
    return false;
25471 MVT VT = LHS.getSimpleValueType();
25473 assert((VT.is128BitVector() || VT.is256BitVector()) &&
25474 "Unsupported vector type for horizontal add/sub");
25476 // Handle 128 and 256-bit vector lengths. AVX defines horizontal add/sub to
25477 // operate independently on 128-bit lanes.
25478 unsigned NumElts = VT.getVectorNumElements();
25479 unsigned NumLanes = VT.getSizeInBits()/128;
25480 unsigned NumLaneElts = NumElts / NumLanes;
25481 assert((NumLaneElts % 2 == 0) &&
25482 "Vector type should have an even number of elements in each lane");
25483 unsigned HalfLaneElts = NumLaneElts/2;
25485 // View LHS in the form
25486 // LHS = VECTOR_SHUFFLE A, B, LMask
25487 // If LHS is not a shuffle then pretend it is the shuffle
25488 // LHS = VECTOR_SHUFFLE LHS, undef, <0, 1, ..., N-1>
  // NOTE: in what follows a default initialized SDValue represents an UNDEF of
  // type VT.
  SDValue A, B;
  SmallVector<int, 16> LMask(NumElts);
25493 if (LHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25494 if (LHS.getOperand(0).getOpcode() != ISD::UNDEF)
25495 A = LHS.getOperand(0);
25496 if (LHS.getOperand(1).getOpcode() != ISD::UNDEF)
25497 B = LHS.getOperand(1);
25498 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(LHS.getNode())->getMask();
    std::copy(Mask.begin(), Mask.end(), LMask.begin());
  } else {
    if (LHS.getOpcode() != ISD::UNDEF)
      return false;
    for (unsigned i = 0; i != NumElts; ++i)
      LMask[i] = i;
  }
25507 // Likewise, view RHS in the form
  // RHS = VECTOR_SHUFFLE C, D, RMask
  SDValue C, D;
  SmallVector<int, 16> RMask(NumElts);
25511 if (RHS.getOpcode() == ISD::VECTOR_SHUFFLE) {
25512 if (RHS.getOperand(0).getOpcode() != ISD::UNDEF)
25513 C = RHS.getOperand(0);
25514 if (RHS.getOperand(1).getOpcode() != ISD::UNDEF)
25515 D = RHS.getOperand(1);
25516 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(RHS.getNode())->getMask();
    std::copy(Mask.begin(), Mask.end(), RMask.begin());
  } else {
    if (RHS.getOpcode() != ISD::UNDEF)
      return false;
    for (unsigned i = 0; i != NumElts; ++i)
      RMask[i] = i;
  }
25525 // Check that the shuffles are both shuffling the same vectors.
  if (!(A == C && B == D) && !(A == D && B == C))
    return false;

  // If everything is UNDEF then bail out: it would be better to fold to UNDEF.
  if (!A.getNode() && !B.getNode())
    return false;
25533 // If A and B occur in reverse order in RHS, then "swap" them (which means
  // rewriting the mask).
  if (A != C)
    CommuteVectorShuffleMask(RMask, NumElts);
25538 // At this point LHS and RHS are equivalent to
25539 // LHS = VECTOR_SHUFFLE A, B, LMask
25540 // RHS = VECTOR_SHUFFLE A, B, RMask
25541 // Check that the masks correspond to performing a horizontal operation.
25542 for (unsigned l = 0; l != NumElts; l += NumLaneElts) {
25543 for (unsigned i = 0; i != NumLaneElts; ++i) {
25544 int LIdx = LMask[i+l], RIdx = RMask[i+l];
25546 // Ignore any UNDEF components.
25547 if (LIdx < 0 || RIdx < 0 ||
25548 (!A.getNode() && (LIdx < (int)NumElts || RIdx < (int)NumElts)) ||
          (!B.getNode() && (LIdx >= (int)NumElts || RIdx >= (int)NumElts)))
        continue;
25552 // Check that successive elements are being operated on. If not, this is
25553 // not a horizontal operation.
25554 unsigned Src = (i/HalfLaneElts); // each lane is split between srcs
25555 int Index = 2*(i%HalfLaneElts) + NumElts*Src + l;
25556 if (!(LIdx == Index && RIdx == Index + 1) &&
          !(IsCommutative && LIdx == Index + 1 && RIdx == Index))
        return false;
    }
  }

  LHS = A.getNode() ? A : B; // If A is 'UNDEF', use B for it.
  RHS = B.getNode() ? B : A; // If B is 'UNDEF', use A for it.
  return true;
}
25567 /// Do target-specific dag combines on floating point adds.
25568 static SDValue PerformFADDCombine(SDNode *N, SelectionDAG &DAG,
25569 const X86Subtarget *Subtarget) {
25570 EVT VT = N->getValueType(0);
25571 SDValue LHS = N->getOperand(0);
25572 SDValue RHS = N->getOperand(1);
25574 // Try to synthesize horizontal adds from adds of shuffles.
25575 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25576 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25577 isHorizontalBinOp(LHS, RHS, true))
    return DAG.getNode(X86ISD::FHADD, SDLoc(N), VT, LHS, RHS);

  return SDValue();
}
25582 /// Do target-specific dag combines on floating point subs.
25583 static SDValue PerformFSUBCombine(SDNode *N, SelectionDAG &DAG,
25584 const X86Subtarget *Subtarget) {
25585 EVT VT = N->getValueType(0);
25586 SDValue LHS = N->getOperand(0);
25587 SDValue RHS = N->getOperand(1);
25589 // Try to synthesize horizontal subs from subs of shuffles.
25590 if (((Subtarget->hasSSE3() && (VT == MVT::v4f32 || VT == MVT::v2f64)) ||
25591 (Subtarget->hasFp256() && (VT == MVT::v8f32 || VT == MVT::v4f64))) &&
25592 isHorizontalBinOp(LHS, RHS, false))
    return DAG.getNode(X86ISD::FHSUB, SDLoc(N), VT, LHS, RHS);

  return SDValue();
}
25597 /// Do target-specific dag combines on X86ISD::FOR and X86ISD::FXOR nodes.
25598 static SDValue PerformFORCombine(SDNode *N, SelectionDAG &DAG) {
25599 assert(N->getOpcode() == X86ISD::FOR || N->getOpcode() == X86ISD::FXOR);
25601 // F[X]OR(0.0, x) -> x
25602 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25603 if (C->getValueAPF().isPosZero())
25604 return N->getOperand(1);
25606 // F[X]OR(x, 0.0) -> x
25607 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25608 if (C->getValueAPF().isPosZero())
      return N->getOperand(0);

  return SDValue();
}
25613 /// Do target-specific dag combines on X86ISD::FMIN and X86ISD::FMAX nodes.
25614 static SDValue PerformFMinFMaxCombine(SDNode *N, SelectionDAG &DAG) {
25615 assert(N->getOpcode() == X86ISD::FMIN || N->getOpcode() == X86ISD::FMAX);
25617 // Only perform optimizations if UnsafeMath is used.
  if (!DAG.getTarget().Options.UnsafeFPMath)
    return SDValue();
25621 // If we run in unsafe-math mode, then convert the FMAX and FMIN nodes
25622 // into FMINC and FMAXC, which are Commutative operations.
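  // (FMIN/FMAX mirror the MINSS/MAXSS semantics, where the result depends on
  // the operand order when an input is NaN or when comparing +0.0 and -0.0;
  // FMINC/FMAXC drop that guarantee, which is acceptable under unsafe-math and
  // lets later passes commute the operands freely.)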
25623 unsigned NewOp = 0;
25624 switch (N->getOpcode()) {
25625 default: llvm_unreachable("unknown opcode");
25626 case X86ISD::FMIN: NewOp = X86ISD::FMINC; break;
  case X86ISD::FMAX: NewOp = X86ISD::FMAXC; break;
  }
25630 return DAG.getNode(NewOp, SDLoc(N), N->getValueType(0),
                     N->getOperand(0), N->getOperand(1));
}
25634 /// Do target-specific dag combines on X86ISD::FAND nodes.
25635 static SDValue PerformFANDCombine(SDNode *N, SelectionDAG &DAG) {
25636 // FAND(0.0, x) -> 0.0
25637 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25638 if (C->getValueAPF().isPosZero())
25639 return N->getOperand(0);
25641 // FAND(x, 0.0) -> 0.0
25642 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25643 if (C->getValueAPF().isPosZero())
      return N->getOperand(1);

  return SDValue();
}
25649 /// Do target-specific dag combines on X86ISD::FANDN nodes
25650 static SDValue PerformFANDNCombine(SDNode *N, SelectionDAG &DAG) {
25651 // FANDN(0.0, x) -> x
25652 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(0)))
25653 if (C->getValueAPF().isPosZero())
25654 return N->getOperand(1);
25656 // FANDN(x, 0.0) -> 0.0
25657 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(N->getOperand(1)))
25658 if (C->getValueAPF().isPosZero())
      return N->getOperand(1);

  return SDValue();
}
static SDValue PerformBTCombine(SDNode *N,
                                SelectionDAG &DAG,
                                TargetLowering::DAGCombinerInfo &DCI) {
25667 // BT ignores high bits in the bit index operand.
25668 SDValue Op1 = N->getOperand(1);
25669 if (Op1.hasOneUse()) {
25670 unsigned BitWidth = Op1.getValueSizeInBits();
25671 APInt DemandedMask = APInt::getLowBitsSet(BitWidth, Log2_32(BitWidth));
25672 APInt KnownZero, KnownOne;
25673 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(),
25674 !DCI.isBeforeLegalizeOps());
25675 const TargetLowering &TLI = DAG.getTargetLoweringInfo();
25676 if (TLO.ShrinkDemandedConstant(Op1, DemandedMask) ||
25677 TLI.SimplifyDemandedBits(Op1, DemandedMask, KnownZero, KnownOne, TLO))
      DCI.CommitTargetLoweringOpt(TLO);
  }

  return SDValue();
}
25683 static SDValue PerformVZEXT_MOVLCombine(SDNode *N, SelectionDAG &DAG) {
25684 SDValue Op = N->getOperand(0);
25685 if (Op.getOpcode() == ISD::BITCAST)
25686 Op = Op.getOperand(0);
25687 EVT VT = N->getValueType(0), OpVT = Op.getValueType();
25688 if (Op.getOpcode() == X86ISD::VZEXT_LOAD &&
25689 VT.getVectorElementType().getSizeInBits() ==
25690 OpVT.getVectorElementType().getSizeInBits()) {
    return DAG.getNode(ISD::BITCAST, SDLoc(N), VT, Op);
  }

  return SDValue();
}
25696 static SDValue PerformSIGN_EXTEND_INREGCombine(SDNode *N, SelectionDAG &DAG,
25697 const X86Subtarget *Subtarget) {
25698 EVT VT = N->getValueType(0);
  if (!VT.isVector())
    return SDValue();
25702 SDValue N0 = N->getOperand(0);
25703 SDValue N1 = N->getOperand(1);
  EVT ExtraVT = cast<VTSDNode>(N1)->getVT();
  SDLoc dl(N);
  // A SIGN_EXTEND_INREG to v4i64 is an expensive operation on both SSE and
  // AVX2, since there is no sign-extended shift right operation on a vector
  // with 64-bit elements.
25710 //(sext_in_reg (v4i64 anyext (v4i32 x )), ExtraVT) ->
25711 // (v4i64 sext (v4i32 sext_in_reg (v4i32 x , ExtraVT)))
25712 if (VT == MVT::v4i64 && (N0.getOpcode() == ISD::ANY_EXTEND ||
25713 N0.getOpcode() == ISD::SIGN_EXTEND)) {
25714 SDValue N00 = N0.getOperand(0);
25716 // EXTLOAD has a better solution on AVX2,
25717 // it may be replaced with X86ISD::VSEXT node.
25718 if (N00.getOpcode() == ISD::LOAD && Subtarget->hasInt256())
      if (!ISD::isNormalLoad(N00.getNode()))
        return SDValue();
25722 if (N00.getValueType() == MVT::v4i32 && ExtraVT.getSizeInBits() < 128) {
      SDValue Tmp = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32,
                                N00, N1);
      return DAG.getNode(ISD::SIGN_EXTEND, dl, MVT::v4i64, Tmp);
    }
  }
  return SDValue();
}
25731 static SDValue PerformSExtCombine(SDNode *N, SelectionDAG &DAG,
25732 TargetLowering::DAGCombinerInfo &DCI,
25733 const X86Subtarget *Subtarget) {
25734 SDValue N0 = N->getOperand(0);
25735 EVT VT = N->getValueType(0);
25737 // (i8,i32 sext (sdivrem (i8 x, i8 y)) ->
25738 // (i8,i32 (sdivrem_sext_hreg (i8 x, i8 y)
25739 // This exposes the sext to the sdivrem lowering, so that it directly extends
25740 // from AH (which we otherwise need to do contortions to access).
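  // For example, (i32 (sext (i8 (srem %x, %y)))) becomes the second result of
  // an X86ISD::SDIVREM8_SEXT_HREG node, so the remainder can be read out of
  // AH already sign-extended instead of needing extra shifts to access it.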
25741 if (N0.getOpcode() == ISD::SDIVREM && N0.getResNo() == 1 &&
25742 N0.getValueType() == MVT::i8 && VT == MVT::i32) {
    SDLoc dl(N);
    SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25745 SDValue R = DAG.getNode(X86ISD::SDIVREM8_SEXT_HREG, dl, NodeTys,
25746 N0.getOperand(0), N0.getOperand(1));
25747 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
    return R.getValue(1);
  }

  if (!DCI.isBeforeLegalizeOps())
    return SDValue();

  if (!Subtarget->hasFp256())
    return SDValue();

  if (VT.isVector() && VT.getSizeInBits() == 256) {
    SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
    if (R.getNode())
      return R;
  }

  return SDValue();
}
25766 static SDValue PerformFMACombine(SDNode *N, SelectionDAG &DAG,
25767 const X86Subtarget* Subtarget) {
  SDLoc dl(N);
  EVT VT = N->getValueType(0);

  // Let legalize expand this if it isn't a legal type yet.
  if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
    return SDValue();

  EVT ScalarVT = VT.getScalarType();
  if ((ScalarVT != MVT::f32 && ScalarVT != MVT::f64) ||
      (!Subtarget->hasFMA() && !Subtarget->hasFMA4()))
    return SDValue();
25780 SDValue A = N->getOperand(0);
25781 SDValue B = N->getOperand(1);
25782 SDValue C = N->getOperand(2);
25784 bool NegA = (A.getOpcode() == ISD::FNEG);
25785 bool NegB = (B.getOpcode() == ISD::FNEG);
25786 bool NegC = (C.getOpcode() == ISD::FNEG);
25788 // Negative multiplication when NegA xor NegB
25789 bool NegMul = (NegA != NegB);
  if (NegA)
    A = A.getOperand(0);
  if (NegB)
    B = B.getOperand(0);
  if (NegC)
    C = C.getOperand(0);

  unsigned Opcode;
  if (!NegMul)
    Opcode = (!NegC) ? X86ISD::FMADD : X86ISD::FMSUB;
  else
    Opcode = (!NegC) ? X86ISD::FNMADD : X86ISD::FNMSUB;

  return DAG.getNode(Opcode, dl, VT, A, B, C);
}
25806 static SDValue PerformZExtCombine(SDNode *N, SelectionDAG &DAG,
25807 TargetLowering::DAGCombinerInfo &DCI,
25808 const X86Subtarget *Subtarget) {
25809 // (i32 zext (and (i8 x86isd::setcc_carry), 1)) ->
25810 // (and (i32 x86isd::setcc_carry), 1)
25811 // This eliminates the zext. This transformation is necessary because
25812 // ISD::SETCC is always legalized to i8.
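  // For example, (i32 (zext (and (i8 (X86ISD::SETCC_CARRY ...)), 1))) becomes
  // (and (i32 (X86ISD::SETCC_CARRY ...)), 1): SETCC_CARRY produces an all-ones
  // or all-zeros value, so it can simply be rematerialized at the wider width.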
  SDLoc dl(N);
  SDValue N0 = N->getOperand(0);
25815 EVT VT = N->getValueType(0);
  if (N0.getOpcode() == ISD::AND &&
      N0.hasOneUse() &&
      N0.getOperand(0).hasOneUse()) {
25820 SDValue N00 = N0.getOperand(0);
25821 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25822 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
      if (!C || C->getZExtValue() != 1)
        return SDValue();
      return DAG.getNode(ISD::AND, dl, VT,
25826 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25827 N00.getOperand(0), N00.getOperand(1)),
                         DAG.getConstant(1, VT));
    }
  }

  if (N0.getOpcode() == ISD::TRUNCATE &&
      N0.hasOneUse() &&
      N0.getOperand(0).hasOneUse()) {
25835 SDValue N00 = N0.getOperand(0);
25836 if (N00.getOpcode() == X86ISD::SETCC_CARRY) {
25837 return DAG.getNode(ISD::AND, dl, VT,
25838 DAG.getNode(X86ISD::SETCC_CARRY, dl, VT,
25839 N00.getOperand(0), N00.getOperand(1)),
                         DAG.getConstant(1, VT));
    }
  }
  if (VT.is256BitVector()) {
    SDValue R = WidenMaskArithmetic(N, DAG, DCI, Subtarget);
    if (R.getNode())
      return R;
  }
25849 // (i8,i32 zext (udivrem (i8 x, i8 y)) ->
25850 // (i8,i32 (udivrem_zext_hreg (i8 x, i8 y)
25851 // This exposes the zext to the udivrem lowering, so that it directly extends
25852 // from AH (which we otherwise need to do contortions to access).
25853 if (N0.getOpcode() == ISD::UDIVREM &&
25854 N0.getResNo() == 1 && N0.getValueType() == MVT::i8 &&
25855 (VT == MVT::i32 || VT == MVT::i64)) {
25856 SDVTList NodeTys = DAG.getVTList(MVT::i8, VT);
25857 SDValue R = DAG.getNode(X86ISD::UDIVREM8_ZEXT_HREG, dl, NodeTys,
25858 N0.getOperand(0), N0.getOperand(1));
25859 DAG.ReplaceAllUsesOfValueWith(N0.getValue(0), R.getValue(0));
    return R.getValue(1);
  }

  return SDValue();
}
25866 // Optimize x == -y --> x+y == 0
25867 // x != -y --> x+y != 0
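// For example, (setcc eq %a, (sub 0, %b)) is rewritten below as
// (setcc eq (add %a, %b), 0), which saves materializing the negation when the
// subtraction has no other uses.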
25868 static SDValue PerformISDSETCCCombine(SDNode *N, SelectionDAG &DAG,
25869 const X86Subtarget* Subtarget) {
25870 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get();
25871 SDValue LHS = N->getOperand(0);
25872 SDValue RHS = N->getOperand(1);
  EVT VT = N->getValueType(0);
  SDLoc DL(N);
25876 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && LHS.getOpcode() == ISD::SUB)
25877 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(LHS.getOperand(0)))
25878 if (C->getAPIntValue() == 0 && LHS.hasOneUse()) {
25879 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
25880 LHS.getValueType(), RHS, LHS.getOperand(1));
25881 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
                            addV, DAG.getConstant(0, addV.getValueType()), CC);
      }
25884 if ((CC == ISD::SETNE || CC == ISD::SETEQ) && RHS.getOpcode() == ISD::SUB)
25885 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(RHS.getOperand(0)))
25886 if (C->getAPIntValue() == 0 && RHS.hasOneUse()) {
25887 SDValue addV = DAG.getNode(ISD::ADD, SDLoc(N),
25888 RHS.getValueType(), LHS, RHS.getOperand(1));
25889 return DAG.getSetCC(SDLoc(N), N->getValueType(0),
                            addV, DAG.getConstant(0, addV.getValueType()), CC);
      }
25893 if (VT.getScalarType() == MVT::i1) {
25894 bool IsSEXT0 = (LHS.getOpcode() == ISD::SIGN_EXTEND) &&
25895 (LHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
25896 bool IsVZero0 = ISD::isBuildVectorAllZeros(LHS.getNode());
    if (!IsSEXT0 && !IsVZero0)
      return SDValue();
25899 bool IsSEXT1 = (RHS.getOpcode() == ISD::SIGN_EXTEND) &&
25900 (RHS.getOperand(0).getValueType().getScalarType() == MVT::i1);
25901 bool IsVZero1 = ISD::isBuildVectorAllZeros(RHS.getNode());
    if (!IsSEXT1 && !IsVZero1)
      return SDValue();
25906 if (IsSEXT0 && IsVZero1) {
      assert(VT == LHS.getOperand(0).getValueType() && "Unexpected operand type");
25908 if (CC == ISD::SETEQ)
25909 return DAG.getNOT(DL, LHS.getOperand(0), VT);
      return LHS.getOperand(0);
    }
25912 if (IsSEXT1 && IsVZero0) {
      assert(VT == RHS.getOperand(0).getValueType() && "Unexpected operand type");
25914 if (CC == ISD::SETEQ)
25915 return DAG.getNOT(DL, RHS.getOperand(0), VT);
      return RHS.getOperand(0);
    }
  }

  return SDValue();
}
25923 static SDValue PerformINSERTPSCombine(SDNode *N, SelectionDAG &DAG,
25924 const X86Subtarget *Subtarget) {
  SDLoc dl(N);
  MVT VT = N->getOperand(1)->getSimpleValueType(0);
25927 assert((VT == MVT::v4f32 || VT == MVT::v4i32) &&
25928 "X86insertps is only defined for v4x32");
25930 SDValue Ld = N->getOperand(1);
25931 if (MayFoldLoad(Ld)) {
25932 // Extract the countS bits from the immediate so we can get the proper
25933 // address when narrowing the vector load to a specific element.
    // When the second source op is a memory address, insertps doesn't use
    // countS and just gets an f32 from that address.
25936 unsigned DestIndex =
25937 cast<ConstantSDNode>(N->getOperand(2))->getZExtValue() >> 6;
25938 Ld = NarrowVectorLoadToElement(cast<LoadSDNode>(Ld), DestIndex, DAG);
25942 // Create this as a scalar to vector to match the instruction pattern.
25943 SDValue LoadScalarToVector = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Ld);
25944 // countS bits are ignored when loading from memory on insertps, which
25945 // means we don't need to explicitly set them to 0.
25946 return DAG.getNode(X86ISD::INSERTPS, dl, VT, N->getOperand(0),
                       LoadScalarToVector, N->getOperand(2));
  }
  return SDValue();
}
25950 // Helper function of PerformSETCCCombine. It is to materialize "setb reg"
25951 // as "sbb reg,reg", since it can be extended without zext and produces
25952 // an all-ones bit which is more useful than 0/1 in some cases.
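// For an i8 result this emits (and (X86ISD::SETCC_CARRY CF), 1); the
// SETCC_CARRY node itself lowers to "sbb reg,reg", whose result is 0 or -1
// depending on the carry flag, so wider users can reuse it without a zext.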
static SDValue MaterializeSETB(SDLoc DL, SDValue EFLAGS, SelectionDAG &DAG,
                               MVT VT) {
  if (VT == MVT::i8)
    return DAG.getNode(ISD::AND, DL, VT,
25957 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
25958 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS),
25959 DAG.getConstant(1, VT));
  assert (VT == MVT::i1 && "Unexpected type for SETCC node");
25961 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i1,
25962 DAG.getNode(X86ISD::SETCC_CARRY, DL, MVT::i8,
                                 DAG.getConstant(X86::COND_B, MVT::i8), EFLAGS));
}
25966 // Optimize RES = X86ISD::SETCC CONDCODE, EFLAG_INPUT
25967 static SDValue PerformSETCCCombine(SDNode *N, SelectionDAG &DAG,
25968 TargetLowering::DAGCombinerInfo &DCI,
25969 const X86Subtarget *Subtarget) {
  SDLoc DL(N);
  X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(0));
25972 SDValue EFLAGS = N->getOperand(1);
25974 if (CC == X86::COND_A) {
25975 // Try to convert COND_A into COND_B in an attempt to facilitate
25976 // materializing "setb reg".
25978 // Do not flip "e > c", where "c" is a constant, because Cmp instruction
25979 // cannot take an immediate as its first operand.
25981 if (EFLAGS.getOpcode() == X86ISD::SUB && EFLAGS.hasOneUse() &&
25982 EFLAGS.getValueType().isInteger() &&
25983 !isa<ConstantSDNode>(EFLAGS.getOperand(1))) {
25984 SDValue NewSub = DAG.getNode(X86ISD::SUB, SDLoc(EFLAGS),
25985 EFLAGS.getNode()->getVTList(),
25986 EFLAGS.getOperand(1), EFLAGS.getOperand(0));
25987 SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
      return MaterializeSETB(DL, NewEFLAGS, DAG, N->getSimpleValueType(0));
    }
  }
25992 // Materialize "setb reg" as "sbb reg,reg", since it can be extended without
  // a zext and produces an all-ones bit which is more useful than 0/1 in
  // some cases.
25995 if (CC == X86::COND_B)
25996 return MaterializeSETB(DL, EFLAGS, DAG, N->getSimpleValueType(0));
  SDValue Flags;

  Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
26001 if (Flags.getNode()) {
26002 SDValue Cond = DAG.getConstant(CC, MVT::i8);
    return DAG.getNode(X86ISD::SETCC, DL, N->getVTList(), Cond, Flags);
  }

  return SDValue();
}
26009 // Optimize branch condition evaluation.
26011 static SDValue PerformBrCondCombine(SDNode *N, SelectionDAG &DAG,
26012 TargetLowering::DAGCombinerInfo &DCI,
26013 const X86Subtarget *Subtarget) {
  SDLoc DL(N);
  SDValue Chain = N->getOperand(0);
26016 SDValue Dest = N->getOperand(1);
26017 SDValue EFLAGS = N->getOperand(3);
26018 X86::CondCode CC = X86::CondCode(N->getConstantOperandVal(2));
  SDValue Flags;

  Flags = checkBoolTestSetCCCombine(EFLAGS, CC);
26023 if (Flags.getNode()) {
26024 SDValue Cond = DAG.getConstant(CC, MVT::i8);
    return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), Chain, Dest, Cond,
                       Flags);
  }

  return SDValue();
}
26032 static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
26033 SelectionDAG &DAG) {
26034 // Take advantage of vector comparisons producing 0 or -1 in each lane to
26035 // optimize away operation when it's from a constant.
26037 // The general transformation is:
26038 // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
26039 // AND(VECTOR_CMP(x,y), constant2)
26040 // constant2 = UNARYOP(constant)
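  // For example, (v4f32 sint_to_fp (and (setcc ...), <4 x i32> <1,1,1,1>))
  // becomes (bitcast (and (setcc ...), (bitcast (sint_to_fp <1,1,1,1>)))):
  // every lane of the compare is 0 or -1, so masking the converted constant
  // gives the same result as converting the masked integer.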
26042 // Early exit if this isn't a vector operation, the operand of the
26043 // unary operation isn't a bitwise AND, or if the sizes of the operations
26044 // aren't the same.
26045 EVT VT = N->getValueType(0);
26046 if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
26047 N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
      VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
    return SDValue();
26051 // Now check that the other operand of the AND is a constant. We could
26052 // make the transformation for non-constant splats as well, but it's unclear
26053 // that would be a benefit as it would not eliminate any operations, just
26054 // perform one more step in scalar code before moving to the vector unit.
26055 if (BuildVectorSDNode *BV =
26056 dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
26057 // Bail out if the vector isn't a constant.
    if (!BV->isConstant())
      return SDValue();

    // Everything checks out. Build up the new and improved node.
    SDLoc DL(N);
    EVT IntVT = BV->getValueType(0);
26064 // Create a new constant of the appropriate type for the transformed
26066 SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
26067 // The AND node needs bitcasts to/from an integer vector type around it.
26068 SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
26069 SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
26070 N->getOperand(0)->getOperand(0), MaskConst);
    SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
    return Res;
  }

  return SDValue();
}
26078 static SDValue PerformSINT_TO_FPCombine(SDNode *N, SelectionDAG &DAG,
26079 const X86Subtarget *Subtarget) {
26080 // First try to optimize away the conversion entirely when it's
26081 // conditionally from a constant. Vectors only.
26082 SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG);
  if (Res != SDValue())
    return Res;
26086 // Now move on to more general possibilities.
26087 SDValue Op0 = N->getOperand(0);
26088 EVT InVT = Op0->getValueType(0);
26090 // SINT_TO_FP(v4i8) -> SINT_TO_FP(SEXT(v4i8 to v4i32))
26091 if (InVT == MVT::v8i8 || InVT == MVT::v4i8) {
    SDLoc dl(N);
    MVT DstVT = InVT == MVT::v4i8 ? MVT::v4i32 : MVT::v8i32;
26094 SDValue P = DAG.getNode(ISD::SIGN_EXTEND, dl, DstVT, Op0);
    return DAG.getNode(ISD::SINT_TO_FP, dl, N->getValueType(0), P);
  }
26098 // Transform (SINT_TO_FP (i64 ...)) into an x87 operation if we have
26099 // a 32-bit target where SSE doesn't support i64->FP operations.
26100 if (Op0.getOpcode() == ISD::LOAD) {
26101 LoadSDNode *Ld = cast<LoadSDNode>(Op0.getNode());
26102 EVT VT = Ld->getValueType(0);
26103 if (!Ld->isVolatile() && !N->getValueType(0).isVector() &&
26104 ISD::isNON_EXTLoad(Op0.getNode()) && Op0.hasOneUse() &&
26105 !Subtarget->is64Bit() && VT == MVT::i64) {
26106 SDValue FILDChain = Subtarget->getTargetLowering()->BuildFILD(
26107 SDValue(N, 0), Ld->getValueType(0), Ld->getChain(), Op0, DAG);
      DAG.ReplaceAllUsesOfValueWith(Op0.getValue(1), FILDChain.getValue(1));
      return FILDChain;
    }
  }
  return SDValue();
}
26115 // Optimize RES, EFLAGS = X86ISD::ADC LHS, RHS, EFLAGS
26116 static SDValue PerformADCCombine(SDNode *N, SelectionDAG &DAG,
26117 X86TargetLowering::DAGCombinerInfo &DCI) {
26118 // If the LHS and RHS of the ADC node are zero, then it can't overflow and
26119 // the result is either zero or one (depending on the input carry bit).
26120 // Strength reduce this down to a "set on carry" aka SETCC_CARRY&1.
26121 if (X86::isZeroNode(N->getOperand(0)) &&
26122 X86::isZeroNode(N->getOperand(1)) &&
      // We don't have a good way to replace an EFLAGS use, so only do this
      // when the result is unused.
      SDValue(N, 1).use_empty()) {
    SDLoc DL(N);
    EVT VT = N->getValueType(0);
    SDValue CarryOut = DAG.getConstant(0, N->getValueType(1));
    SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
                               DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
                                           DAG.getConstant(X86::COND_B, MVT::i8),
                                           N->getOperand(2)),
                               DAG.getConstant(1, VT));
    return DCI.CombineTo(N, Res1, CarryOut);
  }

  return SDValue();
}
26140 // fold (add Y, (sete X, 0)) -> adc 0, Y
26141 // (add Y, (setne X, 0)) -> sbb -1, Y
26142 // (sub (sete X, 0), Y) -> sbb 0, Y
26143 // (sub (setne X, 0), Y) -> adc -1, Y
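// In each case the "setcc X, 0" is replaced by a CMP of X against 1, so the
// carry flag is set exactly when X was zero; the add/sub of Y then absorbs
// the 0/1 (or 0/-1) value via ADC/SBB instead of materializing the setcc.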
static SDValue OptimizeConditionalInDecrement(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
26147 // Look through ZExts.
26148 SDValue Ext = N->getOperand(N->getOpcode() == ISD::SUB ? 1 : 0);
  if (Ext.getOpcode() != ISD::ZERO_EXTEND || !Ext.hasOneUse())
    return SDValue();
26152 SDValue SetCC = Ext.getOperand(0);
  if (SetCC.getOpcode() != X86ISD::SETCC || !SetCC.hasOneUse())
    return SDValue();
26156 X86::CondCode CC = (X86::CondCode)SetCC.getConstantOperandVal(0);
  if (CC != X86::COND_E && CC != X86::COND_NE)
    return SDValue();
26160 SDValue Cmp = SetCC.getOperand(1);
26161 if (Cmp.getOpcode() != X86ISD::CMP || !Cmp.hasOneUse() ||
26162 !X86::isZeroNode(Cmp.getOperand(1)) ||
      !Cmp.getOperand(0).getValueType().isInteger())
    return SDValue();
26166 SDValue CmpOp0 = Cmp.getOperand(0);
26167 SDValue NewCmp = DAG.getNode(X86ISD::CMP, DL, MVT::i32, CmpOp0,
26168 DAG.getConstant(1, CmpOp0.getValueType()));
26170 SDValue OtherVal = N->getOperand(N->getOpcode() == ISD::SUB ? 0 : 1);
26171 if (CC == X86::COND_NE)
26172 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::ADC : X86ISD::SBB,
26173 DL, OtherVal.getValueType(), OtherVal,
26174 DAG.getConstant(-1ULL, OtherVal.getValueType()), NewCmp);
26175 return DAG.getNode(N->getOpcode() == ISD::SUB ? X86ISD::SBB : X86ISD::ADC,
26176 DL, OtherVal.getValueType(), OtherVal,
                     DAG.getConstant(0, OtherVal.getValueType()), NewCmp);
}
26180 /// PerformADDCombine - Do target-specific dag combines on integer adds.
26181 static SDValue PerformAddCombine(SDNode *N, SelectionDAG &DAG,
26182 const X86Subtarget *Subtarget) {
26183 EVT VT = N->getValueType(0);
26184 SDValue Op0 = N->getOperand(0);
26185 SDValue Op1 = N->getOperand(1);
26187 // Try to synthesize horizontal adds from adds of shuffles.
26188 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26189 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26190 isHorizontalBinOp(Op0, Op1, true))
26191 return DAG.getNode(X86ISD::HADD, SDLoc(N), VT, Op0, Op1);
26193 return OptimizeConditionalInDecrement(N, DAG);
26196 static SDValue PerformSubCombine(SDNode *N, SelectionDAG &DAG,
26197 const X86Subtarget *Subtarget) {
26198 SDValue Op0 = N->getOperand(0);
26199 SDValue Op1 = N->getOperand(1);
26201 // X86 can't encode an immediate LHS of a sub. See if we can push the
26202 // negation into a preceding instruction.
26203 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op0)) {
26204 // If the RHS of the sub is a XOR with one use and a constant, invert the
26205 // immediate. Then add one to the LHS of the sub so we can turn
26206 // X-Y -> X+~Y+1, saving one register.
26207 if (Op1->hasOneUse() && Op1.getOpcode() == ISD::XOR &&
26208 isa<ConstantSDNode>(Op1.getOperand(1))) {
26209 APInt XorC = cast<ConstantSDNode>(Op1.getOperand(1))->getAPIntValue();
26210 EVT VT = Op0.getValueType();
26211 SDValue NewXor = DAG.getNode(ISD::XOR, SDLoc(Op1), VT,
                                   Op1.getOperand(0),
                                   DAG.getConstant(~XorC, VT));
26214 return DAG.getNode(ISD::ADD, SDLoc(N), VT, NewXor,
                         DAG.getConstant(C->getAPIntValue()+1, VT));
    }
  }

  // Try to synthesize horizontal subs from subs of shuffles.
26220 EVT VT = N->getValueType(0);
26221 if (((Subtarget->hasSSSE3() && (VT == MVT::v8i16 || VT == MVT::v4i32)) ||
26222 (Subtarget->hasInt256() && (VT == MVT::v16i16 || VT == MVT::v8i32))) &&
26223 isHorizontalBinOp(Op0, Op1, true))
26224 return DAG.getNode(X86ISD::HSUB, SDLoc(N), VT, Op0, Op1);
26226 return OptimizeConditionalInDecrement(N, DAG);
/// performVZEXTCombine - Do target-specific dag combines on X86ISD::VZEXT nodes.
26230 static SDValue performVZEXTCombine(SDNode *N, SelectionDAG &DAG,
26231 TargetLowering::DAGCombinerInfo &DCI,
26232 const X86Subtarget *Subtarget) {
  SDLoc DL(N);
  MVT VT = N->getSimpleValueType(0);
26235 SDValue Op = N->getOperand(0);
26236 MVT OpVT = Op.getSimpleValueType();
26237 MVT OpEltVT = OpVT.getVectorElementType();
26238 unsigned InputBits = OpEltVT.getSizeInBits() * VT.getVectorNumElements();
  // (vzext (bitcast (vzext (x)) -> (vzext x)
  SDValue V = Op;
  while (V.getOpcode() == ISD::BITCAST)
26243 V = V.getOperand(0);
26245 if (V != Op && V.getOpcode() == X86ISD::VZEXT) {
26246 MVT InnerVT = V.getSimpleValueType();
26247 MVT InnerEltVT = InnerVT.getVectorElementType();
26249 // If the element sizes match exactly, we can just do one larger vzext. This
26250 // is always an exact type match as vzext operates on integer types.
26251 if (OpEltVT == InnerEltVT) {
26252 assert(OpVT == InnerVT && "Types must match for vzext!");
      return DAG.getNode(X86ISD::VZEXT, DL, VT, V.getOperand(0));
    }
26256 // The only other way we can combine them is if only a single element of the
26257 // inner vzext is used in the input to the outer vzext.
    if (InnerEltVT.getSizeInBits() < InputBits)
      return SDValue();
26261 // In this case, the inner vzext is completely dead because we're going to
26262 // only look at bits inside of the low element. Just do the outer vzext on
26263 // a bitcast of the input to the inner.
26264 return DAG.getNode(X86ISD::VZEXT, DL, VT,
                       DAG.getNode(ISD::BITCAST, DL, OpVT, V));
  }
26268 // Check if we can bypass extracting and re-inserting an element of an input
  // vector. Essentially:
26270 // (bitcast (sclr2vec (ext_vec_elt x))) -> (bitcast x)
26271 if (V.getOpcode() == ISD::SCALAR_TO_VECTOR &&
26272 V.getOperand(0).getOpcode() == ISD::EXTRACT_VECTOR_ELT &&
26273 V.getOperand(0).getSimpleValueType().getSizeInBits() == InputBits) {
26274 SDValue ExtractedV = V.getOperand(0);
26275 SDValue OrigV = ExtractedV.getOperand(0);
26276 if (auto *ExtractIdx = dyn_cast<ConstantSDNode>(ExtractedV.getOperand(1)))
26277 if (ExtractIdx->getZExtValue() == 0) {
26278 MVT OrigVT = OrigV.getSimpleValueType();
26279 // Extract a subvector if necessary...
26280 if (OrigVT.getSizeInBits() > OpVT.getSizeInBits()) {
26281 int Ratio = OrigVT.getSizeInBits() / OpVT.getSizeInBits();
26282 OrigVT = MVT::getVectorVT(OrigVT.getVectorElementType(),
26283 OrigVT.getVectorNumElements() / Ratio);
26284 OrigV = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, OrigVT, OrigV,
                              DAG.getIntPtrConstant(0));
        }
        Op = DAG.getNode(ISD::BITCAST, DL, OpVT, OrigV);
        return DAG.getNode(X86ISD::VZEXT, DL, VT, Op);
      }
  }

  return SDValue();
}
26295 SDValue X86TargetLowering::PerformDAGCombine(SDNode *N,
26296 DAGCombinerInfo &DCI) const {
26297 SelectionDAG &DAG = DCI.DAG;
  switch (N->getOpcode()) {
  default: break;
  case ISD::EXTRACT_VECTOR_ELT:
    return PerformEXTRACT_VECTOR_ELTCombine(N, DAG, DCI);
  case ISD::VSELECT:
  case ISD::SELECT:
  case X86ISD::SHRUNKBLEND:
26305 return PerformSELECTCombine(N, DAG, DCI, Subtarget);
26306 case ISD::BITCAST: return PerformBITCASTCombine(N, DAG);
26307 case X86ISD::CMOV: return PerformCMOVCombine(N, DAG, DCI, Subtarget);
26308 case ISD::ADD: return PerformAddCombine(N, DAG, Subtarget);
26309 case ISD::SUB: return PerformSubCombine(N, DAG, Subtarget);
26310 case X86ISD::ADC: return PerformADCCombine(N, DAG, DCI);
  case ISD::MUL:            return PerformMulCombine(N, DAG, DCI);
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:            return PerformShiftCombine(N, DAG, DCI, Subtarget);
26315 case ISD::AND: return PerformAndCombine(N, DAG, DCI, Subtarget);
26316 case ISD::OR: return PerformOrCombine(N, DAG, DCI, Subtarget);
26317 case ISD::XOR: return PerformXorCombine(N, DAG, DCI, Subtarget);
26318 case ISD::LOAD: return PerformLOADCombine(N, DAG, DCI, Subtarget);
26319 case ISD::MLOAD: return PerformMLOADCombine(N, DAG, DCI, Subtarget);
26320 case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
26321 case ISD::MSTORE: return PerformMSTORECombine(N, DAG, Subtarget);
26322 case ISD::SINT_TO_FP: return PerformSINT_TO_FPCombine(N, DAG, Subtarget);
26323 case ISD::FADD: return PerformFADDCombine(N, DAG, Subtarget);
  case ISD::FSUB:           return PerformFSUBCombine(N, DAG, Subtarget);
  case X86ISD::FXOR:
  case X86ISD::FOR:         return PerformFORCombine(N, DAG);
  case X86ISD::FMIN:
  case X86ISD::FMAX:        return PerformFMinFMaxCombine(N, DAG);
26329 case X86ISD::FAND: return PerformFANDCombine(N, DAG);
26330 case X86ISD::FANDN: return PerformFANDNCombine(N, DAG);
26331 case X86ISD::BT: return PerformBTCombine(N, DAG, DCI);
26332 case X86ISD::VZEXT_MOVL: return PerformVZEXT_MOVLCombine(N, DAG);
26333 case ISD::ANY_EXTEND:
26334 case ISD::ZERO_EXTEND: return PerformZExtCombine(N, DAG, DCI, Subtarget);
26335 case ISD::SIGN_EXTEND: return PerformSExtCombine(N, DAG, DCI, Subtarget);
26336 case ISD::SIGN_EXTEND_INREG:
26337 return PerformSIGN_EXTEND_INREGCombine(N, DAG, Subtarget);
26338 case ISD::TRUNCATE: return PerformTruncateCombine(N, DAG,DCI,Subtarget);
26339 case ISD::SETCC: return PerformISDSETCCCombine(N, DAG, Subtarget);
26340 case X86ISD::SETCC: return PerformSETCCCombine(N, DAG, DCI, Subtarget);
26341 case X86ISD::BRCOND: return PerformBrCondCombine(N, DAG, DCI, Subtarget);
26342 case X86ISD::VZEXT: return performVZEXTCombine(N, DAG, DCI, Subtarget);
26343 case X86ISD::SHUFP: // Handle all target specific shuffles
26344 case X86ISD::PALIGNR:
26345 case X86ISD::UNPCKH:
26346 case X86ISD::UNPCKL:
26347 case X86ISD::MOVHLPS:
26348 case X86ISD::MOVLHPS:
26349 case X86ISD::PSHUFB:
26350 case X86ISD::PSHUFD:
26351 case X86ISD::PSHUFHW:
26352 case X86ISD::PSHUFLW:
26353 case X86ISD::MOVSS:
26354 case X86ISD::MOVSD:
26355 case X86ISD::VPERMILPI:
26356 case X86ISD::VPERM2X128:
26357 case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, DCI,Subtarget);
26358 case ISD::FMA: return PerformFMACombine(N, DAG, Subtarget);
26359 case ISD::INTRINSIC_WO_CHAIN:
26360 return PerformINTRINSIC_WO_CHAINCombine(N, DAG, Subtarget);
26361 case X86ISD::INSERTPS: {
26362 if (getTargetMachine().getOptLevel() > CodeGenOpt::None)
      return PerformINSERTPSCombine(N, DAG, Subtarget);
    break;
  }
  case ISD::BUILD_VECTOR:   return PerformBUILD_VECTORCombine(N, DAG, Subtarget);
  }

  return SDValue();
}
26372 /// isTypeDesirableForOp - Return true if the target has native support for
26373 /// the specified value type and it is 'desirable' to use the type for the
26374 /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
26375 /// instruction encodings are longer and some i16 instructions are slow.
26376 bool X86TargetLowering::isTypeDesirableForOp(unsigned Opc, EVT VT) const {
  if (!isTypeLegal(VT))
    return false;
  if (VT != MVT::i16)
    return true;

  switch (Opc) {
  default:
    return true;
  case ISD::LOAD:
  case ISD::SIGN_EXTEND:
  case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
  case ISD::SHL:
  case ISD::SRL:
  case ISD::SUB:
  case ISD::ADD:
  case ISD::MUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    return false;
  }
}
26401 /// IsDesirableToPromoteOp - This method query the target whether it is
26402 /// beneficial for dag combiner to promote the specified node. If true, it
26403 /// should return the desired promotion type by reference.
26404 bool X86TargetLowering::IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const {
26405 EVT VT = Op.getValueType();
  if (VT != MVT::i16)
    return false;
26409 bool Promote = false;
26410 bool Commute = false;
  switch (Op.getOpcode()) {
  default: break;
  case ISD::LOAD: {
    LoadSDNode *LD = cast<LoadSDNode>(Op);
26415 // If the non-extending load has a single use and it's not live out, then it
26416 // might be folded.
26417 if (LD->getExtensionType() == ISD::NON_EXTLOAD /*&&
26418 Op.hasOneUse()*/) {
26419 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
26420 UE = Op.getNode()->use_end(); UI != UE; ++UI) {
        // The only case where we'd want to promote LOAD (rather than having it
        // promoted as an operand) is when its only use is a liveout.
        if (UI->getOpcode() != ISD::CopyToReg)
          return false;
      }
    }
    Promote = true;
    break;
  }
26430 case ISD::SIGN_EXTEND:
26431 case ISD::ZERO_EXTEND:
  case ISD::ANY_EXTEND:
    Promote = true;
    break;
  case ISD::SHL:
  case ISD::SRL: {
    SDValue N0 = Op.getOperand(0);
26438 // Look out for (store (shl (load), x)).
    if (MayFoldLoad(N0) && MayFoldIntoStore(Op))
      return false;
    Promote = true;
    break;
  }
  case ISD::ADD:
  case ISD::MUL:
  case ISD::AND:
  case ISD::OR:
  case ISD::XOR:
    Commute = true;
    // fallthrough
  case ISD::SUB: {
26452 SDValue N0 = Op.getOperand(0);
26453 SDValue N1 = Op.getOperand(1);
    if (!Commute && MayFoldLoad(N1))
      return false;
26456 // Avoid disabling potential load folding opportunities.
    if (MayFoldLoad(N0) && (!isa<ConstantSDNode>(N1) || MayFoldIntoStore(Op)))
      return false;
    if (MayFoldLoad(N1) && (!isa<ConstantSDNode>(N0) || MayFoldIntoStore(Op)))
      return false;
    Commute = true;
  }
  }

  PVT = MVT::i32;
  return Promote;
}
26469 //===----------------------------------------------------------------------===//
26470 // X86 Inline Assembly Support
26471 //===----------------------------------------------------------------------===//
namespace {
// Helper to match a string separated by whitespace.
26475 bool matchAsmImpl(StringRef s, ArrayRef<const StringRef *> args) {
26476 s = s.substr(s.find_first_not_of(" \t")); // Skip leading whitespace.
26478 for (unsigned i = 0, e = args.size(); i != e; ++i) {
26479 StringRef piece(*args[i]);
    if (!s.startswith(piece)) // Check if the piece matches.
      return false;
26483 s = s.substr(piece.size());
26484 StringRef::size_type pos = s.find_first_not_of(" \t");
    if (pos == 0) // We matched a prefix.
      return false;

    s = s.substr(pos);
  }

  return s.empty();
}
const VariadicFunction1<bool, StringRef, StringRef, matchAsmImpl> matchAsm={};
} // end anonymous namespace
26496 static bool clobbersFlagRegisters(const SmallVector<StringRef, 4> &AsmPieces) {
26498 if (AsmPieces.size() == 3 || AsmPieces.size() == 4) {
26499 if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{cc}") &&
26500 std::count(AsmPieces.begin(), AsmPieces.end(), "~{flags}") &&
26501 std::count(AsmPieces.begin(), AsmPieces.end(), "~{fpsr}")) {
      if (AsmPieces.size() == 3)
        return true;
      else if (std::count(AsmPieces.begin(), AsmPieces.end(), "~{dirflag}"))
        return true;
    }
  }
  return false;
}
26512 bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const {
26513 InlineAsm *IA = cast<InlineAsm>(CI->getCalledValue());
26515 std::string AsmStr = IA->getAsmString();
26517 IntegerType *Ty = dyn_cast<IntegerType>(CI->getType());
  if (!Ty || Ty->getBitWidth() % 16 != 0)
    return false;
26521 // TODO: should remove alternatives from the asmstring: "foo {a|b}" -> "foo a"
26522 SmallVector<StringRef, 4> AsmPieces;
26523 SplitString(AsmStr, AsmPieces, ";\n");
26525 switch (AsmPieces.size()) {
  default: return false;
  case 1:
26528 // FIXME: this should verify that we are targeting a 486 or better. If not,
26529 // we will turn this bswap into something that will be lowered to logical
26530 // ops instead of emitting the bswap asm. For now, we don't support 486 or
26531 // lower so don't worry about this.
26533 if (matchAsm(AsmPieces[0], "bswap", "$0") ||
26534 matchAsm(AsmPieces[0], "bswapl", "$0") ||
26535 matchAsm(AsmPieces[0], "bswapq", "$0") ||
26536 matchAsm(AsmPieces[0], "bswap", "${0:q}") ||
26537 matchAsm(AsmPieces[0], "bswapl", "${0:q}") ||
26538 matchAsm(AsmPieces[0], "bswapq", "${0:q}")) {
26539 // No need to check constraints, nothing other than the equivalent of
26540 // "=r,0" would be valid here.
      return IntrinsicLowering::LowerToByteSwap(CI);
    }
26544 // rorw $$8, ${0:w} --> llvm.bswap.i16
26545 if (CI->getType()->isIntegerTy(16) &&
26546 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26547 (matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") ||
26548 matchAsm(AsmPieces[0], "rolw", "$$8,", "${0:w}"))) {
      AsmPieces.clear();
      const std::string &ConstraintsStr = IA->getConstraintString();
26551 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26552 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26553 if (clobbersFlagRegisters(AsmPieces))
        return IntrinsicLowering::LowerToByteSwap(CI);
    }
    break;
  case 3:
26558 if (CI->getType()->isIntegerTy(32) &&
26559 IA->getConstraintString().compare(0, 5, "=r,0,") == 0 &&
26560 matchAsm(AsmPieces[0], "rorw", "$$8,", "${0:w}") &&
26561 matchAsm(AsmPieces[1], "rorl", "$$16,", "$0") &&
26562 matchAsm(AsmPieces[2], "rorw", "$$8,", "${0:w}")) {
      AsmPieces.clear();
      const std::string &ConstraintsStr = IA->getConstraintString();
26565 SplitString(StringRef(ConstraintsStr).substr(5), AsmPieces, ",");
26566 array_pod_sort(AsmPieces.begin(), AsmPieces.end());
26567 if (clobbersFlagRegisters(AsmPieces))
        return IntrinsicLowering::LowerToByteSwap(CI);
    }
26571 if (CI->getType()->isIntegerTy(64)) {
26572 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
26573 if (Constraints.size() >= 2 &&
26574 Constraints[0].Codes.size() == 1 && Constraints[0].Codes[0] == "A" &&
26575 Constraints[1].Codes.size() == 1 && Constraints[1].Codes[0] == "0") {
26576 // bswap %eax / bswap %edx / xchgl %eax, %edx -> llvm.bswap.i64
26577 if (matchAsm(AsmPieces[0], "bswap", "%eax") &&
26578 matchAsm(AsmPieces[1], "bswap", "%edx") &&
26579 matchAsm(AsmPieces[2], "xchgl", "%eax,", "%edx"))
          return IntrinsicLowering::LowerToByteSwap(CI);
      }
    }
    break;
  }
  return false;
}
26588 /// getConstraintType - Given a constraint letter, return the type of
26589 /// constraint it is for this target.
26590 X86TargetLowering::ConstraintType
26591 X86TargetLowering::getConstraintType(const std::string &Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'R':
    case 'q':
    case 'Q':
    case 'f':
    case 't':
    case 'u':
    case 'y':
    case 'x':
    case 'Y':
    case 'l':
      return C_RegisterClass;
    case 'a':
    case 'b':
    case 'c':
    case 'd':
    case 'S':
    case 'D':
    case 'A':
      return C_Register;
    case 'I':
    case 'J':
    case 'K':
    case 'L':
    case 'M':
    case 'N':
    case 'G':
    case 'C':
    case 'e':
    case 'Z':
      return C_Other;
    default:
      break;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}
26631 /// Examine constraint type and operand type and determine a weight value.
26632 /// This object must already have been set up with the operand type
26633 /// and the current alternative constraint selected.
26634 TargetLowering::ConstraintWeight
26635 X86TargetLowering::getSingleConstraintMatchWeight(
26636 AsmOperandInfo &info, const char *constraint) const {
26637 ConstraintWeight weight = CW_Invalid;
26638 Value *CallOperandVal = info.CallOperandVal;
26639 // If we don't have a value, we can't do a match,
26640 // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
26643 Type *type = CallOperandVal->getType();
26644 // Look at the constraint type.
26645 switch (*constraint) {
26647 weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
26658 if (CallOperandVal->getType()->isIntegerTy())
26659 weight = CW_SpecificReg;
26664 if (type->isFloatingPointTy())
26665 weight = CW_SpecificReg;
26668 if (type->isX86_MMXTy() && Subtarget->hasMMX())
26669 weight = CW_SpecificReg;
26673 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget->hasSSE1()) ||
26674 ((type->getPrimitiveSizeInBits() == 256) && Subtarget->hasFp256()))
26675 weight = CW_Register;
26678 if (ConstantInt *C = dyn_cast<ConstantInt>(info.CallOperandVal)) {
26679 if (C->getZExtValue() <= 31)
26680 weight = CW_Constant;
26684 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26685 if (C->getZExtValue() <= 63)
26686 weight = CW_Constant;
26690 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26691 if ((C->getSExtValue() >= -0x80) && (C->getSExtValue() <= 0x7f))
26692 weight = CW_Constant;
26696 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26697 if ((C->getZExtValue() == 0xff) || (C->getZExtValue() == 0xffff))
26698 weight = CW_Constant;
26702 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26703 if (C->getZExtValue() <= 3)
26704 weight = CW_Constant;
26708 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26709 if (C->getZExtValue() <= 0xff)
26710 weight = CW_Constant;
26715 if (dyn_cast<ConstantFP>(CallOperandVal)) {
26716 weight = CW_Constant;
26720 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26721 if ((C->getSExtValue() >= -0x80000000LL) &&
26722 (C->getSExtValue() <= 0x7fffffffLL))
26723 weight = CW_Constant;
26727 if (ConstantInt *C = dyn_cast<ConstantInt>(CallOperandVal)) {
26728 if (C->getZExtValue() <= 0xffffffff)
26729 weight = CW_Constant;
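// Illustrative note (added, not from the original source): for an operand of
// type <8 x float> (256 bits) under an SSE-class constraint, the size/feature
// check above only awards CW_Register when 256-bit FP support is present; on
// an SSE2-only target the same operand falls back to the generic, lower
// weighting computed by TargetLowering.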
26736 /// LowerXConstraint - try to replace an X constraint, which matches anything,
26737 /// with another that has more specific requirements based on the type of the
26738 /// corresponding operand.
26739 const char *X86TargetLowering::
26740 LowerXConstraint(EVT ConstraintVT) const {
26741 // FP X constraints get lowered to SSE1/2 registers if available, otherwise
26742 // 'f' like normal targets.
26743 if (ConstraintVT.isFloatingPoint()) {
26744 if (Subtarget->hasSSE2())
26746 if (Subtarget->hasSSE1())
26750 return TargetLowering::LowerXConstraint(ConstraintVT);
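// Illustrative note (added, not from the original source): for something like
//   asm("" : "=X"(f));   // f is a hypothetical 'float'
// the catch-all 'X' constraint is narrowed here to an SSE register class when
// SSE is available, avoiding the x87 stack ('f' constraint) and the fp-stack
// shuffling it would require around the asm block.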
26753 /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
26754 /// vector. If it is invalid, don't add anything to Ops.
26755 void X86TargetLowering::LowerAsmOperandForConstraint(SDValue Op,
26756 std::string &Constraint,
26757 std::vector<SDValue>&Ops,
26758 SelectionDAG &DAG) const {
26761 // Only support length 1 constraints for now.
26762 if (Constraint.length() > 1) return;
26764 char ConstraintLetter = Constraint[0];
26765 switch (ConstraintLetter) {
26768 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26769 if (C->getZExtValue() <= 31) {
26770 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26776 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26777 if (C->getZExtValue() <= 63) {
26778 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26784 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26785 if (isInt<8>(C->getSExtValue())) {
26786 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26792 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26793 if (C->getZExtValue() == 0xff || C->getZExtValue() == 0xffff ||
26794 (Subtarget->is64Bit() && C->getZExtValue() == 0xffffffff)) {
26795 Result = DAG.getTargetConstant(C->getSExtValue(), Op.getValueType());
26801 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26802 if (C->getZExtValue() <= 3) {
26803 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26809 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26810 if (C->getZExtValue() <= 255) {
26811 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26817 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26818 if (C->getZExtValue() <= 127) {
26819 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26825 // 32-bit signed value
26826 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26827 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
26828 C->getSExtValue())) {
26829 // Widen to 64 bits here to get it sign extended.
26830 Result = DAG.getTargetConstant(C->getSExtValue(), MVT::i64);
26833 // FIXME gcc accepts some relocatable values here too, but only in certain
26834 // memory models; it's complicated.
26839 // 32-bit unsigned value
26840 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op)) {
26841 if (ConstantInt::isValueValidForType(Type::getInt32Ty(*DAG.getContext()),
26842 C->getZExtValue())) {
26843 Result = DAG.getTargetConstant(C->getZExtValue(), Op.getValueType());
26847 // FIXME gcc accepts some relocatable values here too, but only in certain
26848 // memory models; it's complicated.
26852 // Literal immediates are always ok.
26853 if (ConstantSDNode *CST = dyn_cast<ConstantSDNode>(Op)) {
26854 // Widen to 64 bits here to get it sign extended.
26855 Result = DAG.getTargetConstant(CST->getSExtValue(), MVT::i64);
26859 // In any sort of PIC mode, addresses need to be computed at runtime by
26860 // adding in a register or some sort of table lookup. These can't
26861 // be used as immediates.
26862 if (Subtarget->isPICStyleGOT() || Subtarget->isPICStyleStubPIC())
26865 // If we are in non-pic codegen mode, we allow the address of a global (with
26866 // an optional displacement) to be used with 'i'.
26867 GlobalAddressSDNode *GA = nullptr;
26868 int64_t Offset = 0;
26870 // Match either (GA), (GA+C), (GA+C1+C2), etc.
26872 if ((GA = dyn_cast<GlobalAddressSDNode>(Op))) {
26873 Offset += GA->getOffset();
26875 } else if (Op.getOpcode() == ISD::ADD) {
26876 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
26877 Offset += C->getZExtValue();
26878 Op = Op.getOperand(0);
26881 } else if (Op.getOpcode() == ISD::SUB) {
26882 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
26883 Offset += -C->getZExtValue();
26884 Op = Op.getOperand(0);
26889 // Otherwise, this isn't something we can handle; reject it.
26893 const GlobalValue *GV = GA->getGlobal();
26894 // If we require an extra load to get this address, as in PIC mode, we
26895 // can't accept it.
26896 if (isGlobalStubReference(
26897 Subtarget->ClassifyGlobalReference(GV, DAG.getTarget())))
26900 Result = DAG.getTargetGlobalAddress(GV, SDLoc(Op),
26901 GA->getValueType(0), Offset);
26906 if (Result.getNode()) {
26907 Ops.push_back(Result);
26910 return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
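// Illustrative note (added, not from the original source): for
//   asm("shll %1, %0" : "+r"(x) : "I"(5));   // x is a hypothetical variable
// the constant 5 satisfies the 'I' range check (0..31) above and is emitted as
// a target constant operand, whereas e.g. 42 produces no Result and the
// operand is ultimately rejected. Similarly, under 'i' a non-PIC global plus a
// constant displacement is folded into a single TargetGlobalAddress carrying
// the accumulated offset.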
26913 std::pair<unsigned, const TargetRegisterClass*>
26914 X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const {
26916 // First, see if this is a constraint that directly corresponds to an LLVM register class.
26918 if (Constraint.size() == 1) {
26919 // GCC Constraint Letters
26920 switch (Constraint[0]) {
26922 // TODO: Slight differences here in allocation order and leaving
26923 // RIP in the class. Do they matter any more here than they do
26924 // in the normal allocation?
26925 case 'q': // GENERAL_REGS in 64-bit mode, Q_REGS in 32-bit mode.
26926 if (Subtarget->is64Bit()) {
26927 if (VT == MVT::i32 || VT == MVT::f32)
26928 return std::make_pair(0U, &X86::GR32RegClass);
26929 if (VT == MVT::i16)
26930 return std::make_pair(0U, &X86::GR16RegClass);
26931 if (VT == MVT::i8 || VT == MVT::i1)
26932 return std::make_pair(0U, &X86::GR8RegClass);
26933 if (VT == MVT::i64 || VT == MVT::f64)
26934 return std::make_pair(0U, &X86::GR64RegClass);
26937 // 32-bit fallthrough
26938 case 'Q': // Q_REGS
26939 if (VT == MVT::i32 || VT == MVT::f32)
26940 return std::make_pair(0U, &X86::GR32_ABCDRegClass);
26941 if (VT == MVT::i16)
26942 return std::make_pair(0U, &X86::GR16_ABCDRegClass);
26943 if (VT == MVT::i8 || VT == MVT::i1)
26944 return std::make_pair(0U, &X86::GR8_ABCD_LRegClass);
26945 if (VT == MVT::i64)
26946 return std::make_pair(0U, &X86::GR64_ABCDRegClass);
26948 case 'r': // GENERAL_REGS
26949 case 'l': // INDEX_REGS
26950 if (VT == MVT::i8 || VT == MVT::i1)
26951 return std::make_pair(0U, &X86::GR8RegClass);
26952 if (VT == MVT::i16)
26953 return std::make_pair(0U, &X86::GR16RegClass);
26954 if (VT == MVT::i32 || VT == MVT::f32 || !Subtarget->is64Bit())
26955 return std::make_pair(0U, &X86::GR32RegClass);
26956 return std::make_pair(0U, &X86::GR64RegClass);
26957 case 'R': // LEGACY_REGS
26958 if (VT == MVT::i8 || VT == MVT::i1)
26959 return std::make_pair(0U, &X86::GR8_NOREXRegClass);
26960 if (VT == MVT::i16)
26961 return std::make_pair(0U, &X86::GR16_NOREXRegClass);
26962 if (VT == MVT::i32 || !Subtarget->is64Bit())
26963 return std::make_pair(0U, &X86::GR32_NOREXRegClass);
26964 return std::make_pair(0U, &X86::GR64_NOREXRegClass);
26965 case 'f': // FP Stack registers.
26966 // If SSE is enabled for this VT, use f80 to ensure the isel moves the
26967 // value to the correct fpstack register class.
26968 if (VT == MVT::f32 && !isScalarFPTypeInSSEReg(VT))
26969 return std::make_pair(0U, &X86::RFP32RegClass);
26970 if (VT == MVT::f64 && !isScalarFPTypeInSSEReg(VT))
26971 return std::make_pair(0U, &X86::RFP64RegClass);
26972 return std::make_pair(0U, &X86::RFP80RegClass);
26973 case 'y': // MMX_REGS if MMX allowed.
26974 if (!Subtarget->hasMMX()) break;
26975 return std::make_pair(0U, &X86::VR64RegClass);
26976 case 'Y': // SSE_REGS if SSE2 allowed
26977 if (!Subtarget->hasSSE2()) break;
26979 case 'x': // SSE_REGS if SSE1 allowed or AVX_REGS if AVX allowed
26980 if (!Subtarget->hasSSE1()) break;
26982 switch (VT.SimpleTy) {
26984 // Scalar SSE types.
26987 return std::make_pair(0U, &X86::FR32RegClass);
26990 return std::make_pair(0U, &X86::FR64RegClass);
26998 return std::make_pair(0U, &X86::VR128RegClass);
27006 return std::make_pair(0U, &X86::VR256RegClass);
27011 return std::make_pair(0U, &X86::VR512RegClass);
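// Illustrative note (added, not from the original source): the net effect of
// the switches above is, for example, that "r" with an i64 operand in 64-bit
// mode maps to GR64, while "x" with a <4 x float> operand maps to VR128 (and
// 256-bit vector types map to VR256 when AVX is available).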
27017 // Use the default implementation in TargetLowering to convert the register
27018 // constraint into a member of a register class.
27019 std::pair<unsigned, const TargetRegisterClass*> Res;
27020 Res = TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
27022 // Not found as a standard register?
27024 // Map "{st(0)}" .. "{st(7)}" to the corresponding FP-stack register.
27025 if (Constraint.size() == 7 && Constraint[0] == '{' &&
27026 tolower(Constraint[1]) == 's' &&
27027 tolower(Constraint[2]) == 't' &&
27028 Constraint[3] == '(' &&
27029 (Constraint[4] >= '0' && Constraint[4] <= '7') &&
27030 Constraint[5] == ')' &&
27031 Constraint[6] == '}') {
27033 Res.first = X86::FP0+Constraint[4]-'0';
27034 Res.second = &X86::RFP80RegClass;
27038 // GCC allows "st(0)" to be called just plain "st".
27039 if (StringRef("{st}").equals_lower(Constraint)) {
27040 Res.first = X86::FP0;
27041 Res.second = &X86::RFP80RegClass;
27046 if (StringRef("{flags}").equals_lower(Constraint)) {
27047 Res.first = X86::EFLAGS;
27048 Res.second = &X86::CCRRegClass;
27052 // 'A' means EAX + EDX.
27053 if (Constraint == "A") {
27054 Res.first = X86::EAX;
27055 Res.second = &X86::GR32_ADRegClass;
27061 // Otherwise, check to see if this is a register class of the wrong value
27062 // type. For example, we want to map "{ax},i32" -> {eax}, we don't want it to
27063 // turn into {ax},{dx}.
27064 if (Res.second->hasType(VT))
27065 return Res; // Correct type already, nothing to do.
27067 // All of the single-register GCC register classes map their values onto
27068 // 16-bit register pieces "ax","dx","cx","bx","si","di","bp","sp". If we
27069 // really want an 8-bit or 32-bit register, map to the appropriate register
27070 // class and return the appropriate register.
27071 if (Res.second == &X86::GR16RegClass) {
27072 if (VT == MVT::i8 || VT == MVT::i1) {
27073 unsigned DestReg = 0;
27074 switch (Res.first) {
27076 case X86::AX: DestReg = X86::AL; break;
27077 case X86::DX: DestReg = X86::DL; break;
27078 case X86::CX: DestReg = X86::CL; break;
27079 case X86::BX: DestReg = X86::BL; break;
27082 Res.first = DestReg;
27083 Res.second = &X86::GR8RegClass;
27085 } else if (VT == MVT::i32 || VT == MVT::f32) {
27086 unsigned DestReg = 0;
27087 switch (Res.first) {
27089 case X86::AX: DestReg = X86::EAX; break;
27090 case X86::DX: DestReg = X86::EDX; break;
27091 case X86::CX: DestReg = X86::ECX; break;
27092 case X86::BX: DestReg = X86::EBX; break;
27093 case X86::SI: DestReg = X86::ESI; break;
27094 case X86::DI: DestReg = X86::EDI; break;
27095 case X86::BP: DestReg = X86::EBP; break;
27096 case X86::SP: DestReg = X86::ESP; break;
27099 Res.first = DestReg;
27100 Res.second = &X86::GR32RegClass;
27102 } else if (VT == MVT::i64 || VT == MVT::f64) {
27103 unsigned DestReg = 0;
27104 switch (Res.first) {
27106 case X86::AX: DestReg = X86::RAX; break;
27107 case X86::DX: DestReg = X86::RDX; break;
27108 case X86::CX: DestReg = X86::RCX; break;
27109 case X86::BX: DestReg = X86::RBX; break;
27110 case X86::SI: DestReg = X86::RSI; break;
27111 case X86::DI: DestReg = X86::RDI; break;
27112 case X86::BP: DestReg = X86::RBP; break;
27113 case X86::SP: DestReg = X86::RSP; break;
27116 Res.first = DestReg;
27117 Res.second = &X86::GR64RegClass;
27120 } else if (Res.second == &X86::FR32RegClass ||
27121 Res.second == &X86::FR64RegClass ||
27122 Res.second == &X86::VR128RegClass ||
27123 Res.second == &X86::VR256RegClass ||
27124 Res.second == &X86::FR32XRegClass ||
27125 Res.second == &X86::FR64XRegClass ||
27126 Res.second == &X86::VR128XRegClass ||
27127 Res.second == &X86::VR256XRegClass ||
27128 Res.second == &X86::VR512RegClass) {
27129 // Handle references to XMM physical registers that got mapped into the
27130 // wrong class. This can happen with constraints like {xmm0} where the
27131 // target independent register mapper will just pick the first match it can
27132 // find, ignoring the required type.
27134 if (VT == MVT::f32 || VT == MVT::i32)
27135 Res.second = &X86::FR32RegClass;
27136 else if (VT == MVT::f64 || VT == MVT::i64)
27137 Res.second = &X86::FR64RegClass;
27138 else if (X86::VR128RegClass.hasType(VT))
27139 Res.second = &X86::VR128RegClass;
27140 else if (X86::VR256RegClass.hasType(VT))
27141 Res.second = &X86::VR256RegClass;
27142 else if (X86::VR512RegClass.hasType(VT))
27143 Res.second = &X86::VR512RegClass;
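// Illustrative note (added, not from the original source): a constraint such
// as "{xmm0}" with a <4 x float> operand may initially be resolved by the
// generic mapper to a scalar class like FR32 (the first class containing
// XMM0); the code above then widens the class to VR128 so the whole vector
// value can live in the register.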
27149 int X86TargetLowering::getScalingFactorCost(const AddrMode &AM, Type *Ty) const {
27151 // Scaling factors are not free at all.
27152 // An indexed folded instruction, i.e., inst (reg1, reg2, scale),
27153 // will take 2 allocations in the out-of-order engine instead of 1
27154 // for plain addressing mode, i.e. inst (reg1).
27156 // E.g., vaddps (%rsi,%rdx), %ymm0, %ymm1
27157 // requires two allocations (one for the load, one for the computation),
27159 // whereas vaddps (%rsi), %ymm0, %ymm1
27160 // requires just one allocation, i.e., freeing allocations for other operations
27161 // and having fewer micro-operations to execute.
27163 // For some X86 architectures, this is even worse because for instance for
27164 // stores, the complex addressing mode forces the instruction to use the
27165 // "load" ports instead of the dedicated "store" port.
27166 // E.g., on Haswell:
27167 // vmovaps %ymm1, (%r8, %rdi) can use port 2 or 3.
27168 // vmovaps %ymm1, (%r8) can use port 2, 3, or 7.
27169 if (isLegalAddressingMode(AM, Ty))
27170 // Scale represents reg2 * scale, thus account for 1
27171 // as soon as we use a second register.
27172 return AM.Scale != 0;
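// Illustrative note (added, not from the original source): an addressing mode
// of the form base + 4*index (AM.Scale == 4) is therefore reported with cost
// 1, while a plain base or base + displacement mode (AM.Scale == 0) is
// treated as free.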
27176 bool X86TargetLowering::isTargetFTOL() const {
27177 return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit();